---
# TODO: add ability to configure certificates given either a local file to
#       point to or certificate contents, set in default cert locations.

# Authentication Variable Validation
# TODO: validate the different identity provider kinds as well
- fail:
    msg: >
      Invalid OAuth grant method: {{ openshift_master_oauth_grant_method }}
  when:
  - openshift_master_oauth_grant_method is defined
  - openshift_master_oauth_grant_method not in openshift_master_valid_grant_methods

# HA Variable Validation
- fail:
    msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations"
  when:
  - openshift.master.ha | bool
  - (openshift.master.cluster_method is not defined) or (openshift.master.cluster_method is defined and openshift.master.cluster_method not in ["native", "pacemaker"])

- fail:
    msg: "'native' high availability is not supported for the requested OpenShift version"
  when:
  - openshift.master.ha | bool
  - openshift.master.cluster_method == "native"
  - not openshift.common.version_gte_3_1_or_1_1 | bool

- fail:
    msg: "openshift_master_cluster_password must be set for multi-master installations"
  when:
  - openshift.master.ha | bool
  - openshift.master.cluster_method == "pacemaker"
  - openshift_master_cluster_password is not defined or not openshift_master_cluster_password

- fail:
    msg: "Pacemaker based HA is not supported at this time when used with containerized installs"
  when:
  - openshift.master.ha | bool
  - openshift.master.cluster_method == "pacemaker"
  - openshift.common.is_containerized | bool

- name: Open up firewall ports
  include: firewall.yml
  static: yes

- name: Install Master package
  package:
    name: "{{ openshift.common.service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
    state: present
  when:
  - not openshift.common.is_containerized | bool

- name: Create openshift.common.data_dir
  file:
    path: "{{ openshift.common.data_dir }}"
    state: directory
    mode: 0755
    owner: root
    group: root
  when:
  - openshift.common.is_containerized | bool

- name: Reload systemd units
  command: systemctl daemon-reload
  when:
  - openshift.common.is_containerized | bool

- name: Re-gather package dependent master facts
  openshift_facts:

- name: Create config parent directory if it does not exist
  file:
    path: "{{ openshift_master_config_dir }}"
    state: directory

- name: Create the policy file if it does not already exist
  command: >
    {{ openshift.common.client_binary }} adm create-bootstrap-policy-file
    --filename={{ openshift_master_policy }}
  args:
    creates: "{{ openshift_master_policy }}"
  notify:
  - restart master api
  - restart master controllers

- name: Create the scheduler config
  copy:
    content: "{{ scheduler_config | to_nice_json }}"
    dest: "{{ openshift_master_scheduler_conf }}"
    backup: true
  notify:
  - restart master api
  - restart master controllers

- name: Install httpd-tools if needed
  package: name=httpd-tools state=present
  when:
  - item.kind == 'HTPasswdPasswordIdentityProvider'
  - not openshift.common.is_atomic | bool
  with_items: "{{ openshift.master.identity_providers }}"

- name: Ensure htpasswd directory exists
  file:
    path: "{{ item.filename | dirname }}"
    state: directory
  when:
  - item.kind == 'HTPasswdPasswordIdentityProvider'
  with_items: "{{ openshift.master.identity_providers }}"

- name: Create the htpasswd file if needed
  template:
    dest: "{{ item.filename }}"
    src: htpasswd.j2
    backup: yes
  when:
  - item.kind == 'HTPasswdPasswordIdentityProvider'
  - openshift.master.manage_htpasswd | bool
  with_items: "{{ openshift.master.identity_providers }}"
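
# Even when the htpasswd contents are not managed here, make sure an (empty)
# htpasswd file exists for the identity provider to read; force: no leaves
# any existing file untouched.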
- name: Ensure htpasswd file exists
  copy:
    dest: "{{ item.filename }}"
    force: no
    content: ""
    mode: 0600
  when:
  - item.kind == 'HTPasswdPasswordIdentityProvider'
  with_items: "{{ openshift.master.identity_providers }}"

- name: Create the ldap ca file if needed
  copy:
    dest: "{{ item.ca if 'ca' in item and '/' in item.ca else openshift_master_config_dir ~ '/' ~ item.ca | default('ldap_ca.crt') }}"
    content: "{{ openshift.master.ldap_ca }}"
    mode: 0600
    backup: yes
  when:
  - openshift.master.ldap_ca is defined
  - item.kind == 'LDAPPasswordIdentityProvider'
  with_items: "{{ openshift.master.identity_providers }}"

- name: Create the openid ca file if needed
  copy:
    dest: "{{ item.ca if 'ca' in item and '/' in item.ca else openshift_master_config_dir ~ '/' ~ item.ca | default('openid_ca.crt') }}"
    content: "{{ openshift.master.openid_ca }}"
    mode: 0600
    backup: yes
  when:
  - openshift.master.openid_ca is defined
  - item.kind == 'OpenIDIdentityProvider'
  - item.ca | default('') != ''
  with_items: "{{ openshift.master.identity_providers }}"

- name: Create the request header ca file if needed
  copy:
    dest: "{{ item.clientCA if 'clientCA' in item and '/' in item.clientCA else openshift_master_config_dir ~ '/' ~ item.clientCA | default('request_header_ca.crt') }}"
    content: "{{ openshift.master.request_header_ca }}"
    mode: 0600
    backup: yes
  when:
  - openshift.master.request_header_ca is defined
  - item.kind == 'RequestHeaderIdentityProvider'
  - item.clientCA | default('') != ''
  with_items: "{{ openshift.master.identity_providers }}"

# This is an ugly hack to verify settings are in a file without modifying them with lineinfile.
# The template file will stomp any other settings made.
- block:
  - name: check whether our docker-registry setting exists in the env file
    command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift.common.service_type }}-master"
    failed_when: false
    changed_when: false
    register: l_already_set

  - set_fact:
      openshift_push_via_dns: "{{ (openshift_use_dnsmasq | default(true) and openshift.common.version_gte_3_6) or (l_already_set.stdout is defined and l_already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"

- name: Set fact of all etcd host IPs
  openshift_facts:
    role: common
    local_facts:
      no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}"

- name: Remove the legacy master service if it exists
  include: clean_systemd_units.yml

- name: Install the systemd units
  include: systemd_units.yml

- name: Install Master system container
  include: system_container.yml
  when:
  - openshift.common.is_containerized | bool
  - openshift.common.is_master_system_container | bool

- name: Create session secrets file
  template:
    dest: "{{ openshift.master.session_secrets_file }}"
    src: sessionSecretsFile.yaml.v1.j2
    owner: root
    group: root
    mode: 0600
  when:
  - openshift.master.session_auth_secrets is defined
  - openshift.master.session_encryption_secrets is defined
  notify:
  - restart master api

- set_fact:
    translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1', openshift.common.version, openshift.common.deployment_type) }}"

# TODO: add the validate parameter when there is a validation command to run
- name: Create master config
  template:
    dest: "{{ openshift_master_config_file }}"
    src: master.yaml.v1.j2
    backup: true
    owner: root
    group: root
    mode: 0600
  notify:
  - restart master api
  - restart master controllers

- include: set_loopback_context.yml
  when:
  - openshift.common.version_gte_3_2_or_1_2
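
# For native HA, the API service is started on the first master before the
# remaining masters; the controller services below follow the same
# first-master-then-the-rest ordering.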
- name: Start and enable master api on first master
  systemd:
    name: "{{ openshift.common.service_type }}-master-api"
    enabled: yes
    state: started
  when:
  - openshift.master.cluster_method == 'native'
  - inventory_hostname == openshift_master_hosts[0]
  register: l_start_result
  until: not l_start_result | failed
  retries: 1
  delay: 60

- name: Dump logs from master-api if it failed
  command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api
  when:
  - l_start_result | failed

- set_fact:
    master_api_service_status_changed: "{{ l_start_result | changed }}"
  when:
  - openshift.master.cluster_method == 'native'
  - inventory_hostname == openshift_master_hosts[0]

- pause:
    seconds: 15
  when:
  - openshift.master.ha | bool
  - openshift.master.cluster_method == 'native'

- name: Start and enable master api on all masters
  systemd:
    name: "{{ openshift.common.service_type }}-master-api"
    enabled: yes
    state: started
  when:
  - openshift.master.cluster_method == 'native'
  - inventory_hostname != openshift_master_hosts[0]
  register: l_start_result
  until: not l_start_result | failed
  retries: 1
  delay: 60

- name: Dump logs from master-api if it failed
  command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api
  when:
  - l_start_result | failed

- set_fact:
    master_api_service_status_changed: "{{ l_start_result | changed }}"
  when:
  - openshift.master.cluster_method == 'native'
  - inventory_hostname != openshift_master_hosts[0]

# A separate wait is required here for native HA since notifies will
# be resolved after all tasks in the role.
- name: Wait for API to become available
  # Using curl here since the uri module requires python-httplib2 and
  # wait_for port doesn't provide health information.
  command: >
    curl --silent --tlsv1.2
    {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
    --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
    {% else %}
    --cacert {{ openshift.common.config_base }}/master/ca.crt
    {% endif %}
    {{ openshift.master.api_url }}/healthz/ready
  register: l_api_available_output
  until: l_api_available_output.stdout == 'ok'
  retries: 120
  delay: 1
  run_once: true
  changed_when: false
  when:
  - openshift.master.cluster_method == 'native'
  - master_api_service_status_changed | bool

- name: Start and enable master controller on first master
  systemd:
    name: "{{ openshift.common.service_type }}-master-controllers"
    enabled: yes
    state: started
  when:
  - openshift.master.cluster_method == 'native'
  - inventory_hostname == openshift_master_hosts[0]
  register: l_start_result
  until: not l_start_result | failed
  retries: 1
  delay: 60

- name: Dump logs from master-controllers if it failed
  command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers
  when:
  - l_start_result | failed

- name: Wait for master controller service to start on first master
  pause:
    seconds: 15
  when:
  - openshift.master.cluster_method == 'native'

- name: Start and enable master controller on all masters
  systemd:
    name: "{{ openshift.common.service_type }}-master-controllers"
    enabled: yes
    state: started
  when:
  - openshift.master.cluster_method == 'native'
  - inventory_hostname != openshift_master_hosts[0]
  register: l_start_result
  until: not l_start_result | failed
  retries: 1
  delay: 60

- name: Dump logs from master-controllers if it failed
  command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers
  when:
  - l_start_result | failed

- set_fact:
    master_controllers_service_status_changed: "{{ l_start_result | changed }}"
  when:
  - openshift.master.cluster_method == 'native'
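
# Pacemaker-based HA: install pcs, start pcsd, and set the hacluster user's
# password. These tasks only apply to RPM installs; containerized pacemaker
# setups are rejected by the validation at the top of this file.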
- name: Install cluster packages
  package: name=pcs state=present
  when:
  - openshift.master.cluster_method == 'pacemaker'
  - not openshift.common.is_containerized | bool
  register: l_install_result

- name: Start and enable cluster service
  systemd:
    name: pcsd
    enabled: yes
    state: started
  when:
  - openshift.master.cluster_method == 'pacemaker'
  - not openshift.common.is_containerized | bool

- name: Set the cluster user password
  shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster
  when:
  - l_install_result | changed