From 78f11c8f671015d40a630208b548d0790ec3a823 Mon Sep 17 00:00:00 2001 From: Russell Teague Date: Thu, 16 Nov 2017 14:18:58 -0500 Subject: Playbook Consolidation - openshift-master --- playbooks/aws/openshift-cluster/install.yml | 4 +- .../openshift-cluster/redeploy-certificates.yml | 8 +- .../redeploy-etcd-certificates.yml | 2 +- .../redeploy-master-certificates.yml | 4 +- .../byo/openshift-master/additional_config.yml | 4 - playbooks/byo/openshift-master/certificates.yml | 4 - playbooks/byo/openshift-master/config.yml | 4 - playbooks/byo/openshift-master/filter_plugins | 1 - playbooks/byo/openshift-master/lookup_plugins | 1 - playbooks/byo/openshift-master/restart.yml | 4 - playbooks/byo/openshift-master/roles | 1 - playbooks/byo/openshift-master/scaleup.yml | 23 -- playbooks/common/openshift-cluster/config.yml | 4 +- .../redeploy-certificates/etcd-ca.yml | 2 +- .../redeploy-certificates/openshift-ca.yml | 2 +- .../upgrades/upgrade_control_plane.yml | 4 +- .../openshift-cluster/upgrades/v3_6/upgrade.yml | 2 +- .../upgrades/v3_6/upgrade_control_plane.yml | 2 +- .../openshift-cluster/upgrades/v3_7/upgrade.yml | 2 +- .../upgrades/v3_7/upgrade_control_plane.yml | 2 +- .../openshift-cluster/upgrades/v3_8/upgrade.yml | 2 +- .../upgrades/v3_8/upgrade_control_plane.yml | 2 +- .../common/openshift-master/additional_config.yml | 52 ----- playbooks/common/openshift-master/certificates.yml | 14 -- playbooks/common/openshift-master/config.yml | 252 --------------------- playbooks/common/openshift-master/filter_plugins | 1 - playbooks/common/openshift-master/library | 1 - playbooks/common/openshift-master/lookup_plugins | 1 - playbooks/common/openshift-master/restart.yml | 19 -- .../common/openshift-master/restart_hosts.yml | 40 ---- .../common/openshift-master/restart_services.yml | 4 - .../common/openshift-master/revert-client-ca.yml | 17 -- playbooks/common/openshift-master/roles | 1 - playbooks/common/openshift-master/scaleup.yml | 57 ----- .../common/openshift-master/set_network_facts.yml | 34 --- .../openshift-master/tasks/wire_aggregator.yml | 216 ------------------ .../templates/openshift-ansible-catalog-console.js | 1 - .../common/openshift-master/validate_restart.yml | 65 ------ playbooks/openshift-master/additional_config.yml | 4 + playbooks/openshift-master/certificates.yml | 4 + playbooks/openshift-master/config.yml | 4 + .../openshift-master/private/additional_config.yml | 52 +++++ .../openshift-master/private/certificates.yml | 14 ++ playbooks/openshift-master/private/config.yml | 252 +++++++++++++++++++++ playbooks/openshift-master/private/filter_plugins | 1 + playbooks/openshift-master/private/library | 1 + playbooks/openshift-master/private/lookup_plugins | 1 + playbooks/openshift-master/private/restart.yml | 19 ++ .../openshift-master/private/restart_hosts.yml | 40 ++++ .../openshift-master/private/restart_services.yml | 4 + .../openshift-master/private/revert-client-ca.yml | 17 ++ playbooks/openshift-master/private/roles | 1 + playbooks/openshift-master/private/scaleup.yml | 57 +++++ .../openshift-master/private/set_network_facts.yml | 34 +++ .../private/tasks/wire_aggregator.yml | 216 ++++++++++++++++++ .../templates/openshift-ansible-catalog-console.js | 1 + .../openshift-master/private/validate_restart.yml | 65 ++++++ playbooks/openshift-master/restart.yml | 4 + playbooks/openshift-master/scaleup.yml | 23 ++ roles/installer_checkpoint/README.md | 2 +- .../callback_plugins/installer_checkpoint.py | 4 +- 61 files changed, 838 insertions(+), 841 deletions(-) delete mode 100644 
playbooks/byo/openshift-master/additional_config.yml delete mode 100644 playbooks/byo/openshift-master/certificates.yml delete mode 100644 playbooks/byo/openshift-master/config.yml delete mode 120000 playbooks/byo/openshift-master/filter_plugins delete mode 120000 playbooks/byo/openshift-master/lookup_plugins delete mode 100644 playbooks/byo/openshift-master/restart.yml delete mode 120000 playbooks/byo/openshift-master/roles delete mode 100644 playbooks/byo/openshift-master/scaleup.yml delete mode 100644 playbooks/common/openshift-master/additional_config.yml delete mode 100644 playbooks/common/openshift-master/certificates.yml delete mode 100644 playbooks/common/openshift-master/config.yml delete mode 120000 playbooks/common/openshift-master/filter_plugins delete mode 120000 playbooks/common/openshift-master/library delete mode 120000 playbooks/common/openshift-master/lookup_plugins delete mode 100644 playbooks/common/openshift-master/restart.yml delete mode 100644 playbooks/common/openshift-master/restart_hosts.yml delete mode 100644 playbooks/common/openshift-master/restart_services.yml delete mode 100644 playbooks/common/openshift-master/revert-client-ca.yml delete mode 120000 playbooks/common/openshift-master/roles delete mode 100644 playbooks/common/openshift-master/scaleup.yml delete mode 100644 playbooks/common/openshift-master/set_network_facts.yml delete mode 100644 playbooks/common/openshift-master/tasks/wire_aggregator.yml delete mode 100644 playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js delete mode 100644 playbooks/common/openshift-master/validate_restart.yml create mode 100644 playbooks/openshift-master/additional_config.yml create mode 100644 playbooks/openshift-master/certificates.yml create mode 100644 playbooks/openshift-master/config.yml create mode 100644 playbooks/openshift-master/private/additional_config.yml create mode 100644 playbooks/openshift-master/private/certificates.yml create mode 100644 playbooks/openshift-master/private/config.yml create mode 120000 playbooks/openshift-master/private/filter_plugins create mode 120000 playbooks/openshift-master/private/library create mode 120000 playbooks/openshift-master/private/lookup_plugins create mode 100644 playbooks/openshift-master/private/restart.yml create mode 100644 playbooks/openshift-master/private/restart_hosts.yml create mode 100644 playbooks/openshift-master/private/restart_services.yml create mode 100644 playbooks/openshift-master/private/revert-client-ca.yml create mode 120000 playbooks/openshift-master/private/roles create mode 100644 playbooks/openshift-master/private/scaleup.yml create mode 100644 playbooks/openshift-master/private/set_network_facts.yml create mode 100644 playbooks/openshift-master/private/tasks/wire_aggregator.yml create mode 100644 playbooks/openshift-master/private/templates/openshift-ansible-catalog-console.js create mode 100644 playbooks/openshift-master/private/validate_restart.yml create mode 100644 playbooks/openshift-master/restart.yml create mode 100644 playbooks/openshift-master/scaleup.yml diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml index 56ca25eb4..6c177bd21 100644 --- a/playbooks/aws/openshift-cluster/install.yml +++ b/playbooks/aws/openshift-cluster/install.yml @@ -33,10 +33,10 @@ when: groups.oo_lb_to_config | default([]) | count > 0 - name: include openshift-master config - include: ../../common/openshift-master/config.yml + include: ../../openshift-master/private/config.yml - 
name: include master additional config - include: ../../common/openshift-master/additional_config.yml + include: ../../openshift-master/private/additional_config.yml - name: include openshift-node config include: ../../openshift-node/private/config.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml index 46bdead08..c26f11772 100644 --- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml @@ -13,7 +13,7 @@ - include: ../../common/openshift-cluster/redeploy-certificates/masters-backup.yml -- include: ../../common/openshift-master/certificates.yml +- include: ../../openshift-master/private/certificates.yml vars: openshift_certificates_redeploy: true @@ -27,7 +27,7 @@ vars: g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}" -- include: ../../common/openshift-master/restart.yml +- include: ../../openshift-master/private/restart.yml - include: ../../openshift-node/private/restart.yml @@ -37,6 +37,6 @@ - include: ../../common/openshift-cluster/redeploy-certificates/registry.yml when: openshift_hosted_manage_registry | default(true) | bool -- include: ../../common/openshift-master/revert-client-ca.yml +- include: ../../openshift-master/private/revert-client-ca.yml -- include: ../../common/openshift-master/restart.yml +- include: ../../openshift-master/private/restart.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml index f9d12251f..94e50cc28 100644 --- a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml @@ -15,4 +15,4 @@ vars: g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}" -- include: ../../common/openshift-master/restart.yml +- include: ../../openshift-master/private/restart.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml index 6a4528b7f..88e52f809 100644 --- a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml @@ -3,8 +3,8 @@ - include: ../../common/openshift-cluster/redeploy-certificates/masters-backup.yml -- include: ../../common/openshift-master/certificates.yml +- include: ../../openshift-master/private/certificates.yml vars: openshift_certificates_redeploy: true -- include: ../../common/openshift-master/restart.yml +- include: ../../openshift-master/private/restart.yml diff --git a/playbooks/byo/openshift-master/additional_config.yml b/playbooks/byo/openshift-master/additional_config.yml deleted file mode 100644 index 1454190b2..000000000 --- a/playbooks/byo/openshift-master/additional_config.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: ../../init/main.yml - -- include: ../../common/openshift-master/additional_config.yml diff --git a/playbooks/byo/openshift-master/certificates.yml b/playbooks/byo/openshift-master/certificates.yml deleted file mode 100644 index 344985244..000000000 --- a/playbooks/byo/openshift-master/certificates.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: ../../init/main.yml - -- include: 
../../common/openshift-master/certificates.yml diff --git a/playbooks/byo/openshift-master/config.yml b/playbooks/byo/openshift-master/config.yml deleted file mode 100644 index 913525e65..000000000 --- a/playbooks/byo/openshift-master/config.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: ../../init/main.yml - -- include: ../../common/openshift-master/config.yml diff --git a/playbooks/byo/openshift-master/filter_plugins b/playbooks/byo/openshift-master/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/byo/openshift-master/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins \ No newline at end of file diff --git a/playbooks/byo/openshift-master/lookup_plugins b/playbooks/byo/openshift-master/lookup_plugins deleted file mode 120000 index ac79701db..000000000 --- a/playbooks/byo/openshift-master/lookup_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../lookup_plugins \ No newline at end of file diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml deleted file mode 100644 index d2031d928..000000000 --- a/playbooks/byo/openshift-master/restart.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: ../../init/main.yml - -- include: ../../common/openshift-master/restart.yml diff --git a/playbooks/byo/openshift-master/roles b/playbooks/byo/openshift-master/roles deleted file mode 120000 index 20c4c58cf..000000000 --- a/playbooks/byo/openshift-master/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles \ No newline at end of file diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml deleted file mode 100644 index 4fdec5e7d..000000000 --- a/playbooks/byo/openshift-master/scaleup.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- include: ../../init/evaluate_groups.yml - -- name: Ensure there are new_masters or new_nodes - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - fail: - msg: > - Detected no new_masters or no new_nodes in inventory. Please - add hosts to the new_masters and new_nodes host groups to add - masters. 
- when: - - g_new_master_hosts | default([]) | length == 0 - - g_new_node_hosts | default([]) | length == 0 - -# Need a better way to do the above check for node without -# running evaluate_groups and init/main.yml -- include: ../../init/main.yml - -- include: ../../common/openshift-master/scaleup.yml diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index bd65b3c8f..5e74e2c0b 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -9,9 +9,9 @@ - include: ../../openshift-loadbalancer/private/config.yml when: groups.oo_lb_to_config | default([]) | count > 0 -- include: ../openshift-master/config.yml +- include: ../../openshift-master/private/config.yml -- include: ../openshift-master/additional_config.yml +- include: ../../openshift-master/private/additional_config.yml - include: ../../openshift-node/private/config.yml diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml index 41e19f5d6..ff64342c8 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml @@ -84,7 +84,7 @@ state: absent changed_when: false -- include: ../../openshift-master/restart.yml +- include: ../../../openshift-master/private/restart.yml # Do not restart masters when master or etcd certificates were previously expired. when: # masters diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml index b58bf3c91..5a837d80d 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml @@ -207,7 +207,7 @@ group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}" with_items: "{{ client_users }}" -- include: ../../openshift-master/restart.yml +- include: ../../../openshift-master/private/restart.yml # Do not restart masters when master or etcd certificates were previously expired. when: # masters diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index fa65567c2..52438bdc4 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -85,10 +85,10 @@ - include: "{{ openshift_master_upgrade_hook }}" when: openshift_master_upgrade_hook is defined - - include: ../../openshift-master/restart_hosts.yml + - include: ../../../openshift-master/private/restart_hosts.yml when: openshift.common.rolling_restart_mode == 'system' - - include: ../../openshift-master/restart_services.yml + - include: ../../../openshift-master/private/restart_services.yml when: openshift.common.rolling_restart_mode == 'services' # Run the post-upgrade hook if defined: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index ef52f214b..6cb6a665f 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -79,7 +79,7 @@ # docker is configured and running. 
skip_docker_role: True -- include: ../../../openshift-master/validate_restart.yml +- include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index 4c6646a38..8f48bedcc 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -83,7 +83,7 @@ # docker is configured and running. skip_docker_role: True -- include: ../../../openshift-master/validate_restart.yml +- include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml index e3c012380..2b99568c7 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -83,7 +83,7 @@ # docker is configured and running. skip_docker_role: True -- include: ../../../openshift-master/validate_restart.yml +- include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml index a88fa7b2e..d3d2046e6 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -87,7 +87,7 @@ # docker is configured and running. skip_docker_role: True -- include: ../../../openshift-master/validate_restart.yml +- include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml index 73df15d53..b602cdd0e 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml @@ -83,7 +83,7 @@ # docker is configured and running. skip_docker_role: True -- include: ../../../openshift-master/validate_restart.yml +- include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml index 48d55c16f..da81e6dea 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml @@ -87,7 +87,7 @@ # docker is configured and running. 
skip_docker_role: True -- include: ../../../openshift-master/validate_restart.yml +- include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml deleted file mode 100644 index 32f638d42..000000000 --- a/playbooks/common/openshift-master/additional_config.yml +++ /dev/null @@ -1,52 +0,0 @@ ---- -- name: Master Additional Install Checkpoint Start - hosts: all - gather_facts: false - tasks: - - name: Set Master Additional install 'In Progress' - run_once: true - set_stats: - data: - installer_phase_master_additional: - status: "In Progress" - start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - -- name: Additional master configuration - hosts: oo_first_master - vars: - cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}" - etcd_urls: "{{ openshift.master.etcd_urls }}" - openshift_master_ha: "{{ groups.oo_masters | length > 1 }}" - omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}" - roles: - - role: openshift_master_cluster - when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker" - - role: openshift_project_request_template - when: openshift_project_request_template_manage - - role: openshift_examples - when: openshift_install_examples | default(true, true) | bool - registry_url: "{{ openshift.master.registry_url }}" - - role: openshift_hosted_templates - registry_url: "{{ openshift.master.registry_url }}" - - role: openshift_manageiq - when: openshift_use_manageiq | default(true) | bool - - role: cockpit - when: - - not openshift.common.is_atomic | bool - - deployment_type == 'openshift-enterprise' - - osm_use_cockpit is undefined or osm_use_cockpit | bool - - openshift.common.deployment_subtype != 'registry' - - role: flannel_register - when: openshift_use_flannel | default(false) | bool - -- name: Master Additional Install Checkpoint End - hosts: all - gather_facts: false - tasks: - - name: Set Master Additional install 'Complete' - run_once: true - set_stats: - data: - installer_phase_master_additional: - status: "Complete" - end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-master/certificates.yml b/playbooks/common/openshift-master/certificates.yml deleted file mode 100644 index f6afbc36f..000000000 --- a/playbooks/common/openshift-master/certificates.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- name: Create OpenShift certificates for master hosts - hosts: oo_masters_to_config - vars: - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - roles: - - role: openshift_master_facts - - role: openshift_named_certificates - - role: openshift_ca - - role: openshift_master_certificates - openshift_master_etcd_hosts: "{{ hostvars - | oo_select_keys(groups['oo_etcd_to_config'] | default([])) - | oo_collect('openshift.common.hostname') - | default(none, true) }}" diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml deleted file mode 100644 index 6b0fd6b7c..000000000 --- a/playbooks/common/openshift-master/config.yml +++ /dev/null @@ -1,252 +0,0 @@ ---- -- name: Master Install Checkpoint Start - hosts: all - gather_facts: false - tasks: - - name: Set Master install 'In Progress' - run_once: true - set_stats: - data: - installer_phase_master: - status: "In Progress" - start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - -- include: certificates.yml - -- name: Disable excluders - hosts: 
oo_masters_to_config - gather_facts: no - roles: - - role: openshift_excluder - r_openshift_excluder_action: disable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - -- name: Gather and set facts for master hosts - hosts: oo_masters_to_config - pre_tasks: - # Per https://bugzilla.redhat.com/show_bug.cgi?id=1469336 - # - # When scaling up a cluster upgraded from OCP <= 3.5, ensure that - # OPENSHIFT_DEFAULT_REGISTRY is present as defined on the existing - # masters, or absent if such is the case. - - name: Detect if this host is a new master in a scale up - set_fact: - g_openshift_master_is_scaleup: "{{ openshift.common.hostname in ( groups['new_masters'] | default([]) ) }}" - - - name: Scaleup Detection - debug: - var: g_openshift_master_is_scaleup - - - name: Check for RPM generated config marker file .config_managed - stat: - path: /etc/origin/.config_managed - register: rpmgenerated_config - - - name: Remove RPM generated config files if present - file: - path: "/etc/origin/{{ item }}" - state: absent - when: - - rpmgenerated_config.stat.exists == true - - deployment_type == 'openshift-enterprise' - with_items: - - master - - node - - .config_managed - - - set_fact: - openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}" - openshift_master_etcd_hosts: "{{ hostvars - | oo_select_keys(groups['oo_etcd_to_config'] - | default([])) - | oo_collect('openshift.common.hostname') - | default(none, true) }}" - roles: - - openshift_facts - post_tasks: - - openshift_facts: - role: master - local_facts: - api_port: "{{ openshift_master_api_port | default(None) }}" - api_url: "{{ openshift_master_api_url | default(None) }}" - api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}" - controllers_port: "{{ openshift_master_controllers_port | default(None) }}" - public_api_url: "{{ openshift_master_public_api_url | default(None) }}" - cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}" - cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}" - console_path: "{{ openshift_master_console_path | default(None) }}" - console_port: "{{ openshift_master_console_port | default(None) }}" - console_url: "{{ openshift_master_console_url | default(None) }}" - console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}" - public_console_url: "{{ openshift_master_public_console_url | default(None) }}" - ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}" - master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}" - -- name: Inspect state of first master config settings - hosts: oo_first_master - roles: - - role: openshift_facts - post_tasks: - - openshift_facts: - role: master - local_facts: - session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}" - session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}" - - name: Check for existing configuration - stat: - path: /etc/origin/master/master-config.yaml - register: master_config_stat - - - name: Set clean install fact - set_fact: - l_clean_install: "{{ not master_config_stat.stat.exists | bool }}" - - - name: Determine if etcd3 storage is in use - command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q - register: 
etcd3_grep - failed_when: false - changed_when: false - - - name: Set etcd3 fact - set_fact: - l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}" - - - name: Check if atomic-openshift-master sysconfig exists yet - stat: - path: /etc/sysconfig/atomic-openshift-master - register: l_aom_exists - - - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master parameter if present - command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master - register: l_default_registry_defined - when: l_aom_exists.stat.exists | bool - - - name: Check if atomic-openshift-master-api sysconfig exists yet - stat: - path: /etc/sysconfig/atomic-openshift-master-api - register: l_aom_api_exists - - - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-api parameter if present - command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-api - register: l_default_registry_defined_api - when: l_aom_api_exists.stat.exists | bool - - - name: Check if atomic-openshift-master-controllers sysconfig exists yet - stat: - path: /etc/sysconfig/atomic-openshift-master-controllers - register: l_aom_controllers_exists - - - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-controllers parameter if present - command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-controllers - register: l_default_registry_defined_controllers - when: l_aom_controllers_exists.stat.exists | bool - - - name: Update facts with OPENSHIFT_DEFAULT_REGISTRY value - set_fact: - l_default_registry_value: "{{ l_default_registry_defined.stdout | default('') }}" - l_default_registry_value_api: "{{ l_default_registry_defined_api.stdout | default('') }}" - l_default_registry_value_controllers: "{{ l_default_registry_defined_controllers.stdout | default('') }}" - -- name: Generate master session secrets - hosts: oo_first_master - vars: - g_session_secrets_present: "{{ (openshift.master.session_auth_secrets | default([])) | length > 0 and (openshift.master.session_encryption_secrets | default([])) | length > 0 }}" - g_session_auth_secrets: "{{ [ 24 | oo_generate_secret ] }}" - g_session_encryption_secrets: "{{ [ 24 | oo_generate_secret ] }}" - roles: - - role: openshift_facts - tasks: - - openshift_facts: - role: master - local_facts: - session_auth_secrets: "{{ g_session_auth_secrets }}" - session_encryption_secrets: "{{ g_session_encryption_secrets }}" - when: not g_session_secrets_present | bool - -- name: Configure masters - hosts: oo_masters_to_config - any_errors_fatal: true - vars: - openshift_master_ha: "{{ openshift.master.ha }}" - openshift_master_count: "{{ openshift.master.master_count }}" - openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}" - openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}" - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - openshift_master_etcd_hosts: "{{ hostvars - | oo_select_keys(groups['oo_etcd_to_config'] | default([])) - | oo_collect('openshift.common.hostname') - | default(none, true) }}" - openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([])) - | oo_collect('openshift.common.ip') | default([]) | join(',') - }}" - roles: - - role: os_firewall - - role: openshift_master_facts - - role: openshift_hosted_facts - - role: openshift_clock - - role: openshift_cloud_provider - - role: openshift_builddefaults - - role: openshift_buildoverrides - - role: 
nickhammond.logrotate - - role: contiv - contiv_role: netmaster - when: openshift_use_contiv | default(False) | bool - - role: openshift_master - openshift_master_hosts: "{{ groups.oo_masters_to_config }}" - r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}" - r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}" - openshift_master_is_scaleup_host: "{{ g_openshift_master_is_scaleup | default(false) }}" - openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}" - openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}" - openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}" - - role: tuned - - role: nuage_ca - when: openshift_use_nuage | default(false) | bool - - role: nuage_common - when: openshift_use_nuage | default(false) | bool - - role: nuage_master - when: openshift_use_nuage | default(false) | bool - - role: calico_master - when: openshift_use_calico | default(false) | bool - tasks: - - include_role: - name: kuryr - tasks_from: master - when: openshift_use_kuryr | default(false) | bool - - - name: Setup the node group config maps - include_role: - name: openshift_node_group - when: openshift_master_bootstrap_enabled | default(false) | bool - run_once: True - - post_tasks: - - name: Create group for deployment type - group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }} - changed_when: False - -- name: Configure API Aggregation on masters - hosts: oo_masters - serial: 1 - tasks: - - include: tasks/wire_aggregator.yml - -- name: Re-enable excluder if it was previously enabled - hosts: oo_masters_to_config - gather_facts: no - roles: - - role: openshift_excluder - r_openshift_excluder_action: enable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - -- name: Master Install Checkpoint End - hosts: all - gather_facts: false - tasks: - - name: Set Master install 'Complete' - run_once: true - set_stats: - data: - installer_phase_master: - status: "Complete" - end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-master/filter_plugins b/playbooks/common/openshift-master/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/common/openshift-master/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins \ No newline at end of file diff --git a/playbooks/common/openshift-master/library b/playbooks/common/openshift-master/library deleted file mode 120000 index d0b7393d3..000000000 --- a/playbooks/common/openshift-master/library +++ /dev/null @@ -1 +0,0 @@ -../../../library/ \ No newline at end of file diff --git a/playbooks/common/openshift-master/lookup_plugins b/playbooks/common/openshift-master/lookup_plugins deleted file mode 120000 index ac79701db..000000000 --- a/playbooks/common/openshift-master/lookup_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../lookup_plugins \ No newline at end of file diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml deleted file mode 100644 index 4d73b8124..000000000 --- a/playbooks/common/openshift-master/restart.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- include: validate_restart.yml - -- name: Restart masters - hosts: oo_masters_to_config - vars: - openshift_master_ha: "{{ groups.oo_masters_to_config | length > 
1 }}" - serial: 1 - handlers: - - include: ../../../roles/openshift_master/handlers/main.yml - static: yes - roles: - - openshift_facts - post_tasks: - - include: restart_hosts.yml - when: openshift_rolling_restart_mode | default('services') == 'system' - - - include: restart_services.yml - when: openshift_rolling_restart_mode | default('services') == 'services' diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml deleted file mode 100644 index a5dbe0590..000000000 --- a/playbooks/common/openshift-master/restart_hosts.yml +++ /dev/null @@ -1,40 +0,0 @@ ---- -- name: Restart master system - # https://github.com/ansible/ansible/issues/10616 - shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart" - async: 1 - poll: 0 - ignore_errors: true - become: yes - -# WARNING: This process is riddled with weird behavior. - -# Workaround for https://github.com/ansible/ansible/issues/21269 -- set_fact: - wait_for_host: "{{ ansible_host }}" - -# Ansible's blog documents this *without* the port, which appears to now -# just wait until the timeout value and then proceed without checking anything. -# port is now required. -# -# However neither ansible_ssh_port or ansible_port are reliably defined, likely -# only if overridden. Assume a default of 22. -- name: Wait for master to restart - local_action: - module: wait_for - host="{{ wait_for_host }}" - state=started - delay=10 - timeout=600 - port="{{ ansible_port | default(ansible_ssh_port | default(22,boolean=True),boolean=True) }}" - become: no - -# Now that ssh is back up we can wait for API on the remote system, -# avoiding some potential connection issues from local system: -- name: Wait for master API to come back online - wait_for: - host: "{{ openshift.common.hostname }}" - state: started - delay: 10 - port: "{{ openshift.master.api_port }}" - timeout: 600 diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml deleted file mode 100644 index 4e1b3a3be..000000000 --- a/playbooks/common/openshift-master/restart_services.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include_role: - name: openshift_master - tasks_from: restart.yml diff --git a/playbooks/common/openshift-master/revert-client-ca.yml b/playbooks/common/openshift-master/revert-client-ca.yml deleted file mode 100644 index 9ae23bf5b..000000000 --- a/playbooks/common/openshift-master/revert-client-ca.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -- name: Set servingInfo.clientCA = ca.crt in master config - hosts: oo_masters_to_config - tasks: - - name: Read master config - slurp: - src: "{{ openshift.common.config_base }}/master/master-config.yaml" - register: g_master_config_output - - # servingInfo.clientCA may be set as the client-ca-bundle.crt from - # CA redeployment and this task reverts that change. 
- name: Set servingInfo.clientCA = ca.crt in master config - modify_yaml: - dest: "{{ openshift.common.config_base }}/master/master-config.yaml" - yaml_key: servingInfo.clientCA - yaml_value: ca.crt - when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt' diff --git a/playbooks/common/openshift-master/roles b/playbooks/common/openshift-master/roles deleted file mode 120000 index e2b799b9d..000000000 --- a/playbooks/common/openshift-master/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles/ \ No newline at end of file diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml deleted file mode 100644 index ed54e6ca4..000000000 --- a/playbooks/common/openshift-master/scaleup.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -- name: Update master count - hosts: oo_masters:!oo_masters_to_config - serial: 1 - roles: - - openshift_facts - post_tasks: - - openshift_facts: - role: master - local_facts: - ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}" - master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}" - - name: Update master count - modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'kubernetesMasterConfig.masterCount' - yaml_value: "{{ openshift.master.master_count }}" - notify: - - restart master api - - restart master controllers - handlers: - - name: restart master api - service: name={{ openshift.common.service_type }}-master-api state=restarted - notify: verify api server - # We retry the controllers because the API may not be 100% initialized yet. - - name: restart master controllers - command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" - retries: 3 - delay: 5 - register: result - until: result.rc == 0 - - name: verify api server - command: > - curl --silent --tlsv1.2 - --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt - {{ openshift.master.api_url }}/healthz/ready - args: - # Disables the following warning: - # Consider using get_url or uri module rather than running curl - warn: no - register: api_available_output - until: api_available_output.stdout == 'ok' - retries: 120 - delay: 1 - changed_when: false - -- include: ../openshift-master/set_network_facts.yml - -- include: ../../openshift-etcd/private/certificates.yml - -- include: ../openshift-master/config.yml - -- include: ../../openshift-loadbalancer/private/config.yml - -- include: ../../openshift-node/private/certificates.yml - -- include: ../../openshift-node/private/config.yml diff --git a/playbooks/common/openshift-master/set_network_facts.yml b/playbooks/common/openshift-master/set_network_facts.yml deleted file mode 100644 index 9a6cf26fc..000000000 --- a/playbooks/common/openshift-master/set_network_facts.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- name: Read first master's config - hosts: oo_first_master - gather_facts: no - tasks: - - stat: - path: "{{ openshift.common.config_base }}/master/master-config.yaml" - register: g_master_config_stat - - slurp: - src: "{{ openshift.common.config_base }}/master/master-config.yaml" - register: g_master_config_slurp - -- name: Set network facts for masters - hosts: oo_masters_to_config - gather_facts: no - roles: - - role: openshift_facts - post_tasks: - - block: - - set_fact: - osm_cluster_network_cidr: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.clusterNetworkCIDR }}" - when: 
osm_cluster_network_cidr is not defined - set_fact: - osm_host_subnet_length: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.hostSubnetLength }}" - when: osm_host_subnet_length is not defined - set_fact: - openshift_portal_net: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.serviceNetworkCIDR }}" - when: openshift_portal_net is not defined - openshift_facts: - role: common - local_facts: - portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}" - when: - - hostvars[groups.oo_first_master.0].g_master_config_stat.stat.exists | bool diff --git a/playbooks/common/openshift-master/tasks/wire_aggregator.yml b/playbooks/common/openshift-master/tasks/wire_aggregator.yml deleted file mode 100644 index 97acc5d5d..000000000 --- a/playbooks/common/openshift-master/tasks/wire_aggregator.yml +++ /dev/null @@ -1,216 +0,0 @@ ---- -- name: Make temp cert dir - command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX - register: certtemp - changed_when: False - -- name: Check for First Master Aggregator Signer cert - stat: - path: /etc/origin/master/front-proxy-ca.crt - register: first_proxy_ca_crt - changed_when: false - delegate_to: "{{ groups.oo_first_master.0 }}" - -- name: Check for First Master Aggregator Signer key - stat: - path: /etc/origin/master/front-proxy-ca.key - register: first_proxy_ca_key - changed_when: false - delegate_to: "{{ groups.oo_first_master.0 }}" - -# TODO: this currently has a bug where hostnames are required -- name: Creating First Master Aggregator signer certs - command: > - {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm ca create-signer-cert - --cert=/etc/origin/master/front-proxy-ca.crt - --key=/etc/origin/master/front-proxy-ca.key - --serial=/etc/origin/master/ca.serial.txt - delegate_to: "{{ groups.oo_first_master.0 }}" - when: - - not first_proxy_ca_crt.stat.exists - - not first_proxy_ca_key.stat.exists - -- name: Check for Aggregator Signer cert - stat: - path: /etc/origin/master/front-proxy-ca.crt - register: proxy_ca_crt - changed_when: false - -- name: Check for Aggregator Signer key - stat: - path: /etc/origin/master/front-proxy-ca.key - register: proxy_ca_key - changed_when: false - -- name: Copy Aggregator Signer certs from first master - fetch: - src: "/etc/origin/master/{{ item }}" - dest: "{{ certtemp.stdout }}/{{ item }}" - flat: yes - with_items: - - front-proxy-ca.crt - - front-proxy-ca.key - delegate_to: "{{ groups.oo_first_master.0 }}" - when: - - not proxy_ca_key.stat.exists - - not proxy_ca_crt.stat.exists - -- name: Copy Aggregator Signer certs to host - copy: - src: "{{ certtemp.stdout }}/{{ item }}" - dest: "/etc/origin/master/{{ item }}" - with_items: - - front-proxy-ca.crt - - front-proxy-ca.key - when: - - not proxy_ca_key.stat.exists - - not proxy_ca_crt.stat.exists - -# oc_adm_ca_server_cert: -# cert: /etc/origin/master/front-proxy-ca.crt -# key: /etc/origin/master/front-proxy-ca.key - -- name: Check for first master api-client config - stat: - path: /etc/origin/master/aggregator-front-proxy.kubeconfig - register: first_front_proxy_kubeconfig - delegate_to: "{{ groups.oo_first_master.0 }}" - run_once: true - -# create-api-client-config generates a ca.crt file which will -# overwrite the OpenShift CA certificate. Generate the aggregator -# kubeconfig in a temporary directory and then copy files into the -# master config dir to avoid overwriting ca.crt. 
-- block: - - name: Create first master api-client config for Aggregator - command: > - {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm create-api-client-config - --certificate-authority=/etc/origin/master/front-proxy-ca.crt - --signer-cert=/etc/origin/master/front-proxy-ca.crt - --signer-key=/etc/origin/master/front-proxy-ca.key - --user aggregator-front-proxy - --client-dir={{ certtemp.stdout }} - --signer-serial=/etc/origin/master/ca.serial.txt - delegate_to: "{{ groups.oo_first_master.0 }}" - run_once: true - - name: Copy first master api-client config for Aggregator - copy: - src: "{{ certtemp.stdout }}/{{ item }}" - dest: "/etc/origin/master/" - remote_src: true - with_items: - - aggregator-front-proxy.crt - - aggregator-front-proxy.key - - aggregator-front-proxy.kubeconfig - delegate_to: "{{ groups.oo_first_master.0 }}" - run_once: true - when: - - not first_front_proxy_kubeconfig.stat.exists - -- name: Check for api-client config - stat: - path: /etc/origin/master/aggregator-front-proxy.kubeconfig - register: front_proxy_kubeconfig - -- name: Copy api-client config from first master - fetch: - src: "/etc/origin/master/{{ item }}" - dest: "{{ certtemp.stdout }}/{{ item }}" - flat: yes - delegate_to: "{{ groups.oo_first_master.0 }}" - with_items: - - aggregator-front-proxy.crt - - aggregator-front-proxy.key - - aggregator-front-proxy.kubeconfig - when: - - not front_proxy_kubeconfig.stat.exists - -- name: Copy api-client config to host - copy: - src: "{{ certtemp.stdout }}/{{ item }}" - dest: "/etc/origin/master/{{ item }}" - with_items: - - aggregator-front-proxy.crt - - aggregator-front-proxy.key - - aggregator-front-proxy.kubeconfig - when: - - not front_proxy_kubeconfig.stat.exists - -- name: Delete temp directory - file: - name: "{{ certtemp.stdout }}" - state: absent - changed_when: False - -- name: Setup extension file for service console UI - template: - src: ../templates/openshift-ansible-catalog-console.js - dest: /etc/origin/master/openshift-ansible-catalog-console.js - -- name: Update master config - yedit: - state: present - src: /etc/origin/master/master-config.yaml - edits: - - key: aggregatorConfig.proxyClientInfo.certFile - value: aggregator-front-proxy.crt - - key: aggregatorConfig.proxyClientInfo.keyFile - value: aggregator-front-proxy.key - - key: authConfig.requestHeader.clientCA - value: front-proxy-ca.crt - - key: authConfig.requestHeader.clientCommonNames - value: [aggregator-front-proxy] - - key: authConfig.requestHeader.usernameHeaders - value: [X-Remote-User] - - key: authConfig.requestHeader.groupHeaders - value: [X-Remote-Group] - - key: authConfig.requestHeader.extraHeaderPrefixes - value: [X-Remote-Extra-] - - key: assetConfig.extensionScripts - value: [/etc/origin/master/openshift-ansible-catalog-console.js] - - key: kubernetesMasterConfig.apiServerArguments.runtime-config - value: [apis/settings.k8s.io/v1alpha1=true] - - key: admissionConfig.pluginConfig.PodPreset.configuration.kind - value: DefaultAdmissionConfig - - key: admissionConfig.pluginConfig.PodPreset.configuration.apiVersion - value: v1 - - key: admissionConfig.pluginConfig.PodPreset.configuration.disable - value: false - register: yedit_output - -#restart master serially here -- name: restart master api - systemd: name={{ openshift.common.service_type }}-master-api state=restarted - when: - - yedit_output.changed - - openshift.master.cluster_method == 'native' - -# We retry the controllers because the API may not be 100% initialized yet. 
-- name: restart master controllers - command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" - retries: 3 - delay: 5 - register: result - until: result.rc == 0 - when: - - yedit_output.changed - - openshift.master.cluster_method == 'native' - -- name: Verify API Server - # Using curl here since the uri module requires python-httplib2 and - # wait_for port doesn't provide health information. - command: > - curl --silent --tlsv1.2 - --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt - {{ openshift.master.api_url }}/healthz/ready - args: - # Disables the following warning: - # Consider using get_url or uri module rather than running curl - warn: no - register: api_available_output - until: api_available_output.stdout == 'ok' - retries: 120 - delay: 1 - changed_when: false - when: - - yedit_output.changed diff --git a/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js b/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js deleted file mode 100644 index fd02325ba..000000000 --- a/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js +++ /dev/null @@ -1 +0,0 @@ -window.OPENSHIFT_CONSTANTS.TEMPLATE_SERVICE_BROKER_ENABLED = {{ 'true' if (template_service_broker_install | default(True)) else 'false' }}; diff --git a/playbooks/common/openshift-master/validate_restart.yml b/playbooks/common/openshift-master/validate_restart.yml deleted file mode 100644 index 5dbb21502..000000000 --- a/playbooks/common/openshift-master/validate_restart.yml +++ /dev/null @@ -1,65 +0,0 @@ ---- -- name: Validate configuration for rolling restart - hosts: oo_masters_to_config - roles: - - openshift_facts - tasks: - - fail: - msg: "openshift_rolling_restart_mode must be set to either 'services' or 'system'" - when: openshift_rolling_restart_mode is defined and openshift_rolling_restart_mode not in ["services", "system"] - - openshift_facts: - role: "{{ item.role }}" - local_facts: "{{ item.local_facts }}" - with_items: - - role: common - local_facts: - rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}" - - role: master - local_facts: - cluster_method: "{{ openshift_master_cluster_method | default(None) }}" - -# Creating a temp file on localhost, we then check each system that will -# be rebooted to see if that file exists, if so we know we're running -# ansible on a machine that needs a reboot, and we need to error out. -- name: Create temp file on localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - local_action: command mktemp - register: mktemp - changed_when: false - -- name: Check if temp file exists on any masters - hosts: oo_masters_to_config - tasks: - - stat: path="{{ hostvars.localhost.mktemp.stdout }}" - register: exists - changed_when: false - -- name: Cleanup temp file on localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent - changed_when: false - -- name: Warn if restarting the system where ansible is running - hosts: oo_masters_to_config - tasks: - - pause: - prompt: > - Warning: Running playbook from a host that will be restarted! - Press CTRL+C and A to abort playbook execution. You may - continue by pressing ENTER but the playbook will stop - executing after this system has been restarted and services - must be verified manually. 
To only restart services, set - openshift_master_rolling_restart_mode=services in host - inventory and relaunch the playbook. - when: exists.stat.exists and openshift.common.rolling_restart_mode == 'system' - - set_fact: - current_host: "{{ exists.stat.exists }}" - when: openshift.common.rolling_restart_mode == 'system' diff --git a/playbooks/openshift-master/additional_config.yml b/playbooks/openshift-master/additional_config.yml new file mode 100644 index 000000000..e43e9e002 --- /dev/null +++ b/playbooks/openshift-master/additional_config.yml @@ -0,0 +1,4 @@ +--- +- include: ../init/main.yml + +- include: private/additional_config.yml diff --git a/playbooks/openshift-master/certificates.yml b/playbooks/openshift-master/certificates.yml new file mode 100644 index 000000000..0384877d9 --- /dev/null +++ b/playbooks/openshift-master/certificates.yml @@ -0,0 +1,4 @@ +--- +- include: ../init/main.yml + +- include: private/certificates.yml diff --git a/playbooks/openshift-master/config.yml b/playbooks/openshift-master/config.yml new file mode 100644 index 000000000..8ee57ce8d --- /dev/null +++ b/playbooks/openshift-master/config.yml @@ -0,0 +1,4 @@ +--- +- include: ../init/main.yml + +- include: private/config.yml diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml new file mode 100644 index 000000000..32f638d42 --- /dev/null +++ b/playbooks/openshift-master/private/additional_config.yml @@ -0,0 +1,52 @@ +--- +- name: Master Additional Install Checkpoint Start + hosts: all + gather_facts: false + tasks: + - name: Set Master Additional install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_master_additional: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- name: Additional master configuration + hosts: oo_first_master + vars: + cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}" + etcd_urls: "{{ openshift.master.etcd_urls }}" + openshift_master_ha: "{{ groups.oo_masters | length > 1 }}" + omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}" + roles: + - role: openshift_master_cluster + when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker" + - role: openshift_project_request_template + when: openshift_project_request_template_manage + - role: openshift_examples + when: openshift_install_examples | default(true, true) | bool + registry_url: "{{ openshift.master.registry_url }}" + - role: openshift_hosted_templates + registry_url: "{{ openshift.master.registry_url }}" + - role: openshift_manageiq + when: openshift_use_manageiq | default(true) | bool + - role: cockpit + when: + - not openshift.common.is_atomic | bool + - deployment_type == 'openshift-enterprise' + - osm_use_cockpit is undefined or osm_use_cockpit | bool + - openshift.common.deployment_subtype != 'registry' + - role: flannel_register + when: openshift_use_flannel | default(false) | bool + +- name: Master Additional Install Checkpoint End + hosts: all + gather_facts: false + tasks: + - name: Set Master Additional install 'Complete' + run_once: true + set_stats: + data: + installer_phase_master_additional: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/openshift-master/private/certificates.yml b/playbooks/openshift-master/private/certificates.yml new file mode 100644 index 000000000..f6afbc36f --- /dev/null +++ b/playbooks/openshift-master/private/certificates.yml @@ -0,0 +1,14 @@ +--- +- name: 
Create OpenShift certificates for master hosts + hosts: oo_masters_to_config + vars: + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + roles: + - role: openshift_master_facts + - role: openshift_named_certificates + - role: openshift_ca + - role: openshift_master_certificates + openshift_master_etcd_hosts: "{{ hostvars + | oo_select_keys(groups['oo_etcd_to_config'] | default([])) + | oo_collect('openshift.common.hostname') + | default(none, true) }}" diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml new file mode 100644 index 000000000..6b0fd6b7c --- /dev/null +++ b/playbooks/openshift-master/private/config.yml @@ -0,0 +1,252 @@ +--- +- name: Master Install Checkpoint Start + hosts: all + gather_facts: false + tasks: + - name: Set Master install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_master: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- include: certificates.yml + +- name: Disable excluders + hosts: oo_masters_to_config + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: disable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" + +- name: Gather and set facts for master hosts + hosts: oo_masters_to_config + pre_tasks: + # Per https://bugzilla.redhat.com/show_bug.cgi?id=1469336 + # + # When scaling up a cluster upgraded from OCP <= 3.5, ensure that + # OPENSHIFT_DEFAULT_REGISTRY is present as defined on the existing + # masters, or absent if such is the case. + - name: Detect if this host is a new master in a scale up + set_fact: + g_openshift_master_is_scaleup: "{{ openshift.common.hostname in ( groups['new_masters'] | default([]) ) }}" + + - name: Scaleup Detection + debug: + var: g_openshift_master_is_scaleup + + - name: Check for RPM generated config marker file .config_managed + stat: + path: /etc/origin/.config_managed + register: rpmgenerated_config + + - name: Remove RPM generated config files if present + file: + path: "/etc/origin/{{ item }}" + state: absent + when: + - rpmgenerated_config.stat.exists == true + - deployment_type == 'openshift-enterprise' + with_items: + - master + - node + - .config_managed + + - set_fact: + openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}" + openshift_master_etcd_hosts: "{{ hostvars + | oo_select_keys(groups['oo_etcd_to_config'] + | default([])) + | oo_collect('openshift.common.hostname') + | default(none, true) }}" + roles: + - openshift_facts + post_tasks: + - openshift_facts: + role: master + local_facts: + api_port: "{{ openshift_master_api_port | default(None) }}" + api_url: "{{ openshift_master_api_url | default(None) }}" + api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}" + controllers_port: "{{ openshift_master_controllers_port | default(None) }}" + public_api_url: "{{ openshift_master_public_api_url | default(None) }}" + cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}" + cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}" + console_path: "{{ openshift_master_console_path | default(None) }}" + console_port: "{{ openshift_master_console_port | default(None) }}" + console_url: "{{ openshift_master_console_url | default(None) }}" + console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}" + public_console_url: "{{ openshift_master_public_console_url | 
default(None) }}" + ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}" + master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}" + +- name: Inspect state of first master config settings + hosts: oo_first_master + roles: + - role: openshift_facts + post_tasks: + - openshift_facts: + role: master + local_facts: + session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}" + session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}" + - name: Check for existing configuration + stat: + path: /etc/origin/master/master-config.yaml + register: master_config_stat + + - name: Set clean install fact + set_fact: + l_clean_install: "{{ not master_config_stat.stat.exists | bool }}" + + - name: Determine if etcd3 storage is in use + command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q + register: etcd3_grep + failed_when: false + changed_when: false + + - name: Set etcd3 fact + set_fact: + l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}" + + - name: Check if atomic-openshift-master sysconfig exists yet + stat: + path: /etc/sysconfig/atomic-openshift-master + register: l_aom_exists + + - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master parameter if present + command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master + register: l_default_registry_defined + when: l_aom_exists.stat.exists | bool + + - name: Check if atomic-openshift-master-api sysconfig exists yet + stat: + path: /etc/sysconfig/atomic-openshift-master-api + register: l_aom_api_exists + + - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-api parameter if present + command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-api + register: l_default_registry_defined_api + when: l_aom_api_exists.stat.exists | bool + + - name: Check if atomic-openshift-master-controllers sysconfig exists yet + stat: + path: /etc/sysconfig/atomic-openshift-master-controllers + register: l_aom_controllers_exists + + - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-controllers parameter if present + command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-controllers + register: l_default_registry_defined_controllers + when: l_aom_controllers_exists.stat.exists | bool + + - name: Update facts with OPENSHIFT_DEFAULT_REGISTRY value + set_fact: + l_default_registry_value: "{{ l_default_registry_defined.stdout | default('') }}" + l_default_registry_value_api: "{{ l_default_registry_defined_api.stdout | default('') }}" + l_default_registry_value_controllers: "{{ l_default_registry_defined_controllers.stdout | default('') }}" + +- name: Generate master session secrets + hosts: oo_first_master + vars: + g_session_secrets_present: "{{ (openshift.master.session_auth_secrets | default([])) | length > 0 and (openshift.master.session_encryption_secrets | default([])) | length > 0 }}" + g_session_auth_secrets: "{{ [ 24 | oo_generate_secret ] }}" + g_session_encryption_secrets: "{{ [ 24 | oo_generate_secret ] }}" + roles: + - role: openshift_facts + tasks: + - openshift_facts: + role: master + local_facts: + session_auth_secrets: "{{ g_session_auth_secrets }}" + session_encryption_secrets: "{{ g_session_encryption_secrets }}" + when: not g_session_secrets_present | bool + +- name: Configure masters + hosts: oo_masters_to_config + any_errors_fatal: true 
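+  # Fail the play on every master as soon as any single master errors out,
+  # rather than leaving behind a partially configured control plane.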
+ vars: + openshift_master_ha: "{{ openshift.master.ha }}" + openshift_master_count: "{{ openshift.master.master_count }}" + openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}" + openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}" + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + openshift_master_etcd_hosts: "{{ hostvars + | oo_select_keys(groups['oo_etcd_to_config'] | default([])) + | oo_collect('openshift.common.hostname') + | default(none, true) }}" + openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([])) + | oo_collect('openshift.common.ip') | default([]) | join(',') + }}" + roles: + - role: os_firewall + - role: openshift_master_facts + - role: openshift_hosted_facts + - role: openshift_clock + - role: openshift_cloud_provider + - role: openshift_builddefaults + - role: openshift_buildoverrides + - role: nickhammond.logrotate + - role: contiv + contiv_role: netmaster + when: openshift_use_contiv | default(False) | bool + - role: openshift_master + openshift_master_hosts: "{{ groups.oo_masters_to_config }}" + r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}" + r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}" + openshift_master_is_scaleup_host: "{{ g_openshift_master_is_scaleup | default(false) }}" + openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}" + openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}" + openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}" + - role: tuned + - role: nuage_ca + when: openshift_use_nuage | default(false) | bool + - role: nuage_common + when: openshift_use_nuage | default(false) | bool + - role: nuage_master + when: openshift_use_nuage | default(false) | bool + - role: calico_master + when: openshift_use_calico | default(false) | bool + tasks: + - include_role: + name: kuryr + tasks_from: master + when: openshift_use_kuryr | default(false) | bool + + - name: Setup the node group config maps + include_role: + name: openshift_node_group + when: openshift_master_bootstrap_enabled | default(false) | bool + run_once: True + + post_tasks: + - name: Create group for deployment type + group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }} + changed_when: False + +- name: Configure API Aggregation on masters + hosts: oo_masters + serial: 1 + tasks: + - include: tasks/wire_aggregator.yml + +- name: Re-enable excluder if it was previously enabled + hosts: oo_masters_to_config + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: enable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" + +- name: Master Install Checkpoint End + hosts: all + gather_facts: false + tasks: + - name: Set Master install 'Complete' + run_once: true + set_stats: + data: + installer_phase_master: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/openshift-master/private/filter_plugins b/playbooks/openshift-master/private/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/openshift-master/private/filter_plugins @@ -0,0 +1 @@ 
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/openshift-master/private/library b/playbooks/openshift-master/private/library
new file mode 120000
index 000000000..d0b7393d3
--- /dev/null
+++ b/playbooks/openshift-master/private/library
@@ -0,0 +1 @@
+../../../library/
\ No newline at end of file
diff --git a/playbooks/openshift-master/private/lookup_plugins b/playbooks/openshift-master/private/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/openshift-master/private/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/openshift-master/private/restart.yml b/playbooks/openshift-master/private/restart.yml
new file mode 100644
index 000000000..4d73b8124
--- /dev/null
+++ b/playbooks/openshift-master/private/restart.yml
@@ -0,0 +1,19 @@
+---
+- include: validate_restart.yml
+
+- name: Restart masters
+  hosts: oo_masters_to_config
+  vars:
+    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+  serial: 1
+  handlers:
+  - include: ../../../roles/openshift_master/handlers/main.yml
+    static: yes
+  roles:
+  - openshift_facts
+  post_tasks:
+  - include: restart_hosts.yml
+    when: openshift_rolling_restart_mode | default('services') == 'system'
+
+  - include: restart_services.yml
+    when: openshift_rolling_restart_mode | default('services') == 'services'
diff --git a/playbooks/openshift-master/private/restart_hosts.yml b/playbooks/openshift-master/private/restart_hosts.yml
new file mode 100644
index 000000000..a5dbe0590
--- /dev/null
+++ b/playbooks/openshift-master/private/restart_hosts.yml
@@ -0,0 +1,40 @@
+---
+- name: Restart master system
+  # https://github.com/ansible/ansible/issues/10616
+  shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
+  async: 1
+  poll: 0
+  ignore_errors: true
+  become: yes
+
+# WARNING: This process is riddled with weird behavior.
+
+# Workaround for https://github.com/ansible/ansible/issues/21269
+- set_fact:
+    wait_for_host: "{{ ansible_host }}"
+
+# The Ansible blog documents this *without* the port, in which case wait_for
+# appears to wait until the timeout expires and then proceed without having
+# checked anything, so the port is required here.
+#
+# However, neither ansible_ssh_port nor ansible_port is reliably defined;
+# they are likely present only when overridden. Assume a default of 22.
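+# For example, a host with no overrides in inventory resolves to port 22,
+# while one with ansible_ssh_port=2222 (or ansible_port=2222) resolves to 2222.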
+- name: Wait for master to restart
+  local_action:
+    module: wait_for
+      host="{{ wait_for_host }}"
+      state=started
+      delay=10
+      timeout=600
+      port="{{ ansible_port | default(ansible_ssh_port | default(22,boolean=True),boolean=True) }}"
+  become: no
+
+# Now that ssh is back up we can wait for API on the remote system,
+# avoiding some potential connection issues from the local system:
+- name: Wait for master API to come back online
+  wait_for:
+    host: "{{ openshift.common.hostname }}"
+    state: started
+    delay: 10
+    port: "{{ openshift.master.api_port }}"
+    timeout: 600
diff --git a/playbooks/openshift-master/private/restart_services.yml b/playbooks/openshift-master/private/restart_services.yml
new file mode 100644
index 000000000..4e1b3a3be
--- /dev/null
+++ b/playbooks/openshift-master/private/restart_services.yml
@@ -0,0 +1,4 @@
+---
+- include_role:
+    name: openshift_master
+    tasks_from: restart.yml
diff --git a/playbooks/openshift-master/private/revert-client-ca.yml b/playbooks/openshift-master/private/revert-client-ca.yml
new file mode 100644
index 000000000..9ae23bf5b
--- /dev/null
+++ b/playbooks/openshift-master/private/revert-client-ca.yml
@@ -0,0 +1,17 @@
+---
+- name: Set servingInfo.clientCA = ca.crt in master config
+  hosts: oo_masters_to_config
+  tasks:
+  - name: Read master config
+    slurp:
+      src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+    register: g_master_config_output
+
+  # servingInfo.clientCA may be set as the client-ca-bundle.crt from
+  # CA redeployment and this task reverts that change.
+  - name: Set servingInfo.clientCA = ca.crt in master config
+    modify_yaml:
+      dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+      yaml_key: servingInfo.clientCA
+      yaml_value: ca.crt
+    when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt'
diff --git a/playbooks/openshift-master/private/roles b/playbooks/openshift-master/private/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/openshift-master/private/roles
@@ -0,0 +1 @@
+../../../roles/
\ No newline at end of file
diff --git a/playbooks/openshift-master/private/scaleup.yml b/playbooks/openshift-master/private/scaleup.yml
new file mode 100644
index 000000000..021399965
--- /dev/null
+++ b/playbooks/openshift-master/private/scaleup.yml
@@ -0,0 +1,57 @@
+---
+- name: Update master count
+  hosts: oo_masters:!oo_masters_to_config
+  serial: 1
+  roles:
+  - openshift_facts
+  post_tasks:
+  - openshift_facts:
+      role: master
+      local_facts:
+        ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
+        master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
+  - name: Update master count
+    modify_yaml:
+      dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+      yaml_key: 'kubernetesMasterConfig.masterCount'
+      yaml_value: "{{ openshift.master.master_count }}"
+    notify:
+    - restart master api
+    - restart master controllers
+  handlers:
+  - name: restart master api
+    service: name={{ openshift.common.service_type }}-master-api state=restarted
+    notify: verify api server
+  # We retry the controllers because the API may not be 100% initialized yet.
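+  # Up to 3 attempts, 5 seconds apart; the handler fails only if every
+  # restart attempt exits non-zero.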
+  - name: restart master controllers
+    command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+    retries: 3
+    delay: 5
+    register: result
+    until: result.rc == 0
+  - name: verify api server
+    command: >
+      curl --silent --tlsv1.2
+      --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+      {{ openshift.master.api_url }}/healthz/ready
+    args:
+      # Disables the following warning:
+      # Consider using get_url or uri module rather than running curl
+      warn: no
+    register: api_available_output
+    until: api_available_output.stdout == 'ok'
+    retries: 120
+    delay: 1
+    changed_when: false
+
+- include: set_network_facts.yml
+
+- include: ../../openshift-etcd/private/certificates.yml
+
+- include: config.yml
+
+- include: ../../openshift-loadbalancer/private/config.yml
+
+- include: ../../openshift-node/private/certificates.yml
+
+- include: ../../openshift-node/private/config.yml
diff --git a/playbooks/openshift-master/private/set_network_facts.yml b/playbooks/openshift-master/private/set_network_facts.yml
new file mode 100644
index 000000000..9a6cf26fc
--- /dev/null
+++ b/playbooks/openshift-master/private/set_network_facts.yml
@@ -0,0 +1,34 @@
+---
+- name: Read first master's config
+  hosts: oo_first_master
+  gather_facts: no
+  tasks:
+  - stat:
+      path: "{{ openshift.common.config_base }}/master/master-config.yaml"
+    register: g_master_config_stat
+  - slurp:
+      src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+    register: g_master_config_slurp
+
+- name: Set network facts for masters
+  hosts: oo_masters_to_config
+  gather_facts: no
+  roles:
+  - role: openshift_facts
+  post_tasks:
+  - block:
+    - set_fact:
+        osm_cluster_network_cidr: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.clusterNetworkCIDR }}"
+      when: osm_cluster_network_cidr is not defined
+    - set_fact:
+        osm_host_subnet_length: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.hostSubnetLength }}"
+      when: osm_host_subnet_length is not defined
+    - set_fact:
+        openshift_portal_net: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.serviceNetworkCIDR }}"
+      when: openshift_portal_net is not defined
+    - openshift_facts:
+        role: common
+        local_facts:
+          portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+    when:
+    - hostvars[groups.oo_first_master.0].g_master_config_stat.stat.exists | bool
diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
new file mode 100644
index 000000000..97acc5d5d
--- /dev/null
+++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
@@ -0,0 +1,216 @@
+---
+- name: Make temp cert dir
+  command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX
+  register: certtemp
+  changed_when: False
+
+- name: Check for First Master Aggregator Signer cert
+  stat:
+    path: /etc/origin/master/front-proxy-ca.crt
+  register: first_proxy_ca_crt
+  changed_when: false
+  delegate_to: "{{ groups.oo_first_master.0 }}"
+
+- name: Check for First Master Aggregator Signer key
+  stat:
+    path: /etc/origin/master/front-proxy-ca.key
+  register: first_proxy_ca_key
+  changed_when: false
+  delegate_to: "{{ groups.oo_first_master.0 }}"
+
+# TODO: this currently has a bug where hostnames are required
+- name: Creating First Master Aggregator signer certs
+  command: >
+    {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm ca create-signer-cert
+    --cert=/etc/origin/master/front-proxy-ca.crt
+    --key=/etc/origin/master/front-proxy-ca.key
+    --serial=/etc/origin/master/ca.serial.txt
+  delegate_to: "{{ groups.oo_first_master.0 }}"
+  when:
+  - not first_proxy_ca_crt.stat.exists
+  - not first_proxy_ca_key.stat.exists
+
+- name: Check for Aggregator Signer cert
+  stat:
+    path: /etc/origin/master/front-proxy-ca.crt
+  register: proxy_ca_crt
+  changed_when: false
+
+- name: Check for Aggregator Signer key
+  stat:
+    path: /etc/origin/master/front-proxy-ca.key
+  register: proxy_ca_key
+  changed_when: false
+
+- name: Copy Aggregator Signer certs from first master
+  fetch:
+    src: "/etc/origin/master/{{ item }}"
+    dest: "{{ certtemp.stdout }}/{{ item }}"
+    flat: yes
+  with_items:
+  - front-proxy-ca.crt
+  - front-proxy-ca.key
+  delegate_to: "{{ groups.oo_first_master.0 }}"
+  when:
+  - not proxy_ca_key.stat.exists
+  - not proxy_ca_crt.stat.exists
+
+- name: Copy Aggregator Signer certs to host
+  copy:
+    src: "{{ certtemp.stdout }}/{{ item }}"
+    dest: "/etc/origin/master/{{ item }}"
+  with_items:
+  - front-proxy-ca.crt
+  - front-proxy-ca.key
+  when:
+  - not proxy_ca_key.stat.exists
+  - not proxy_ca_crt.stat.exists
+
+# oc_adm_ca_server_cert:
+#   cert: /etc/origin/master/front-proxy-ca.crt
+#   key: /etc/origin/master/front-proxy-ca.key
+
+- name: Check for first master api-client config
+  stat:
+    path: /etc/origin/master/aggregator-front-proxy.kubeconfig
+  register: first_front_proxy_kubeconfig
+  delegate_to: "{{ groups.oo_first_master.0 }}"
+  run_once: true
+
+# create-api-client-config generates a ca.crt file which will
+# overwrite the OpenShift CA certificate. Generate the aggregator
+# kubeconfig in a temporary directory and then copy files into the
+# master config dir to avoid overwriting ca.crt.
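+# The block below therefore writes the generated files to the temporary
+# directory and copies only the aggregator-front-proxy.* files into
+# /etc/origin/master.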
+- block: + - name: Create first master api-client config for Aggregator + command: > + {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm create-api-client-config + --certificate-authority=/etc/origin/master/front-proxy-ca.crt + --signer-cert=/etc/origin/master/front-proxy-ca.crt + --signer-key=/etc/origin/master/front-proxy-ca.key + --user aggregator-front-proxy + --client-dir={{ certtemp.stdout }} + --signer-serial=/etc/origin/master/ca.serial.txt + delegate_to: "{{ groups.oo_first_master.0 }}" + run_once: true + - name: Copy first master api-client config for Aggregator + copy: + src: "{{ certtemp.stdout }}/{{ item }}" + dest: "/etc/origin/master/" + remote_src: true + with_items: + - aggregator-front-proxy.crt + - aggregator-front-proxy.key + - aggregator-front-proxy.kubeconfig + delegate_to: "{{ groups.oo_first_master.0 }}" + run_once: true + when: + - not first_front_proxy_kubeconfig.stat.exists + +- name: Check for api-client config + stat: + path: /etc/origin/master/aggregator-front-proxy.kubeconfig + register: front_proxy_kubeconfig + +- name: Copy api-client config from first master + fetch: + src: "/etc/origin/master/{{ item }}" + dest: "{{ certtemp.stdout }}/{{ item }}" + flat: yes + delegate_to: "{{ groups.oo_first_master.0 }}" + with_items: + - aggregator-front-proxy.crt + - aggregator-front-proxy.key + - aggregator-front-proxy.kubeconfig + when: + - not front_proxy_kubeconfig.stat.exists + +- name: Copy api-client config to host + copy: + src: "{{ certtemp.stdout }}/{{ item }}" + dest: "/etc/origin/master/{{ item }}" + with_items: + - aggregator-front-proxy.crt + - aggregator-front-proxy.key + - aggregator-front-proxy.kubeconfig + when: + - not front_proxy_kubeconfig.stat.exists + +- name: Delete temp directory + file: + name: "{{ certtemp.stdout }}" + state: absent + changed_when: False + +- name: Setup extension file for service console UI + template: + src: ../templates/openshift-ansible-catalog-console.js + dest: /etc/origin/master/openshift-ansible-catalog-console.js + +- name: Update master config + yedit: + state: present + src: /etc/origin/master/master-config.yaml + edits: + - key: aggregatorConfig.proxyClientInfo.certFile + value: aggregator-front-proxy.crt + - key: aggregatorConfig.proxyClientInfo.keyFile + value: aggregator-front-proxy.key + - key: authConfig.requestHeader.clientCA + value: front-proxy-ca.crt + - key: authConfig.requestHeader.clientCommonNames + value: [aggregator-front-proxy] + - key: authConfig.requestHeader.usernameHeaders + value: [X-Remote-User] + - key: authConfig.requestHeader.groupHeaders + value: [X-Remote-Group] + - key: authConfig.requestHeader.extraHeaderPrefixes + value: [X-Remote-Extra-] + - key: assetConfig.extensionScripts + value: [/etc/origin/master/openshift-ansible-catalog-console.js] + - key: kubernetesMasterConfig.apiServerArguments.runtime-config + value: [apis/settings.k8s.io/v1alpha1=true] + - key: admissionConfig.pluginConfig.PodPreset.configuration.kind + value: DefaultAdmissionConfig + - key: admissionConfig.pluginConfig.PodPreset.configuration.apiVersion + value: v1 + - key: admissionConfig.pluginConfig.PodPreset.configuration.disable + value: false + register: yedit_output + +#restart master serially here +- name: restart master api + systemd: name={{ openshift.common.service_type }}-master-api state=restarted + when: + - yedit_output.changed + - openshift.master.cluster_method == 'native' + +# We retry the controllers because the API may not be 100% initialized yet. 
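+# Like the API restart above, this runs only when the yedit task actually
+# changed master-config.yaml and the cluster uses the 'native' HA method.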
+- name: restart master controllers + command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" + retries: 3 + delay: 5 + register: result + until: result.rc == 0 + when: + - yedit_output.changed + - openshift.master.cluster_method == 'native' + +- name: Verify API Server + # Using curl here since the uri module requires python-httplib2 and + # wait_for port doesn't provide health information. + command: > + curl --silent --tlsv1.2 + --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt + {{ openshift.master.api_url }}/healthz/ready + args: + # Disables the following warning: + # Consider using get_url or uri module rather than running curl + warn: no + register: api_available_output + until: api_available_output.stdout == 'ok' + retries: 120 + delay: 1 + changed_when: false + when: + - yedit_output.changed diff --git a/playbooks/openshift-master/private/templates/openshift-ansible-catalog-console.js b/playbooks/openshift-master/private/templates/openshift-ansible-catalog-console.js new file mode 100644 index 000000000..fd02325ba --- /dev/null +++ b/playbooks/openshift-master/private/templates/openshift-ansible-catalog-console.js @@ -0,0 +1 @@ +window.OPENSHIFT_CONSTANTS.TEMPLATE_SERVICE_BROKER_ENABLED = {{ 'true' if (template_service_broker_install | default(True)) else 'false' }}; diff --git a/playbooks/openshift-master/private/validate_restart.yml b/playbooks/openshift-master/private/validate_restart.yml new file mode 100644 index 000000000..5dbb21502 --- /dev/null +++ b/playbooks/openshift-master/private/validate_restart.yml @@ -0,0 +1,65 @@ +--- +- name: Validate configuration for rolling restart + hosts: oo_masters_to_config + roles: + - openshift_facts + tasks: + - fail: + msg: "openshift_rolling_restart_mode must be set to either 'services' or 'system'" + when: openshift_rolling_restart_mode is defined and openshift_rolling_restart_mode not in ["services", "system"] + - openshift_facts: + role: "{{ item.role }}" + local_facts: "{{ item.local_facts }}" + with_items: + - role: common + local_facts: + rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}" + - role: master + local_facts: + cluster_method: "{{ openshift_master_cluster_method | default(None) }}" + +# Creating a temp file on localhost, we then check each system that will +# be rebooted to see if that file exists, if so we know we're running +# ansible on a machine that needs a reboot, and we need to error out. +- name: Create temp file on localhost + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - local_action: command mktemp + register: mktemp + changed_when: false + +- name: Check if temp file exists on any masters + hosts: oo_masters_to_config + tasks: + - stat: path="{{ hostvars.localhost.mktemp.stdout }}" + register: exists + changed_when: false + +- name: Cleanup temp file on localhost + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent + changed_when: false + +- name: Warn if restarting the system where ansible is running + hosts: oo_masters_to_config + tasks: + - pause: + prompt: > + Warning: Running playbook from a host that will be restarted! + Press CTRL+C and A to abort playbook execution. You may + continue by pressing ENTER but the playbook will stop + executing after this system has been restarted and services + must be verified manually. 
To only restart services, set + openshift_master_rolling_restart_mode=services in host + inventory and relaunch the playbook. + when: exists.stat.exists and openshift.common.rolling_restart_mode == 'system' + - set_fact: + current_host: "{{ exists.stat.exists }}" + when: openshift.common.rolling_restart_mode == 'system' diff --git a/playbooks/openshift-master/restart.yml b/playbooks/openshift-master/restart.yml new file mode 100644 index 000000000..5e28e274e --- /dev/null +++ b/playbooks/openshift-master/restart.yml @@ -0,0 +1,4 @@ +--- +- include: ../init/main.yml + +- include: private/restart.yml diff --git a/playbooks/openshift-master/scaleup.yml b/playbooks/openshift-master/scaleup.yml new file mode 100644 index 000000000..aa0dd8094 --- /dev/null +++ b/playbooks/openshift-master/scaleup.yml @@ -0,0 +1,23 @@ +--- +- include: ../init/evaluate_groups.yml + +- name: Ensure there are new_masters or new_nodes + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - fail: + msg: > + Detected no new_masters or no new_nodes in inventory. Please + add hosts to the new_masters and new_nodes host groups to add + masters. + when: + - g_new_master_hosts | default([]) | length == 0 + - g_new_node_hosts | default([]) | length == 0 + +# Need a better way to do the above check for node without +# running evaluate_groups and init/main.yml +- include: ../init/main.yml + +- include: private/scaleup.yml diff --git a/roles/installer_checkpoint/README.md b/roles/installer_checkpoint/README.md index 6426cd545..68c0357b6 100644 --- a/roles/installer_checkpoint/README.md +++ b/roles/installer_checkpoint/README.md @@ -160,7 +160,7 @@ Health Check : Complete (0:01:10) etcd Install : Complete (0:02:58) Master Install : Complete (0:09:20) Master Additional Install : In Progress (0:20:04) - This phase can be restarted by running: playbooks/byo/openshift-master/additional_config.yml + This phase can be restarted by running: playbooks/openshift-master/additional_config.yml ``` [set_stats]: http://docs.ansible.com/ansible/latest/set_stats_module.html diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py index b001e7cb0..fcda9aa51 100644 --- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py +++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py @@ -62,11 +62,11 @@ class CallbackModule(CallbackBase): }, 'installer_phase_master': { 'title': 'Master Install', - 'playbook': 'playbooks/byo/openshift-master/config.yml' + 'playbook': 'playbooks/openshift-master/config.yml' }, 'installer_phase_master_additional': { 'title': 'Master Additional Install', - 'playbook': 'playbooks/byo/openshift-master/additional_config.yml' + 'playbook': 'playbooks/openshift-master/additional_config.yml' }, 'installer_phase_node': { 'title': 'Node Install', -- cgit v1.2.1