---
###############################################################################
# Upgrade Masters
###############################################################################

- name: Backup and upgrade etcd
  import_playbook: ../../../openshift-etcd/private/upgrade_main.yml

# Create service signer cert when missing. Service signer certificate
# is added to master config in the master_config_upgrade hook.
- name: Determine if service signer cert must be created
  hosts: oo_first_master
  tasks:
  - name: Determine if service signer certificate must be created
    stat:
      path: "{{ openshift.common.config_base }}/master/service-signer.crt"
    register: service_signer_cert_stat
    changed_when: false

- import_playbook: create_service_signer_cert.yml

# oc adm migrate storage should be run prior to etcd v3 upgrade
# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
- name: Pre master upgrade - Upgrade all storage
  hosts: oo_first_master
  tasks:
  - name: Upgrade all storage
    command: >
      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      migrate storage --include=* --confirm
    register: l_pb_upgrade_control_plane_pre_upgrade_storage
    when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
    failed_when:
    - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
    - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
    - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool

# Set openshift_master_facts separately. In order to reconcile
# admission_config's, we currently must run openshift_master_facts and
# then run openshift_facts.
- name: Set OpenShift master facts
  hosts: oo_masters_to_config
  roles:
  - openshift_master_facts

# The main master upgrade play. Should handle all changes to the system in one pass, with
# support for optional hooks to be defined.
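# Example (hypothetical paths, shown for illustration only) of pointing the optional
# hooks at custom task files via inventory or -e; each hook is skipped below when its
# variable is undefined:
#   openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml
#   openshift_master_upgrade_hook=/usr/share/custom/master.yml
#   openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml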
- name: Upgrade master
  hosts: oo_masters_to_config
  vars:
    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
  serial: 1
  tasks:
  - import_role:
      name: openshift_facts

  # Run the pre-upgrade hook if defined:
  - debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
    when: openshift_master_upgrade_pre_hook is defined

  - include_tasks: "{{ openshift_master_upgrade_pre_hook }}"
    when: openshift_master_upgrade_pre_hook is defined

  - import_role:
      name: openshift_master
      tasks_from: upgrade.yml

  # Run the upgrade hook prior to restarting services/system if defined:
  - debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
    when: openshift_master_upgrade_hook is defined

  - include_tasks: "{{ openshift_master_upgrade_hook }}"
    when: openshift_master_upgrade_hook is defined

  - include_tasks: ../../../openshift-master/private/tasks/restart_hosts.yml
    when: openshift.common.rolling_restart_mode == 'system'

  - include_tasks: ../../../openshift-master/private/tasks/restart_services.yml
    when: openshift.common.rolling_restart_mode == 'services'

  # Run the post-upgrade hook if defined:
  - debug: msg="Running master post-upgrade hook {{ openshift_master_upgrade_post_hook }}"
    when: openshift_master_upgrade_post_hook is defined

  - include_tasks: "{{ openshift_master_upgrade_post_hook }}"
    when: openshift_master_upgrade_post_hook is defined

  - name: Post master upgrade - Upgrade clusterpolicies storage
    command: >
      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      migrate storage --include=clusterpolicies --confirm
    register: l_pb_upgrade_control_plane_post_upgrade_storage
    when:
    - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
    - openshift_version is version_compare('3.7','<')
    failed_when:
    - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
    - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
    - openshift_upgrade_post_storage_migration_fatal | default(false) | bool
    run_once: true
    delegate_to: "{{ groups.oo_first_master.0 }}"

  - set_fact:
      master_update_complete: True

##############################################################################
# Gate on master update complete
##############################################################################
- name: Gate on master update
  hosts: localhost
  connection: local
  become: no
  tasks:
  - set_fact:
      master_update_completed: "{{ hostvars | lib_utils_oo_select_keys(groups.oo_masters_to_config) | lib_utils_oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
  - set_fact:
      master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) | list }}"
  - fail:
      msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
    when: master_update_failed | length > 0

###############################################################################
# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
###############################################################################

- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints
  hosts: oo_masters_to_config
  roles:
  - { role: openshift_cli }
  vars:
    __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
  tasks:
  - name: Reconcile Cluster Roles
    command: >
      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      policy reconcile-cluster-roles --additive-only=true --confirm -o name
    register: reconcile_cluster_role_result
    when: openshift_version is version_compare('3.7','<')
    changed_when:
    - reconcile_cluster_role_result.stdout != ''
    - reconcile_cluster_role_result.rc == 0
    run_once: true

  - name: Reconcile Cluster Role Bindings
    command: >
      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      policy reconcile-cluster-role-bindings
      --exclude-groups=system:authenticated
      --exclude-groups=system:authenticated:oauth
      --exclude-groups=system:unauthenticated
      --exclude-users=system:anonymous
      --additive-only=true --confirm -o name
    when: openshift_version is version_compare('3.7','<')
    register: reconcile_bindings_result
    changed_when:
    - reconcile_bindings_result.stdout != ''
    - reconcile_bindings_result.rc == 0
    run_once: true

  - name: Reconcile Jenkins Pipeline Role Bindings
    command: >
      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name
    run_once: true
    register: reconcile_jenkins_role_binding_result
    changed_when:
    - reconcile_jenkins_role_binding_result.stdout != ''
    - reconcile_jenkins_role_binding_result.rc == 0
    when:
    - openshift_version is version_compare('3.7','<')

  - when: openshift_upgrade_target is version_compare('3.7','<')
    block:
    - name: Retrieve shared-resource-viewer
      oc_obj:
        state: list
        kind: role
        name: "shared-resource-viewer"
        namespace: "openshift"
      register: objout

    - name: Determine if shared-resource-viewer is protected
      set_fact:
        __shared_resource_viewer_protected: true
      when:
      - "'results' in objout"
      - "'results' in objout['results']"
      - "'annotations' in objout['results']['results'][0]['metadata']"
      - "'openshift.io/reconcile-protect' in objout['results']['results'][0]['metadata']['annotations']"
      - "objout['results']['results'][0]['metadata']['annotations']['openshift.io/reconcile-protect'] == 'true'"

    - copy:
        src: "{{ item }}"
        dest: "/tmp/{{ item }}"
      with_items:
      - "{{ __master_shared_resource_viewer_file }}"
      when: __shared_resource_viewer_protected is not defined

    - name: Fixup shared-resource-viewer role
      oc_obj:
        state: present
        kind: role
        name: "shared-resource-viewer"
        namespace: "openshift"
        files:
        - "/tmp/{{ __master_shared_resource_viewer_file }}"
        delete_after: true
      when: __shared_resource_viewer_protected is not defined
      register: result
      retries: 3
      delay: 5
      until: result.rc == 0
      ignore_errors: true

  - name: Reconcile Security Context Constraints
    command: >
      {{ openshift_client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      reconcile-sccs --confirm --additive-only=true -o name
    register: reconcile_scc_result
    changed_when:
    - reconcile_scc_result.stdout != ''
    - reconcile_scc_result.rc == 0
    run_once: true

  - name: Migrate storage post policy reconciliation
    command: >
      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      migrate storage --include=* --confirm
    run_once: true
    register: l_pb_upgrade_control_plane_post_upgrade_storage
    when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
    failed_when:
    - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
    - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
    - openshift_upgrade_post_storage_migration_fatal | default(false) | bool

  - set_fact:
      reconcile_complete: True

##############################################################################
# Gate on reconcile
##############################################################################
- name: Gate on reconcile
  hosts: localhost
  connection: local
  become: no
  tasks:
  - set_fact:
      reconcile_completed: "{{ hostvars | lib_utils_oo_select_keys(groups.oo_masters_to_config) | lib_utils_oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
  - set_fact:
      reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) | list }}"
  - fail:
      msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
    when: reconcile_failed | length > 0

- name: Upgrade Docker on dedicated containerized etcd hosts
  hosts: oo_etcd_to_config:!oo_nodes_to_upgrade
  serial: 1
  any_errors_fatal: true
  roles:
  - openshift_facts
  tasks:
  - include_tasks: docker/tasks/upgrade.yml
    when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift_is_atomic | bool

- name: Drain and upgrade master nodes
  hosts: oo_masters_to_config:&oo_nodes_to_upgrade
  # This var must be set with -e on invocation, as it is not a per-host inventory var
  # and is evaluated early. Values such as "20%" can also be used.
  serial: "{{ openshift_upgrade_control_plane_nodes_serial | default(1) }}"
  max_fail_percentage: "{{ openshift_upgrade_control_plane_nodes_max_fail_percentage | default(0) }}"

  pre_tasks:
  - name: Load lib_openshift modules
    import_role:
      name: lib_openshift

  # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
  # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
  # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
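  # Illustrative invocation only (the playbook path is an assumption for this example):
  # the serial and max_fail_percentage vars above are evaluated early, so pass them with -e, e.g.
  #   ansible-playbook -i <inventory> upgrade_control_plane.yml \
  #     -e openshift_upgrade_control_plane_nodes_serial="20%" \
  #     -e openshift_upgrade_control_plane_nodes_max_fail_percentage=20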
  - name: Mark node unschedulable
    oc_adm_manage_node:
      node: "{{ openshift.node.nodename | lower }}"
      schedulable: False
    delegate_to: "{{ groups.oo_first_master.0 }}"
    retries: 10
    delay: 5
    register: node_unschedulable
    until: node_unschedulable is succeeded

  - name: Drain Node for Kubelet upgrade
    command: >
      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      --force --delete-local-data --ignore-daemonsets
    delegate_to: "{{ groups.oo_first_master.0 }}"
    register: l_upgrade_control_plane_drain_result
    until: not (l_upgrade_control_plane_drain_result is failed)
    retries: 60
    delay: 60

  roles:
  - openshift_facts

  post_tasks:
  - import_role:
      name: openshift_node
      tasks_from: upgrade.yml
    vars:
      openshift_node_upgrade_in_progress: True

  - name: Set node schedulability
    oc_adm_manage_node:
      node: "{{ openshift.node.nodename | lower }}"
      schedulable: True
    delegate_to: "{{ groups.oo_first_master.0 }}"
    retries: 10
    delay: 5
    register: node_schedulable
    until: node_schedulable is succeeded
    when: node_unschedulable is changed
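# Optional sanity check after this play (illustrative, not part of the upgrade;
# assumes the default config_base of /etc/origin): confirm the master nodes no
# longer report SchedulingDisabled once the upgrade batch completes:
#   oc get nodes --config=/etc/origin/master/admin.kubeconfig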