---
- name: Drain and upgrade nodes
  hosts: oo_nodes_to_upgrade:!oo_masters_to_config
  # This var must be set with -e on invocation, as it is not a per-host inventory var
  # and is evaluated early. Values such as "20%" can also be used.
  serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
  any_errors_fatal: true

  pre_tasks:
  - name: Load lib_openshift modules
    include_role:
      name: lib_openshift

  # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
  # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
  # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
  - name: Mark node unschedulable
    oadm_manage_node:
      node: "{{ openshift.node.nodename | lower }}"
      schedulable: False
    delegate_to: "{{ groups.oo_first_master.0 }}"
    retries: 10
    delay: 5
    register: node_unschedulable
    until: node_unschedulable|succeeded

  - name: Drain Node for Kubelet upgrade
    command: >
      {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain
      {{ openshift.node.nodename | lower }}
      --force --delete-local-data --ignore-daemonsets
    delegate_to: "{{ groups.oo_first_master.0 }}"

  roles:
  - lib_openshift
  - openshift_facts
  - docker
  - openshift_node_upgrade

  post_tasks:
  - name: Set node schedulability
    oadm_manage_node:
      node: "{{ openshift.node.nodename | lower }}"
      schedulable: True
    delegate_to: "{{ groups.oo_first_master.0 }}"
    retries: 10
    delay: 5
    register: node_schedulable
    until: node_schedulable|succeeded
    when: node_unschedulable|changed

- include: ../reset_excluder.yml
  tags:
  - always
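
# Example invocation (a sketch, not taken from this file: the playbook filename
# and inventory path below are assumptions). It shows how to pass the batch size
# documented above, since openshift_upgrade_nodes_serial must be supplied with -e
# at invocation time and accepts either an integer or a percentage:
#
#   ansible-playbook -i /etc/ansible/hosts upgrade_nodes.yml \
#     -e openshift_upgrade_nodes_serial="20%"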