Diffstat (limited to 'playbooks/common')
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml  45
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml          45
2 files changed, 40 insertions, 50 deletions
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index db2c27919..a4aefcdac 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -238,29 +238,22 @@
any_errors_fatal: true
pre_tasks:
+ - name: Load lib_openshift modules
+ include_role:
+ name: lib_openshift
+
# TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
# or docker actually needs an upgrade before proceeding. Perhaps best to save this until
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- - name: Determine if node is currently scheduleable
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
- register: node_output
- delegate_to: "{{ groups.oo_first_master.0 }}"
- changed_when: false
-
- - set_fact:
- was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
-
- name: Mark node unschedulable
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: False
delegate_to: "{{ groups.oo_first_master.0 }}"
- # NOTE: There is a transient "object has been modified" error here, allow a couple
- # retries for a more reliable upgrade.
- register: node_unsched
- until: node_unsched.rc == 0
- retries: 3
- delay: 1
+ retries: 10
+ delay: 5
+ register: node_unschedulable
+ until: node_unschedulable|succeeded
- name: Drain Node for Kubelet upgrade
command: >
@@ -268,17 +261,19 @@
delegate_to: "{{ groups.oo_first_master.0 }}"
roles:
+ - lib_openshift
- openshift_facts
- docker
- openshift_node_upgrade
post_tasks:
- name: Set node schedulability
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: True
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: was_schedulable | bool
- register: node_sched
- until: node_sched.rc == 0
- retries: 3
- delay: 1
+ retries: 10
+ delay: 5
+ register: node_schedulable
+ until: node_schedulable|succeeded
+ when: node_unschedulable|changed
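
The hunk above replaces the raw `oc adm manage-node` shell invocations with the `oadm_manage_node` module loaded from the lib_openshift role. As a minimal sketch (assuming the surrounding play and inventory groups already defined in this playbook), the cordon step in the resulting file reads roughly as follows:

  pre_tasks:
  - name: Load lib_openshift modules
    include_role:
      name: lib_openshift

  - name: Mark node unschedulable
    oadm_manage_node:
      node: "{{ openshift.node.nodename | lower }}"
      schedulable: False
    delegate_to: "{{ groups.oo_first_master.0 }}"
    # Retries ride out the transient "object has been modified" API errors
    # that the old command-based task also worked around.
    retries: 10
    delay: 5
    register: node_unschedulable
    until: node_unschedulable|succeeded

Because the module reports whether it actually changed the node, the separate "Determine if node is currently scheduleable" lookup and the was_schedulable fact are no longer needed.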
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index e45b635f7..e3a98fd9b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -7,29 +7,22 @@
any_errors_fatal: true
pre_tasks:
+ - name: Load lib_openshift modules
+ include_role:
+ name: lib_openshift
+
# TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
# or docker actually needs an upgrade before proceeding. Perhaps best to save this until
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- - name: Determine if node is currently scheduleable
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
- register: node_output
- delegate_to: "{{ groups.oo_first_master.0 }}"
- changed_when: false
-
- - set_fact:
- was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
-
- name: Mark node unschedulable
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: False
delegate_to: "{{ groups.oo_first_master.0 }}"
- # NOTE: There is a transient "object has been modified" error here, allow a couple
- # retries for a more reliable upgrade.
- register: node_unsched
- until: node_unsched.rc == 0
- retries: 3
- delay: 1
+ retries: 10
+ delay: 5
+ register: node_unschedulable
+ until: node_unschedulable|succeeded
- name: Drain Node for Kubelet upgrade
command: >
@@ -37,20 +30,22 @@
delegate_to: "{{ groups.oo_first_master.0 }}"
roles:
+ - lib_openshift
- openshift_facts
- docker
- openshift_node_upgrade
post_tasks:
- name: Set node schedulability
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: True
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: was_schedulable | bool
- register: node_sched
- until: node_sched.rc == 0
- retries: 3
- delay: 1
+ retries: 10
+ delay: 5
+ register: node_schedulable
+ until: node_schedulable|succeeded
+ when: node_unschedulable|changed
- include: ../reset_excluder.yml
tags:
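
upgrade_nodes.yml receives the same change as upgrade_control_plane.yml. In both files the post_tasks side now restores schedulability only when the earlier cordon task reported a change, instead of consulting the old was_schedulable fact. A minimal sketch of that task, based on the new lines shown above:

  post_tasks:
  - name: Set node schedulability
    oadm_manage_node:
      node: "{{ openshift.node.nodename | lower }}"
      schedulable: True
    delegate_to: "{{ groups.oo_first_master.0 }}"
    retries: 10
    delay: 5
    register: node_schedulable
    until: node_schedulable|succeeded
    # Only uncordon nodes that this play actually cordoned.
    when: node_unschedulable|changed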