author     Kenny Woodson <kwoodson@redhat.com>    2017-12-01 21:39:00 -0500
committer  Kenny Woodson <kwoodson@redhat.com>    2017-12-11 16:39:27 -0500
commit     35c1abb6050f2cd1f31396edd42618a2998bd546 (patch)
tree       dfb31882e7eb459b173ea206a89bc834aba10dd0 /playbooks/common/openshift-cluster/upgrades
parent     a53b8f63175c80c9d0a8d590fd0854e2ed8e1aae (diff)
Changing the node group format to a list.
Diffstat (limited to 'playbooks/common/openshift-cluster/upgrades')
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml  35
1 file changed, 21 insertions(+), 14 deletions(-)
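The plays changed below are tuned through a few extra-vars which, per the comments in the diff, must be passed with -e at invocation time rather than set as per-host inventory vars. A minimal sketch of such an extra-vars file follows; the variable names are taken from the diff, while the file name and the values are purely illustrative:

# upgrade_tunables.yml (hypothetical file name), passed for example as:
#   ansible-playbook playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml -e @upgrade_tunables.yml
openshift_upgrade_nodes_serial: "20%"            # batch size for the node plays; a count ("1") or a percentage ("20%")
openshift_upgrade_nodes_max_fail_percentage: 0   # abort a batch as soon as any host in it fails
openshift_upgrade_nodes_drain_timeout: 600       # seconds passed to `oc adm drain --timeout`; 0 (the playbook default) disables the client-side timeout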
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index 47410dff3..4fc897a57 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -11,25 +11,19 @@
msg: "Ensure that new scale groups were provisioned before proceeding to update."
when:
- "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0"
+ - "'oo_sg_current_nodes' not in groups or groups.oo_sg_current_nodes|length == 0"
+ - groups.oo_sg_current_nodes == groups.oo_sg_new_nodes
- name: initialize upgrade bits
import_playbook: init.yml
-- name: Drain and upgrade nodes
+- name: unschedule nodes
hosts: oo_sg_current_nodes
- # This var must be set with -e on invocation, as it is not a per-host inventory var
- # and is evaluated early. Values such as "20%" can also be used.
- serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
- max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
-
- pre_tasks:
+ tasks:
- name: Load lib_openshift modules
- include_role:
+ import_role:
name: ../roles/lib_openshift
- # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
- # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
- # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- name: Mark node unschedulable
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
@@ -40,14 +34,27 @@
register: node_unschedulable
until: node_unschedulable|succeeded
+- name: Drain nodes
+ hosts: oo_sg_current_nodes
+ # This var must be set with -e on invocation, as it is not a per-host inventory var
+ # and is evaluated early. Values such as "20%" can also be used.
+ serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
+ max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
+ tasks:
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not l_upgrade_nodes_drain_result | failed
- retries: 60
- delay: 60
+ retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0 | int }}"
+ delay: 5
+ failed_when:
+ - l_upgrade_nodes_drain_result | failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) == '0'
# Alright, let's clean up!
- name: clean up the old scale group
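Read outside of diff context, the drain hunk above is the standard Ansible delegate/register/until retry idiom: run `oc adm drain` for each node in the current scale group from the first master, and retry until it succeeds or the allowed attempts run out. A stripped-down sketch of that idiom follows; it assumes the same inventory groups (oo_sg_current_nodes, oo_first_master), while the bare oc binary name, the use of inventory_hostname instead of the facts-derived node name, and the fixed retry count are illustrative simplifications of what the playbook computes:

# Minimal sketch of the drain-and-retry idiom used above (not the commit's exact task).
- name: Drain current scale group nodes
  hosts: oo_sg_current_nodes
  serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
  tasks:
  - name: Drain node via the first master
    command: >
      oc adm drain {{ inventory_hostname | lower }}
      --force --delete-local-data --ignore-daemonsets
      --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
    delegate_to: "{{ groups.oo_first_master.0 }}"
    register: drain_result
    until: drain_result.rc == 0
    retries: 3   # illustrative; the playbook derives its retry behavior from the timeout value
    delay: 5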