From 910e23336da02b8d1eec75276ce77ec269e2216c Mon Sep 17 00:00:00 2001
From: Devan Goodwin
Date: Thu, 22 Sep 2016 11:48:46 -0300
Subject: Skip the docker role in early upgrade stages.

This improves the situation further and prevents configuration changes
from accidentally triggering docker restarts before we've evacuated nodes.

We now skip the role entirely in two places, instead of the previous
implementation, which only skipped upgrading the installed version (and
did not catch config issues).
---
 playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml')

diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index a54349220..917c95e29 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -7,10 +7,11 @@
   any_errors_fatal: true
   roles:
   - openshift_facts
+  - docker
   handlers:
   - include: ../../../../roles/openshift_node/handlers/main.yml
     static: yes
-  tasks:
+  pre_tasks:
   # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
   # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
   # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
@@ -37,7 +38,7 @@
       {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --evacuate --force
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_upgrade
-
+  tasks:
   - include: docker/upgrade.yml
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool

--
cgit v1.2.1
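
For reference, Ansible runs a play's pre_tasks before its roles and its roles
before its tasks, regardless of the order the keys appear in the file, so with
this change the node is evacuated before the docker role (or the docker
upgrade include) can touch docker. A minimal sketch of the resulting play
follows; only the hunk contents above are taken from the real file, while the
play header and task names are assumed for illustration:

- name: Evacuate and upgrade nodes        # play name assumed for illustration
  hosts: oo_nodes_to_upgrade              # assumed from the 'when' clauses above
  serial: 1                               # assumed; not shown in the hunks
  any_errors_fatal: true
  roles:
  - openshift_facts
  - docker                                # added here; earlier upgrade stages now skip the docker role
  handlers:
  - include: ../../../../roles/openshift_node/handlers/main.yml
    static: yes
  pre_tasks:                              # pre_tasks run before roles, so evacuation
  - name: Evacuate node                   # completes before the docker role can restart docker
    command: >
      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --evacuate --force
    delegate_to: "{{ groups.oo_first_master.0 }}"
    when: inventory_hostname in groups.oo_nodes_to_upgrade
  tasks:                                  # the docker package upgrade itself runs after the roles
  - include: docker/upgrade.yml
    when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool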