author    Jan Chaloupka <jchaloup@redhat.com>  2017-02-14 09:40:36 +0100
committer GitHub <noreply@github.com>          2017-02-14 09:40:36 +0100
commit    7127518224d996e01a89db147434e404ebd35296 (patch)
tree      6c3b1969e44e0e85e3f1935067f359fbeac017cd
parent    9c09ffbd4b1c2dc9593c6fb1f312172c538f2bec (diff)
parent    8fd0bcfaa48b8fb62585dc96aa87741c58afe5cd (diff)
Merge pull request #3289 from mtnbikenc/upgrade-common
Modify playbooks to use oadm_manage_node module
 filter_plugins/oo_filters.py                                          | 23
 playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml    | 31
 playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 45
 playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml         | 45
 roles/openshift_manage_node/meta/main.yml                             | 15
 roles/openshift_manage_node/tasks/main.yml                            | 69
 6 files changed, 128 insertions(+), 100 deletions(-)
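
At its core, this PR replaces shell-outs to "oc adm manage-node" with the oadm_manage_node module from lib_openshift and wraps each call in a retry loop. A minimal sketch of the resulting task pattern, summarizing the hunks below rather than quoting any one of them verbatim:

# Pattern applied throughout this PR: call the oadm_manage_node module instead
# of shelling out, and retry until the API call succeeds.
- name: Mark node unschedulable
  oadm_manage_node:
    node: "{{ openshift.node.nodename | lower }}"
    schedulable: False
  delegate_to: "{{ groups.oo_first_master.0 }}"
  register: node_unschedulable
  until: node_unschedulable|succeeded
  retries: 10
  delay: 5
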
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index c9390efe6..a833a5d93 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -242,6 +242,28 @@ def oo_combine_dict(data, in_joiner='=', out_joiner=' '):
return out_joiner.join([in_joiner.join([k, str(v)]) for k, v in data.items()])
+def oo_dict_to_list_of_dict(data, key_title='key', value_title='value'):
+ """Take a dict and arrange them as a list of dicts
+
+ Input data:
+ {'region': 'infra', 'test_k': 'test_v'}
+
+ Return data:
+ [{'key': 'region', 'value': 'infra'}, {'key': 'test_k', 'value': 'test_v'}]
+
+ Written for use by the oc_label module
+ """
+ if not isinstance(data, dict):
+ # pylint: disable=line-too-long
+ raise errors.AnsibleFilterError("|failed expects first param is a dict. Got %s. Type: %s" % (str(data), str(type(data))))
+
+ rval = []
+ for label in data.items():
+ rval.append({key_title: label[0], value_title: label[1]})
+
+ return rval
+
+
def oo_ami_selector(data, image_name):
""" This takes a list of amis and an image name and attempts to return
the latest ami.
@@ -951,6 +973,7 @@ class FilterModule(object):
"oo_ec2_volume_definition": oo_ec2_volume_definition,
"oo_combine_key_value": oo_combine_key_value,
"oo_combine_dict": oo_combine_dict,
+ "oo_dict_to_list_of_dict": oo_dict_to_list_of_dict,
"oo_split": oo_split,
"oo_filter_list": oo_filter_list,
"oo_parse_heat_stack_outputs": oo_parse_heat_stack_outputs,
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 13e1da961..5d3280328 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -22,12 +22,24 @@
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
serial: 1
any_errors_fatal: true
+
+ roles:
+ - lib_openshift
+
tasks:
- - name: Prepare for Node draining
- command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=false
+ - name: Mark node unschedulable
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: False
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
+ retries: 10
+ delay: 5
+ register: node_unschedulable
+ until: node_unschedulable|succeeded
+ when:
+ - l_docker_upgrade is defined
+ - l_docker_upgrade | bool
+ - inventory_hostname in groups.oo_nodes_to_upgrade
- name: Drain Node for Kubelet upgrade
command: >
@@ -39,7 +51,12 @@
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
- command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=true
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: True
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade and openshift.node.schedulable | bool
+ retries: 10
+ delay: 5
+ register: node_schedulable
+ until: node_schedulable|succeeded
+ when: node_unschedulable|changed
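
Note the behavioral change at the end of this play: the old task re-enabled scheduling based on openshift.node.schedulable, while the new one fires only if the disable step actually changed the node. A condensed sketch of the paired tasks (drain elided):

# Condensed pairing used in the upgrade plays: scheduling is re-enabled only
# when the earlier task actually flipped the node to unschedulable.
- name: Mark node unschedulable
  oadm_manage_node:
    node: "{{ openshift.node.nodename | lower }}"
    schedulable: False
  register: node_unschedulable
  until: node_unschedulable|succeeded
  retries: 10
  delay: 5

# ... drain the node and perform the upgrade here ...

- name: Set node schedulability
  oadm_manage_node:
    node: "{{ openshift.node.nodename | lower }}"
    schedulable: True
  register: node_schedulable
  until: node_schedulable|succeeded
  retries: 10
  delay: 5
  when: node_unschedulable|changed
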
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index db2c27919..a4aefcdac 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -238,29 +238,22 @@
any_errors_fatal: true
pre_tasks:
+ - name: Load lib_openshift modules
+ include_role:
+ name: lib_openshift
+
# TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
# or docker actually needs an upgrade before proceeding. Perhaps best to save this until
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- - name: Determine if node is currently scheduleable
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
- register: node_output
- delegate_to: "{{ groups.oo_first_master.0 }}"
- changed_when: false
-
- - set_fact:
- was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
-
- name: Mark node unschedulable
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: False
delegate_to: "{{ groups.oo_first_master.0 }}"
- # NOTE: There is a transient "object has been modified" error here, allow a couple
- # retries for a more reliable upgrade.
- register: node_unsched
- until: node_unsched.rc == 0
- retries: 3
- delay: 1
+ retries: 10
+ delay: 5
+ register: node_unschedulable
+ until: node_unschedulable|succeeded
- name: Drain Node for Kubelet upgrade
command: >
@@ -268,17 +261,19 @@
delegate_to: "{{ groups.oo_first_master.0 }}"
roles:
+ - lib_openshift
- openshift_facts
- docker
- openshift_node_upgrade
post_tasks:
- name: Set node schedulability
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: True
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: was_schedulable | bool
- register: node_sched
- until: node_sched.rc == 0
- retries: 3
- delay: 1
+ retries: 10
+ delay: 5
+ register: node_schedulable
+ until: node_schedulable|succeeded
+ when: node_unschedulable|changed
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index e45b635f7..e3a98fd9b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -7,29 +7,22 @@
any_errors_fatal: true
pre_tasks:
+ - name: Load lib_openshift modules
+ include_role:
+ name: lib_openshift
+
# TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
# or docker actually needs an upgrade before proceeding. Perhaps best to save this until
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- - name: Determine if node is currently scheduleable
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
- register: node_output
- delegate_to: "{{ groups.oo_first_master.0 }}"
- changed_when: false
-
- - set_fact:
- was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
-
- name: Mark node unschedulable
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: False
delegate_to: "{{ groups.oo_first_master.0 }}"
- # NOTE: There is a transient "object has been modified" error here, allow a couple
- # retries for a more reliable upgrade.
- register: node_unsched
- until: node_unsched.rc == 0
- retries: 3
- delay: 1
+ retries: 10
+ delay: 5
+ register: node_unschedulable
+ until: node_unschedulable|succeeded
- name: Drain Node for Kubelet upgrade
command: >
@@ -37,20 +30,22 @@
delegate_to: "{{ groups.oo_first_master.0 }}"
roles:
+ - lib_openshift
- openshift_facts
- docker
- openshift_node_upgrade
post_tasks:
- name: Set node schedulability
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: True
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: was_schedulable | bool
- register: node_sched
- until: node_sched.rc == 0
- retries: 3
- delay: 1
+ retries: 10
+ delay: 5
+ register: node_schedulable
+ until: node_schedulable|succeeded
+ when: node_unschedulable|changed
- include: ../reset_excluder.yml
tags:
diff --git a/roles/openshift_manage_node/meta/main.yml b/roles/openshift_manage_node/meta/main.yml
new file mode 100644
index 000000000..d90cd28cf
--- /dev/null
+++ b/roles/openshift_manage_node/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Manage Node
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
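
With lib_openshift declared as a dependency, any play applying openshift_manage_node gets the oadm_manage_node, oc_obj, and oc_label modules loaded automatically. A hypothetical play sketch; the host group and variable wiring are illustrative, not taken from this PR:

# Hypothetical usage: the meta dependency pulls in lib_openshift, so the
# role's oadm_manage_node / oc_obj / oc_label tasks resolve without an
# explicit include_role.
- hosts: oo_nodes_to_config
  roles:
    - role: openshift_manage_node
      openshift_master_host: "{{ groups.oo_first_master.0 }}"
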
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index c06758833..73f55df12 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -1,23 +1,4 @@
---
-- name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
- delegate_to: "{{ openshift_master_host }}"
- run_once: true
-
-- set_fact:
- openshift_manage_node_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
- delegate_to: "{{ openshift_master_host }}"
- run_once: true
-
-- name: Copy the admin client config(s)
- command: >
- cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ openshift_manage_node_kubeconfig }}
- changed_when: False
- delegate_to: "{{ openshift_master_host }}"
- run_once: true
-
# Necessary because when you're on a node that's also a master the master will be
# restarted after the node restarts docker and it will take up to 60 seconds for
# systemd to start the master again
@@ -46,38 +27,40 @@
run_once: true
- name: Wait for Node Registration
- command: >
- {{ hostvars[openshift_master_host].openshift.common.client_binary }} get node {{ openshift.node.nodename }}
- --config={{ openshift_manage_node_kubeconfig }}
- -n default
- register: omd_get_node
- until: omd_get_node.rc == 0
+ oc_obj:
+ name: "{{ openshift.node.nodename }}"
+ kind: node
+ state: list
+ kubeconfig: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
+ register: get_node
+ until: "'metadata' in get_node.results.results[0]"
retries: 50
delay: 5
- changed_when: false
when: "'nodename' in openshift.node"
delegate_to: "{{ openshift_master_host }}"
- name: Set node schedulability
- command: >
- {{ hostvars[openshift_master_host].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable={{ 'true' if openshift.node.schedulable | bool else 'false' }}
- --config={{ openshift_manage_node_kubeconfig }}
- -n default
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: "{{ 'true' if openshift.node.schedulable | bool else 'false' }}"
+ kubeconfig: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
+ retries: 10
+ delay: 5
+ register: node_schedulable
+ until: node_schedulable|succeeded
when: "'nodename' in openshift.node"
delegate_to: "{{ openshift_master_host }}"
- name: Label nodes
- command: >
- {{ hostvars[openshift_master_host].openshift.common.client_binary }} label --overwrite node {{ openshift.node.nodename }} {{ openshift.node.labels | oo_combine_dict }}
- --config={{ openshift_manage_node_kubeconfig }}
- -n default
- when: "'nodename' in openshift.node and 'labels' in openshift.node and openshift.node.labels != {}"
- delegate_to: "{{ openshift_master_host }}"
-
-- name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
+ oc_label:
+ name: "{{ openshift.node.nodename }}"
+ kind: node
+ state: add
+ labels: "{{ openshift.node.labels | oo_dict_to_list_of_dict }}"
+ kubeconfig: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
+ namespace: default
+ when:
+ - "'nodename' in openshift.node"
+ - "'labels' in openshift.node"
+ - openshift.node.labels != {}
delegate_to: "{{ openshift_master_host }}"
- run_once: true
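
Finally, the label task is where the new oo_dict_to_list_of_dict filter meets oc_label; a sketch with a concrete label dict standing in for openshift.node.labels (the literal value is illustrative):

# Sketch of the filter-to-module wiring, with a literal label dict standing in
# for openshift.node.labels.
- name: Label nodes
  oc_label:
    name: "{{ openshift.node.nodename }}"
    kind: node
    state: add
    # {'region': 'infra'} -> [{'key': 'region', 'value': 'infra'}]
    labels: "{{ {'region': 'infra'} | oo_dict_to_list_of_dict }}"
    kubeconfig: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
    namespace: default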