# playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
---
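# Upgrade nodes that run in AWS scale groups: build a replacement scale
# group, cordon and drain the nodes in the current group, then remove the
# old scale group once its workloads have been rescheduled.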
- name: create new scale group
  hosts: localhost
  tasks:
  - name: build upgrade scale groups
    import_role:
      name: openshift_aws
      tasks_from: upgrade_node_group.yml

  - fail:
      msg: "Ensure that new scale groups were provisioned before proceeding to update."
    when: >
      ('oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes | length == 0)
      or ('oo_sg_current_nodes' not in groups or groups.oo_sg_current_nodes | length == 0)
      or groups.oo_sg_current_nodes == groups.oo_sg_new_nodes
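  # Note: upgrade_node_group.yml is expected to populate the oo_sg_new_nodes
  # and oo_sg_current_nodes inventory groups that the check above relies on.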

- name: initialize upgrade bits
  import_playbook: init.yml

- name: unschedule nodes
  hosts: oo_sg_current_nodes
  tasks:
  - name: Load lib_openshift modules
    import_role:
      name: ../roles/lib_openshift

  - name: Mark node unschedulable
    oc_adm_manage_node:
      node: "{{ openshift.node.nodename | lower }}"
      schedulable: False
    delegate_to: "{{ groups.oo_first_master.0 }}"
    retries: 10
    delay: 5
    register: node_unschedulable
    until: node_unschedulable is succeeded
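  # The cordon above is roughly equivalent to running, for example,
  #   oc adm manage-node <nodename> --schedulable=false
  # from the first master with its admin kubeconfig.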

- name: Drain nodes
  hosts: oo_sg_current_nodes
  # This var must be set with -e on invocation, as it is not a per-host inventory var
  # and is evaluated early. Values such as "20%" can also be used.
  serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
  max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
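  # Example invocation (values are illustrative only):
  #   ansible-playbook -i <inventory> playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml \
  #     -e openshift_upgrade_nodes_serial="20%" \
  #     -e openshift_upgrade_nodes_max_fail_percentage=10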
  tasks:
  - name: Drain Node for Kubelet upgrade
    command: >
      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      --force --delete-local-data --ignore-daemonsets
      --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
    delegate_to: "{{ groups.oo_first_master.0 }}"
    register: l_upgrade_nodes_drain_result
    until: not (l_upgrade_nodes_drain_result is failed)
    retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0  | int }}"
    delay: 5
    failed_when:
    - l_upgrade_nodes_drain_result is failed
    - (openshift_upgrade_nodes_drain_timeout | default(0) | int) == 0
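  # With no drain timeout (the default of 0), a failed drain is fatal and is
  # retried once; when a timeout is supplied, for example
  #   -e openshift_upgrade_nodes_drain_timeout=600
  # a drain that fails (e.g. times out) is tolerated and the upgrade proceeds.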

# Alright, let's clean up!
- name: clean up the old scale group
  hosts: localhost
  tasks:
  - name: clean up scale group
    import_role:
      name: openshift_aws
      tasks_from: remove_scale_group.yml
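  # remove_scale_group.yml in the openshift_aws role is expected to tear down
  # the old scale group now that its nodes have been drained.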