path: playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
---
- name: Evacuate and upgrade nodes
  hosts: oo_nodes_to_upgrade
  # This var must be set with -e on invocation, as it is not a per-host inventory var
  # and is evaluated early. Values such as "20%" can also be used.
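  # For example (path to this playbook is a placeholder):
  #   ansible-playbook <upgrade playbook> -e openshift_upgrade_nodes_serial="20%"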
  serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
  any_errors_fatal: true
  roles:
  - openshift_facts
  - docker
  handlers:
  - include: ../../../../roles/openshift_node/handlers/main.yml
    static: yes
  pre_tasks:
  # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
  # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
  # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
  - name: Determine if node is currently schedulable
    command: >
      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
    register: node_output
    delegate_to: "{{ groups.oo_first_master.0 }}"
    changed_when: false
    when: inventory_hostname in groups.oo_nodes_to_upgrade

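  # Record whether the node was schedulable before the upgrade so that its
  # schedulability can be restored at the end of the play.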
  - set_fact:
      was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
    when: inventory_hostname in groups.oo_nodes_to_upgrade

  - name: Mark node unschedulable
    command: >
      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
    delegate_to: "{{ groups.oo_first_master.0 }}"
    when: inventory_hostname in groups.oo_nodes_to_upgrade
    # NOTE: There is a transient "object has been modified" error here, allow a couple
    # retries for a more reliable upgrade.
    register: node_unsched
    until: node_unsched.rc == 0
    retries: 3
    delay: 1

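  # Evacuation moves pod workloads off the node so the kubelet can be upgraded safely;
  # --force also removes pods that are not backed by a replication controller.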
  - name: Evacuate Node for Kubelet upgrade
    command: >
      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force
    delegate_to: "{{ groups.oo_first_master.0 }}"
    when: inventory_hostname in groups.oo_nodes_to_upgrade

  tasks:

  - include: docker/upgrade.yml
    vars:
      # We will restart Docker ourselves after everything is ready:
      skip_docker_restart: True
    when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool

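  # Optional hook: deployers may set node_config_hook to a playbook of custom node
  # configuration tasks to run at this point in the upgrade.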
  - include: "{{ node_config_hook }}"
    when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_upgrade

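  # RPM-based nodes upgrade their packages in place; containerized nodes are handled below.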
  - include: rpm_upgrade.yml
    vars:
      component: "node"
      openshift_version: "{{ openshift_pkg_version | default('') }}"
    when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool

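  # The docker-sdn-ovs.conf systemd drop-in is obsolete as of OSE 3.4 / Origin 1.4
  # and is removed if present.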
  - name: Remove obsolete docker-sdn-ovs.conf
    file: path=/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf state=absent
    when: (deployment_type == 'openshift-enterprise' and openshift_release | version_compare('3.4', '>=')) or (deployment_type == 'origin' and openshift_release | version_compare('1.4', '>='))

  - include: containerized_node_upgrade.yml
    when: inventory_hostname in groups.oo_nodes_to_upgrade and openshift.common.is_containerized | bool

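  # Stop containerized services ahead of the Docker restart so they are brought back
  # up cleanly afterwards.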
  - name: Ensure containerized services stopped before Docker restart
    service: name={{ item }} state=stopped
    with_items:
      - etcd_container
      - openvswitch
      - "{{ openshift.common.service_type }}-master"
      - "{{ openshift.common.service_type }}-master-api"
      - "{{ openshift.common.service_type }}-master-controllers"
      - "{{ openshift.common.service_type }}-node"
    failed_when: false
    when: openshift.common.is_containerized | bool

  # Mandatory Docker restart, ensure all containerized services are running:
  - include: docker/restart.yml

  - name: Restart rpm node service
    service: name="{{ openshift.common.service_type }}-node" state=restarted
    when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool
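
  # Restore schedulability only for nodes that were schedulable before the upgrade: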
  - name: Set node schedulability
    command: >
      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
    delegate_to: "{{ groups.oo_first_master.0 }}"
    when: inventory_hostname in groups.oo_nodes_to_upgrade and was_schedulable | bool
    register: node_sched
    until: node_sched.rc == 0
    retries: 3
    delay: 1