---
# input variables:
# - l_docker_upgrade
# - openshift.common.is_atomic
# - node_config_hook
# - openshift_pkg_version
# - openshift.common.is_containerized
# - deployment_type
# - openshift_release
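#
# Illustrative only: hypothetical values as the calling upgrade playbook
# might supply them (examples, not defaults):
#   l_docker_upgrade: True
#   openshift_pkg_version: "-1.4.1"
#   deployment_type: origin
#   openshift_release: "1.4"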

# tasks file for openshift_node_upgrade
- include: docker/upgrade.yml
  vars:
    # We will restart Docker ourselves after everything is ready:
    skip_docker_restart: True
  when:
  - l_docker_upgrade is defined
  - l_docker_upgrade | bool
  - not openshift.common.is_containerized | bool

- include: "{{ node_config_hook }}"
  when: node_config_hook is defined
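# node_config_hook is typically set by the calling upgrade playbook to a
# version-specific tasks file; a hypothetical example:
#   node_config_hook: "v3_4/node_config_upgrade.yml"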

- include: rpm_upgrade.yml
  vars:
    component: "node"
    openshift_version: "{{ openshift_pkg_version | default('') }}"
  when: not openshift.common.is_containerized | bool

- name: Remove obsolete docker-sdn-ovs.conf
  file:
    path: "/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf"
    state: absent
  when: (deployment_type == 'openshift-enterprise' and openshift_release | version_compare('3.4', '>='))
     or (deployment_type == 'origin' and openshift_release | version_compare('1.4', '>='))

- include: containerized_node_upgrade.yml
  when: openshift.common.is_containerized | bool

- name: Ensure containerized services stopped before Docker restart
  service:
    name: "{{ item }}"
    state: stopped
  with_items:
  - etcd_container
  - openvswitch
  - "{{ openshift.common.service_type }}-master"
  - "{{ openshift.common.service_type }}-master-api"
  - "{{ openshift.common.service_type }}-master-controllers"
  - "{{ openshift.common.service_type }}-node"
  failed_when: false
  when: openshift.common.is_containerized | bool
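# For reference: openshift.common.service_type expands to the deployment's
# service prefix, e.g. 'origin' or 'atomic-openshift', so the node unit above
# is 'origin-node' or 'atomic-openshift-node'.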

- name: Stop rpm based services
  service:
    name: "{{ item }}"
    state: stopped
  with_items:
  - "{{ openshift.common.service_type }}-node"
  - openvswitch
  failed_when: false
  when: not openshift.common.is_containerized | bool

- name: Upgrade openvswitch
  package:
    name: openvswitch
    state: latest
  when: not openshift.common.is_containerized | bool

- name: Start openvswitch
  systemd:
    name: openvswitch
    state: started
  when:
  - not openshift.common.is_containerized | bool

# Mandatory Docker restart, ensure all containerized services are running:
- include: docker/restart.yml

- name: Update oreg value
  yedit:
    src: "{{ openshift.common.config_base }}/node/node-config.yaml"
    key: 'imageConfig.format'
    value: "{{ oreg_url }}"
  when: oreg_url is defined
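# For illustration, imageConfig.format is a template string; a hypothetical
# oreg_url might look like:
#   oreg_url: "registry.example.com/openshift3/ose-${component}:${version}"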

# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
- name: Check for swap usage
  command: grep "^[^#].*swap" /etc/fstab
  # grep: match any line that does not begin with '#' and contains 'swap'.
  # An alternative check would be `swapon --summary`, but that only reports
  # swap that is currently active. If swap entries remain in fstab, swap will
  # be re-enabled at boot; grepping fstab also catches the case where swap
  # was disabled but the fstab entries were not removed.
  changed_when: false
  failed_when: false
  register: swap_result
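# An fstab entry the grep above would match looks like (illustrative):
#   /dev/mapper/rhel-swap   swap   swap   defaults   0 0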

# Disable Swap Block
- block:

  - name: Disable swap
    command: swapoff --all

  - name: Remove swap entries from /etc/fstab
    lineinfile:
      dest: /etc/fstab
      regexp: 'swap'
      state: absent

  when: swap_result.stdout_lines | length > 0
# End Disable Swap Block

- name: Restart rpm node service
  service:
    name: "{{ openshift.common.service_type }}-node"
    state: restarted
  when: not openshift.common.is_containerized | bool

- name: Wait for node to be ready
  oc_obj:
    state: list
    kind: node
    name: "{{ openshift.common.hostname | lower }}"
  register: node_output
  delegate_to: "{{ groups.oo_first_master.0 }}"
  until: node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool
  # Give the node two minutes to come back online.
  retries: 24
  delay: 5
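  # For reference, the until expression above inspects a node object shaped
  # like this (abridged, illustrative):
  #   status:
  #     conditions:
  #     - type: Ready
  #       status: "True"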