summaryrefslogtreecommitdiffstats
path: root/playbooks/common/openshift-cluster/upgrades/pre/config.yml
blob: 44af37b2d33dd93f38bd88f279b92e3408c1ad61 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
---
# for control-plane upgrade, several variables may be passed in to this play
# which may affect the tasks here and in imported playbooks.

# Pre-upgrade
- import_playbook: ../initialize_nodes_to_upgrade.yml

# Refresh yum/package repository configuration on every host involved in the
# upgrade so that subsequent package operations pull from the repos matching
# the target release. l_upgrade_repo_hosts is supplied by the calling
# upgrade playbook (e.g. upgrade_control_plane.yml / upgrade_nodes.yml).
- name: Update repos on upgrade hosts
  hosts: "{{ l_upgrade_repo_hosts }}"
  roles:
  - openshift_repos

# Build the comma-separated list of internal hostnames that must bypass any
# configured HTTP(S) proxy. The list is collected from the hostvars of all
# nodes, masters and (optionally) etcd hosts, using the custom
# lib_utils_oo_select_keys / lib_utils_oo_collect filter plugins.
# Only runs when a proxy is configured and no-proxy generation is enabled.
- name: Set openshift_no_proxy_internal_hostnames
  hosts: "{{ l_upgrade_no_proxy_hosts }}"
  tasks:
  - set_fact:
      openshift_no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
                                                    | union(groups['oo_masters_to_config'])
                                                    | union(groups['oo_etcd_to_config'] | default([])))
                                                | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
                                                }}"
    when:
    - openshift_http_proxy is defined or openshift_https_proxy is defined
    - openshift_generate_no_proxy_hosts | default(True) | bool

# Run pre-upgrade health checks (disk, memory, image availability) on the
# hosts being upgraded. any_errors_fatal aborts the whole upgrade if any
# host fails a check. The playbook context tells openshift_health_checker
# which check profile applies.
- name: OpenShift Health Checks
  hosts: "{{ l_upgrade_health_check_hosts }}"
  any_errors_fatal: true
  roles:
  - openshift_health_checker
  # Play-level vars must be a mapping; the list-of-dicts form previously used
  # here is deprecated by Ansible and removed in newer releases.
  vars:
    r_openshift_health_checker_playbook_context: upgrade
  post_tasks:
  - name: Run health checks (upgrade)
    action: openshift_health_check
    args:
      checks:
      - disk_availability
      - memory_availability
      - docker_image_availability

- import_playbook: ../disable_excluders.yml

- import_playbook: ../../../../init/version.yml
  vars:
    # Request specific openshift_release and let the openshift_version role handle converting this
    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
    # defined, and overriding the normal behavior of protecting the installed version
    openshift_release: "{{ openshift_upgrade_target }}"
    # openshift_protect_installed_version is passed in via upgrade_control_plane.yml
    # l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
    # l_openshift_version_check_hosts is passed via upgrade_control_plane.yml

- import_playbook: verify_cluster.yml

# If we're only upgrading nodes, we need to ensure masters are already upgraded
# When only nodes are being upgraded (l_upgrade_nodes_only), refuse to proceed
# unless every master already reports the target openshift_version — node
# versions must never be ahead of the masters they talk to.
- name: Verify masters are already upgraded
  hosts: oo_masters_to_config
  tasks:
  - fail:
      msg: "Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
    when:
    - l_upgrade_nodes_only | default(False) | bool
    # 'match' is a Jinja2 test, not a filter; the '| match(...)' filter form
    # was deprecated in Ansible 2.5 and removed in 2.9.
    - not openshift.common.version is match(openshift_version)

# If we're only upgrading nodes, skip this.
# Note: 'not' binds looser than the filter pipeline, so this evaluates as
# not ((l_upgrade_nodes_only | default(False)) | bool) — i.e. run the
# restart validation for any control-plane upgrade.
- import_playbook: ../../../../openshift-master/private/validate_restart.yml
  when: not (l_upgrade_nodes_only | default(False)) | bool

# Per-host sanity checks before upgrading: verify_upgrade_targets.yml runs on
# each target host, with openshift_facts applied first so the checks can read
# the gathered openshift.* facts.
- name: Verify upgrade targets
  hosts: "{{ l_upgrade_verify_targets_hosts }}"
  roles:
  - role: openshift_facts
  tasks:
  - include_tasks: verify_upgrade_targets.yml

# Check that the container runtime (docker) on each target host can be
# upgraded to a version compatible with the target OpenShift release; the
# actual checks live in the container_runtime role's docker_upgrade_check.yml.
- name: Verify docker upgrade targets
  hosts: "{{ l_upgrade_docker_target_hosts }}"
  tasks:
  - import_role:
      name: container_runtime
      tasks_from: docker_upgrade_check.yml