# playbooks/init/facts.yml
---
- name: Ensure that all non-node hosts are accessible
  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config
  any_errors_fatal: true
  tasks:
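  # No tasks are listed here on purpose: the play's implicit fact gathering
  # is enough to confirm that every master, etcd, lb and nfs host is
  # reachable, and any_errors_fatal aborts the run if one of them is not.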

- name: Initialize host facts
  hosts: oo_all_hosts
  tasks:
  - name: Load openshift_facts module
    import_role:
      name: openshift_facts

  # TODO: Should this role be refactored into health_checks??
  - name: Run openshift_sanitize_inventory to set variables
    include_role:
      name: openshift_sanitize_inventory

  - name: Detect Operating System from ostree_booted
    stat:
      path: /run/ostree-booted
    register: ostree_booted

  - name: initialize_facts set fact openshift_is_atomic and openshift_is_containerized
    set_fact:
      openshift_is_atomic: "{{ ostree_booted.stat.exists }}"
      openshift_is_containerized: "{{ ostree_booted.stat.exists or (containerized | default(false) | bool) }}"
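  # openshift_is_atomic mirrors the presence of /run/ostree-booted, while
  # openshift_is_containerized can also be forced on a non-Atomic host by
  # opting in from the inventory, e.g. a line such as:
  #   containerized=true
  # (illustrative inventory snippet; the variable name comes from the
  # expression above).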

  # TODO: Should this be moved into health checks??
  # Seems as though any check that happens with a corresponding fail should move into health_checks
  - name: Validate Python version - Fedora requires Python 3
    fail:
      msg: |
        openshift-ansible requires Python 3 for {{ ansible_distribution }};
        For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html
    when:
    - ansible_distribution == 'Fedora'
    - ansible_python['version']['major'] != 3

  # TODO: Should this be moved into health checks??
  # Seems as though any check that happens with a corresponding fail should move into health_checks
  - name: Validate Python version - non-Fedora distributions require Python 2
    fail:
      msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}"
    when:
    - ansible_distribution != 'Fedora'
    - ansible_python['version']['major'] != 2

  # TODO: Should this be moved into health checks??
  # Seems as though any check that happens with a corresponding fail should move into health_checks
  # Fail as early as possible if Atomic and old version of Docker
  - when:
    - openshift_is_atomic | bool
    block:

    # See https://access.redhat.com/articles/2317361
    # and https://github.com/ansible/ansible/issues/15892
    # NOTE: the double quotes cannot be removed at this level or the docker command will fail
    # NOTE: When ansible >2.2.1.x is used this can be updated per
    # https://github.com/openshift/openshift-ansible/pull/3475#discussion_r103525121
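    # With the CURLY workaround the shell below effectively runs:
    #   docker version --format "{{json .Server.Version}}"
    # which prints the server version as a JSON-quoted string, e.g. "1.12.6"
    # (the version shown is only an illustration).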
    - name: Determine Atomic Host Docker Version
      shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"'
      register: l_atomic_docker_version

    - name: Assert Atomic Host Docker version is 1.12 or later
      assert:
        that:
        - l_atomic_docker_version.stdout | replace('"', '') is version_compare('1.12','>=')
        msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.

  - when:
    - not openshift_is_atomic | bool
    block:
    - name: Ensure openshift-ansible installer package deps are installed
      package:
        name: "{{ item }}"
        state: present
      with_items:
      - iproute
      - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
      - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
      - yum-utils
      register: result
      until: result is succeeded
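    # The register/until pair above retries the package transaction with
    # Ansible's default retry settings (3 attempts, 5 seconds apart) so a
    # transient repository or network problem does not abort the install.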

    - name: Ensure various deps for running system containers are installed
      package:
        name: "{{ item }}"
        state: present
      with_items:
      - atomic
      - ostree
      - runc
      when:
      - >
        (openshift_use_system_containers | default(False)) | bool
        or (openshift_use_etcd_system_container | default(False)) | bool
        or (openshift_use_openvswitch_system_container | default(False)) | bool
        or (openshift_use_node_system_container | default(False)) | bool
        or (openshift_use_master_system_container | default(False)) | bool
      register: result
      until: result is succeeded
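    # These packages are only pulled in when the inventory enables at least
    # one of the system-container modes on a non-Atomic host, e.g. a line
    # such as openshift_use_system_containers=true (illustrative snippet).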

  - name: Gather Cluster facts
    openshift_facts:
      role: common
      local_facts:
        deployment_type: "{{ openshift_deployment_type }}"
        deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
        hostname: "{{ openshift_hostname | default(None) }}"
        ip: "{{ openshift_ip | default(None) }}"
        public_hostname: "{{ openshift_public_hostname | default(None) }}"
        public_ip: "{{ openshift_public_ip | default(None) }}"
        portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
        http_proxy: "{{ openshift_http_proxy | default(None) }}"
        https_proxy: "{{ openshift_https_proxy | default(None) }}"
        no_proxy: "{{ openshift_no_proxy | default(None) }}"
        generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
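  # The local_facts above are stored under the openshift.common namespace
  # (e.g. openshift.common.hostname), which is how the no_proxy task below
  # reads the detected hostnames back out of hostvars.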

  - name: Set fact of no_proxy_internal_hostnames
    openshift_facts:
      role: common
      local_facts:
        no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
                                             | union(groups['oo_masters_to_config'])
                                             | union(groups['oo_etcd_to_config'] | default([])))
                                         | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
                                         }}"
    when:
    - openshift_http_proxy is defined or openshift_https_proxy is defined
    - openshift_generate_no_proxy_hosts | default(True) | bool
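  # The resulting fact is a comma-separated list of node, master and etcd
  # hostnames, e.g. "master1.example.com,node1.example.com" (hostnames shown
  # are purely illustrative).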

  - name: Initialize openshift.node.sdn_mtu
    openshift_facts:
      role: node
      local_facts:
        sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
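  # Leaving openshift_node_sdn_mtu unset lets the openshift_facts role pick
  # its own default; an inventory override would look like
  # openshift_node_sdn_mtu=1450 (illustrative value).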

  - name: initialize_facts set_fact repoquery command
    set_fact:
      repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
      repoquery_installed: "{{ 'dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins --installed' }}"
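  # Two variants are needed because dnf provides repoquery as a subcommand,
  # while yum-based hosts rely on the standalone repoquery tool shipped in
  # yum-utils (installed above); later roles use these commands to check
  # available rpm versions.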

- name: Initialize special first-master variables
  hosts: oo_first_master
  roles:
  - role: openshift_facts
  tasks:
  - set_fact:
      # We need to set up openshift_client_binary here for special uses of
      # delegate_to in later roles and plays.
      first_master_client_binary: "{{ openshift_client_binary }}"
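  # A later play might consume this fact roughly like the sketch below
  # (illustrative only, not a task from this playbook):
  #
  # - command: "{{ first_master_client_binary }} get nodes"
  #   delegate_to: "{{ groups.oo_first_master.0 }}"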