Diffstat (limited to 'roles')
-rw-r--r--  roles/calico/handlers/main.yml | 4
-rw-r--r--  roles/calico/templates/calico.service.j2 | 4
-rw-r--r--  roles/container_runtime/README.md | 44
-rw-r--r--  roles/container_runtime/defaults/main.yml | 129
-rw-r--r--  roles/container_runtime/handlers/main.yml (renamed from roles/docker/handlers/main.yml) | 4
-rw-r--r--  roles/container_runtime/meta/main.yml (renamed from roles/docker/meta/main.yml) | 2
-rw-r--r--  roles/container_runtime/tasks/common/atomic_proxy.yml (renamed from roles/openshift_atomic/tasks/proxy.yml) | 0
-rw-r--r--  roles/container_runtime/tasks/common/post.yml | 26
-rw-r--r--  roles/container_runtime/tasks/common/pre.yml | 12
-rw-r--r--  roles/container_runtime/tasks/common/setup_docker_symlink.yml | 38
-rw-r--r--  roles/container_runtime/tasks/common/syscontainer_packages.yml | 28
-rw-r--r--  roles/container_runtime/tasks/common/udev_workaround.yml (renamed from roles/docker/tasks/udev_workaround.yml) | 0
-rw-r--r--  roles/container_runtime/tasks/crio_firewall.yml (renamed from roles/docker/tasks/crio_firewall.yml) | 0
-rw-r--r--  roles/container_runtime/tasks/docker_sanity.yml | 27
-rw-r--r--  roles/container_runtime/tasks/main.yml | 2
-rw-r--r--  roles/container_runtime/tasks/package_docker.yml (renamed from roles/docker/tasks/package_docker.yml) | 68
-rw-r--r--  roles/container_runtime/tasks/registry_auth.yml (renamed from roles/docker/tasks/registry_auth.yml) | 0
-rw-r--r--  roles/container_runtime/tasks/systemcontainer_crio.yml | 96
-rw-r--r--  roles/container_runtime/tasks/systemcontainer_docker.yml | 101
-rw-r--r--  roles/container_runtime/templates/80-openshift-sdn.conf.j2 (renamed from roles/docker/templates/80-openshift-sdn.conf.j2) | 0
-rw-r--r--  roles/container_runtime/templates/crio.conf.j2 (renamed from roles/docker/templates/crio.conf.j2) | 0
-rw-r--r--  roles/container_runtime/templates/custom.conf.j2 (renamed from roles/docker/templates/custom.conf.j2) | 0
-rw-r--r--  roles/container_runtime/templates/daemon.json (renamed from roles/docker/templates/daemon.json) | 4
-rw-r--r--  roles/container_runtime/templates/overlay.conf.j2 (renamed from roles/docker/templates/overlay.conf.j2) | 0
-rw-r--r--  roles/container_runtime/templates/registries.conf (renamed from roles/docker/templates/registries.conf) | 0
-rw-r--r--  roles/container_runtime/templates/systemcontainercustom.conf.j2 (renamed from roles/docker/templates/systemcontainercustom.conf.j2) | 0
-rw-r--r--  roles/container_runtime/vars/main.yml (renamed from roles/docker/vars/main.yml) | 0
-rw-r--r--  roles/contiv/defaults/main.yml | 2
-rw-r--r--  roles/contiv/tasks/netplugin.yml | 2
-rw-r--r--  roles/contiv/templates/aci-gw.service | 2
-rw-r--r--  roles/docker/README.md | 43
-rw-r--r--  roles/docker/defaults/main.yml | 40
-rw-r--r--  roles/docker/tasks/main.yml | 93
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml | 187
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 190
-rw-r--r--  roles/etcd/defaults/main.yaml | 2
-rw-r--r--  roles/etcd/tasks/system_container.yml | 5
-rw-r--r--  roles/etcd/templates/etcd.docker.service | 8
-rw-r--r--  roles/flannel/defaults/main.yaml | 2
-rw-r--r--  roles/flannel/handlers/main.yml | 2
-rw-r--r--  roles/installer_checkpoint/callback_plugins/installer_checkpoint.py | 2
-rw-r--r--  roles/kuryr/templates/configmap.yaml.j2 | 1
-rw-r--r--  roles/nuage_master/handlers/main.yaml | 6
-rw-r--r--  roles/openshift_atomic/README.md | 28
-rw-r--r--  roles/openshift_atomic/meta/main.yml | 13
-rw-r--r--  roles/openshift_aws/tasks/build_node_group.yml | 6
-rw-r--r--  roles/openshift_aws/tasks/launch_config.yml | 2
-rw-r--r--  roles/openshift_aws/tasks/provision.yml | 10
-rw-r--r--  roles/openshift_aws/tasks/provision_instance.yml | 2
-rw-r--r--  roles/openshift_aws/tasks/provision_nodes.yml | 8
-rw-r--r--  roles/openshift_aws/tasks/seal_ami.yml | 2
-rw-r--r--  roles/openshift_aws/tasks/security_group.yml | 4
-rw-r--r--  roles/openshift_aws/tasks/upgrade_node_group.yml | 8
-rw-r--r--  roles/openshift_cli/defaults/main.yml | 5
-rw-r--r--  roles/openshift_cli/meta/main.yml | 2
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 6
-rw-r--r--  roles/openshift_cluster_autoscaler/tasks/main.yml | 2
-rw-r--r--  roles/openshift_docker/defaults/main.yml | 1
-rw-r--r--  roles/openshift_docker/meta/main.yml | 16
-rw-r--r--  roles/openshift_docker/tasks/main.yml | 1
-rw-r--r--  roles/openshift_docker_facts/defaults/main.yml | 1
-rw-r--r--  roles/openshift_docker_facts/meta/main.yml | 15
-rw-r--r--  roles/openshift_docker_facts/tasks/main.yml | 39
-rw-r--r--  roles/openshift_docker_facts/vars/main.yml | 2
-rw-r--r--  roles/openshift_etcd/meta/main.yml | 2
-rw-r--r--  roles/openshift_facts/defaults/main.yml | 94
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 396
-rw-r--r--  roles/openshift_hosted/defaults/main.yml | 11
-rw-r--r--  roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py | 2
-rw-r--r--  roles/openshift_hosted/tasks/registry.yml | 43
-rw-r--r--  roles/openshift_hosted/tasks/router.yml | 18
-rw-r--r--  roles/openshift_hosted/tasks/secure.yml | 14
-rw-r--r--  roles/openshift_hosted/tasks/storage/glusterfs.yml | 10
-rw-r--r--  roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml | 2
-rw-r--r--  roles/openshift_hosted/tasks/storage/object_storage.yml | 4
-rw-r--r--  roles/openshift_hosted/tasks/storage/s3.yml | 4
-rw-r--r--  roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2 | 2
-rw-r--r--  roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2 | 2
-rw-r--r--  roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2 | 2
-rw-r--r--  roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2 | 2
-rw-r--r--  roles/openshift_hosted_facts/tasks/main.yml | 18
-rw-r--r--  roles/openshift_hosted_metrics/README.md | 54
-rw-r--r--  roles/openshift_hosted_metrics/defaults/main.yml | 2
-rw-r--r--  roles/openshift_hosted_metrics/handlers/main.yml | 31
-rw-r--r--  roles/openshift_hosted_metrics/meta/main.yaml | 18
-rw-r--r--  roles/openshift_hosted_metrics/tasks/install.yml | 132
-rw-r--r--  roles/openshift_hosted_metrics/tasks/main.yaml | 75
-rw-r--r--  roles/openshift_hosted_metrics/vars/main.yaml | 21
-rw-r--r--  roles/openshift_loadbalancer/defaults/main.yml | 2
-rw-r--r--  roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 | 10
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 10
-rw-r--r--  roles/openshift_logging/handlers/main.yml | 4
-rw-r--r--  roles/openshift_logging/tasks/generate_certs.yaml | 18
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 6
-rw-r--r--  roles/openshift_logging/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_logging_curator/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_curator/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_eventrouter/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_logging_fluentd/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_fluentd/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_logging_kibana/defaults/main.yml | 2
-rw-r--r--  roles/openshift_logging_kibana/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_kibana/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_mux/defaults/main.yml | 2
-rw-r--r--  roles/openshift_logging_mux/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_mux/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_management/tasks/main.yml | 12
-rw-r--r--  roles/openshift_management/tasks/storage/storage.yml | 2
-rw-r--r--  roles/openshift_master/defaults/main.yml | 2
-rw-r--r--  roles/openshift_master/handlers/main.yml | 2
-rw-r--r--  roles/openshift_master/meta/main.yml | 1
-rw-r--r--  roles/openshift_master/tasks/main.yml | 51
-rw-r--r--  roles/openshift_master/tasks/registry_auth.yml | 8
-rw-r--r--  roles/openshift_master/tasks/system_container.yml | 4
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 19
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 | 8
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 | 8
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 4
-rw-r--r--  roles/openshift_master_cluster/README.md | 34
-rw-r--r--  roles/openshift_master_cluster/meta/main.yml | 15
-rw-r--r--  roles/openshift_master_cluster/tasks/configure.yml | 43
-rw-r--r--  roles/openshift_master_cluster/tasks/main.yml | 14
-rw-r--r--  roles/openshift_master_facts/defaults/main.yml | 1
-rw-r--r--  roles/openshift_master_facts/filter_plugins/openshift_master.py | 26
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 11
-rw-r--r--  roles/openshift_metrics/handlers/main.yml | 4
-rw-r--r--  roles/openshift_nfs/tasks/setup.yml | 3
-rw-r--r--  roles/openshift_node/defaults/main.yml | 3
-rw-r--r--  roles/openshift_node/meta/main.yml | 1
-rw-r--r--  roles/openshift_node/tasks/main.yml | 11
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 6
-rw-r--r--  roles/openshift_node/tasks/openvswitch_system_container.yml | 14
-rw-r--r--  roles/openshift_node/tasks/registry_auth.yml | 8
-rw-r--r--  roles/openshift_node/tasks/upgrade/restart.yml | 8
-rw-r--r--  roles/openshift_node/templates/node.service.j2 | 6
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 2
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.dep.service | 6
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 10
-rw-r--r--  roles/openshift_node/templates/openvswitch.docker.service | 8
-rw-r--r--  roles/openshift_node_certificates/defaults/main.yml | 2
-rw-r--r--  roles/openshift_node_certificates/handlers/main.yml | 4
-rw-r--r--  roles/openshift_node_facts/tasks/main.yml | 1
-rw-r--r--  roles/openshift_openstack/defaults/main.yml | 9
-rw-r--r--  roles/openshift_openstack/tasks/check-prerequisites.yml | 8
-rw-r--r--  roles/openshift_openstack/tasks/hostname.yml | 26
-rw-r--r--  roles/openshift_openstack/tasks/node-configuration.yml | 6
-rw-r--r--  roles/openshift_openstack/tasks/populate-dns.yml | 19
-rw-r--r--  roles/openshift_openstack/tasks/provision.yml | 4
-rw-r--r--  roles/openshift_openstack/templates/heat_stack.yaml.j2 | 136
-rw-r--r--  roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py | 157
-rw-r--r--  roles/openshift_persistent_volumes/defaults/main.yml | 9
-rw-r--r--  roles/openshift_persistent_volumes/meta/main.yml | 3
-rw-r--r--  roles/openshift_persistent_volumes/tasks/main.yml | 57
-rw-r--r--  roles/openshift_persistent_volumes/tasks/pv.yml | 17
-rw-r--r--  roles/openshift_persistent_volumes/tasks/pvc.yml | 17
-rw-r--r--  roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 | 2
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 10
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 6
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/main.yml | 10
-rw-r--r--  roles/openshift_storage_nfs/tasks/main.yml | 20
-rw-r--r--  roles/openshift_storage_nfs/templates/exports.j2 | 16
-rw-r--r--  roles/openshift_storage_nfs_lvm/tasks/main.yml | 2
-rw-r--r--  roles/openshift_version/meta/main.yml | 3
-rw-r--r--  roles/openshift_version/tasks/set_version_containerized.yml | 11
-rw-r--r--  roles/rhel_subscribe/tasks/main.yml | 2
169 files changed, 1136 insertions, 2167 deletions
diff --git a/roles/calico/handlers/main.yml b/roles/calico/handlers/main.yml
index 67fc0065f..9cc0604a3 100644
--- a/roles/calico/handlers/main.yml
+++ b/roles/calico/handlers/main.yml
@@ -3,10 +3,10 @@
become: yes
systemd: name=calico state=restarted
-- name: restart docker
+- name: restart container runtime
become: yes
systemd:
- name: "{{ openshift.docker.service_name }}"
+ name: "{{ openshift_docker_service_name }}"
state: restarted
register: l_docker_restart_docker_in_calico_result
until: not l_docker_restart_docker_in_calico_result | failed
diff --git a/roles/calico/templates/calico.service.j2 b/roles/calico/templates/calico.service.j2
index 7653e19b1..a7809b9f9 100644
--- a/roles/calico/templates/calico.service.j2
+++ b/roles/calico/templates/calico.service.j2
@@ -1,7 +1,7 @@
[Unit]
Description=calico
-After={{ openshift.docker.service_name }}.service
-Requires={{ openshift.docker.service_name }}.service
+After={{ openshift_docker_service_name }}.service
+Requires={{ openshift_docker_service_name }}.service
[Service]
Restart=always
diff --git a/roles/container_runtime/README.md b/roles/container_runtime/README.md
new file mode 100644
index 000000000..51f469aaf
--- /dev/null
+++ b/roles/container_runtime/README.md
@@ -0,0 +1,44 @@
+Container Runtime
+=========
+
+Ensures the docker package or a system container is installed, and optionally raises the timeout for systemd-udevd.service to 5 minutes.
+
+container-daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
+
+This role is designed to be used with include_role and tasks_from.
+
+Entry points
+------------
+* package_docker.yml - install and set up the docker package container runtime.
+* systemcontainer_docker.yml - set up docker as a system container.
+* systemcontainer_crio.yml - set up CRI-O as a system container.
+* registry_auth.yml - place docker login credentials.
+
+Requirements
+------------
+
+Ansible 2.4
+
+
+Dependencies
+------------
+
+Depends on openshift_facts having already been run.
+
+Example Playbook
+----------------
+
+ - hosts: servers
+ tasks:
+    - include_role:
+        name: container_runtime
+        tasks_from: package_docker.yml
+
+License
+-------
+
+ASL 2.0
+
+Author Information
+------------------
+
+Red Hat, Inc
diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml
new file mode 100644
index 000000000..bd96965ac
--- /dev/null
+++ b/roles/container_runtime/defaults/main.yml
@@ -0,0 +1,129 @@
+---
+docker_cli_auth_config_path: '/root/.docker'
+openshift_docker_signature_verification: False
+
+repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
+
+openshift_docker_alternative_creds: False
+
+# oreg_url is defined by user input.
+oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
+oreg_auth_credentials_replace: False
+
+openshift_docker_use_system_container: False
+openshift_docker_disable_push_dockerhub: False # bool
+openshift_docker_selinux_enabled: True
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+
+openshift_docker_hosted_registry_insecure: False # bool
+
+openshift_docker_hosted_registry_network_default: "{{ openshift_portal_net | default(False) }}"
+openshift_docker_hosted_registry_network: "{{ openshift_docker_hosted_registry_network_default }}"
+
+openshift_docker_additional_registries: []
+openshift_docker_blocked_registries: []
+openshift_docker_insecure_registries: []
+
+openshift_docker_ent_reg: 'registry.access.redhat.com'
+
+openshift_docker_options: False # str
+openshift_docker_log_driver: False # str
+openshift_docker_log_options: []
+
+# The l2_docker_* variables convert csv strings to lists, if
+# necessary. These variables should be used in place of their respective
+# openshift_docker_* counterparts to ensure the properly formatted lists are
+# utilized.
+l2_docker_additional_registries: "{% if openshift_docker_additional_registries is string %}{% if openshift_docker_additional_registries == '' %}[]{% elif ',' in openshift_docker_additional_registries %}{{ openshift_docker_additional_registries.split(',') | list }}{% else %}{{ [ openshift_docker_additional_registries ] }}{% endif %}{% else %}{{ openshift_docker_additional_registries }}{% endif %}"
+l2_docker_blocked_registries: "{% if openshift_docker_blocked_registries is string %}{% if openshift_docker_blocked_registries == '' %}[]{% elif ',' in openshift_docker_blocked_registries %}{{ openshift_docker_blocked_registries.split(',') | list }}{% else %}{{ [ openshift_docker_blocked_registries ] }}{% endif %}{% else %}{{ openshift_docker_blocked_registries }}{% endif %}"
+l2_docker_insecure_registries: "{% if openshift_docker_insecure_registries is string %}{% if openshift_docker_insecure_registries == '' %}[]{% elif ',' in openshift_docker_insecure_registries %}{{ openshift_docker_insecure_registries.split(',') | list }}{% else %}{{ [ openshift_docker_insecure_registries ] }}{% endif %}{% else %}{{ openshift_docker_insecure_registries }}{% endif %}"
+l2_docker_log_options: "{% if openshift_docker_log_options is string %}{% if ',' in openshift_docker_log_options %}{{ openshift_docker_log_options.split(',') | list }}{% else %}{{ [ openshift_docker_log_options ] }}{% endif %}{% else %}{{ openshift_docker_log_options }}{% endif %}"
+
+openshift_docker_use_etc_containers: False
+containers_registries_conf_path: /etc/containers/registries.conf
+
+r_crio_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_crio_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+
+r_crio_os_firewall_deny: []
+r_crio_os_firewall_allow:
+- service: crio
+ port: 10010/tcp
+
+
+openshift_docker_is_node_or_master: "{{ True if inventory_hostname in (groups['oo_masters_to_config']|default([])) or inventory_hostname in (groups['oo_nodes_to_config']|default([])) else False | bool }}"
+
+docker_alt_storage_path: /var/lib/containers/docker
+docker_default_storage_path: /var/lib/docker
+
+# Set local versions of facts that must be in json format for container-daemon.json
+# NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson
+l_docker_log_options: "{{ l2_docker_log_options | to_json }}"
+l_docker_additional_registries: "{{ l2_docker_additional_registries | to_json }}"
+l_docker_blocked_registries: "{{ l2_docker_blocked_registries | to_json }}"
+l_docker_insecure_registries: "{{ l2_docker_insecure_registries | to_json }}"
+l_docker_selinux_enabled: "{{ openshift_docker_selinux_enabled | to_json }}"
+
+docker_http_proxy: "{{ openshift_http_proxy | default('') }}"
+docker_https_proxy: "{{ openshift.common.https_proxy | default('') }}"
+docker_no_proxy: "{{ openshift.common.no_proxy | default('') }}"
+
+openshift_use_crio: False
+openshift_use_crio_only: False
+
+l_openshift_image_tag_default: "{{ openshift_release | default('latest') }}"
+l_openshift_image_tag: "{{ openshift_image_tag | default(l_openshift_image_tag_default) | string}}"
+
+# --------------------- #
+# systemcontainers_crio #
+# --------------------- #
+l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}"
+l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}"
+l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}"
+
+openshift_crio_image_tag_default: "latest"
+
+l_crt_crio_image_tag_dict:
+ openshift-enterprise: "{{ l_openshift_image_tag }}"
+ origin: "{{ openshift_crio_image_tag | default(openshift_crio_image_tag_default) }}"
+
+l_crt_crio_image_prepend_dict:
+ openshift-enterprise: "registry.access.redhat.com/openshift3"
+ origin: "docker.io/gscrivano"
+
+l_crt_crio_image_dict:
+ Fedora:
+ crio_image_name: "cri-o-fedora"
+ crio_image_tag: "latest"
+ CentOS:
+ crio_image_name: "cri-o-centos"
+ crio_image_tag: "latest"
+ RedHat:
+ crio_image_name: "cri-o"
+ crio_image_tag: "{{ openshift_crio_image_tag | default(l_crt_crio_image_tag_dict[openshift_deployment_type]) }}"
+
+l_crio_image_prepend: "{{ l_crt_crio_image_prepend_dict[openshift_deployment_type] }}"
+l_crio_image_name: "{{ l_crt_crio_image_dict[ansible_distribution]['crio_image_name'] }}"
+l_crio_image_tag: "{{ l_crt_crio_image_dict[ansible_distribution]['crio_image_tag'] }}"
+
+l_crio_image_default: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}"
+l_crio_image: "{{ openshift_crio_systemcontainer_image_override | default(l_crio_image_default) }}"
+
+# ----------------------- #
+# systemcontainers_docker #
+# ----------------------- #
+l_crt_docker_image_prepend_dict:
+ Fedora: "registry.fedoraproject.org/f25"
+ CentOS: "docker.io/gscrivano"
+ RedHat: "registry.access.redhat.com/openshift3"
+
+openshift_docker_image_tag_default: "latest"
+l_crt_docker_image_tag_dict:
+ openshift-enterprise: "{{ l_openshift_image_tag }}"
+ origin: "{{ openshift_docker_image_tag | default(openshift_docker_image_tag_default) }}"
+
+l_docker_image_prepend: "{{ l_crt_docker_image_prepend_dict[ansible_distribution] }}"
+l_docker_image_tag: "{{ l_crt_docker_image_tag_dict[openshift_deployment_type] }}"
+
+l_docker_image_default: "{{ l_docker_image_prepend }}/{{ openshift_docker_service_name }}:{{ l_docker_image_tag }}"
+l_docker_image: "{{ openshift_docker_systemcontainer_image_override | default(l_docker_image_default) }}"
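Note on the l2_docker_* defaults above: the Jinja expressions accept either a CSV string or a YAML list and always yield a list, and l_insecure_crio_registries then renders the quoted form crio.conf expects. A sketch of how the values resolve, using hypothetical registry names:

    # inventory input, CSV string form (hypothetical registries)
    openshift_docker_insecure_registries: "reg1.example.com:5000,reg2.example.com:5000"

    # l2_docker_insecure_registries evaluates to the list:
    #   ['reg1.example.com:5000', 'reg2.example.com:5000']
    # and l_insecure_crio_registries renders the quoted string for crio.conf:
    #   "reg1.example.com:5000", "reg2.example.com:5000"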
diff --git a/roles/docker/handlers/main.yml b/roles/container_runtime/handlers/main.yml
index 866ed0452..67cd6d782 100644
--- a/roles/docker/handlers/main.yml
+++ b/roles/container_runtime/handlers/main.yml
@@ -1,8 +1,8 @@
---
-- name: restart docker
+- name: restart container runtime
systemd:
- name: "{{ openshift.docker.service_name }}"
+ name: "{{ openshift_docker_service_name }}"
state: restarted
daemon_reload: yes
register: r_docker_restart_docker_result
diff --git a/roles/docker/meta/main.yml b/roles/container_runtime/meta/main.yml
index d5faae8df..02fceb745 100644
--- a/roles/docker/meta/main.yml
+++ b/roles/container_runtime/meta/main.yml
@@ -1,7 +1,7 @@
---
galaxy_info:
author: OpenShift
- description: docker package install
+ description: container runtime install and configure
company: Red Hat, Inc
license: ASL 2.0
min_ansible_version: 2.2
diff --git a/roles/openshift_atomic/tasks/proxy.yml b/roles/container_runtime/tasks/common/atomic_proxy.yml
index dde099984..dde099984 100644
--- a/roles/openshift_atomic/tasks/proxy.yml
+++ b/roles/container_runtime/tasks/common/atomic_proxy.yml
diff --git a/roles/container_runtime/tasks/common/post.yml b/roles/container_runtime/tasks/common/post.yml
new file mode 100644
index 000000000..d790eb2c0
--- /dev/null
+++ b/roles/container_runtime/tasks/common/post.yml
@@ -0,0 +1,26 @@
+---
+- name: Ensure /var/lib/containers exists
+ file:
+ path: /var/lib/containers
+ state: directory
+
+- name: Fix SELinux Permissions on /var/lib/containers
+ command: "restorecon -R /var/lib/containers/"
+ changed_when: false
+
+- meta: flush_handlers
+
+# This needs to run after docker is restarted to account for proxy settings.
+# registry_auth is called directly with include_role in some places, so we
+# have to put it in the root of the tasks/ directory.
+- include_tasks: ../registry_auth.yml
+
+- name: stat the docker data dir
+ stat:
+ path: "{{ docker_default_storage_path }}"
+ register: dockerstat
+
+- include_tasks: setup_docker_symlink.yml
+ when:
+ - openshift_use_crio
+ - dockerstat.stat.islnk is defined and not (dockerstat.stat.islnk | bool)
diff --git a/roles/container_runtime/tasks/common/pre.yml b/roles/container_runtime/tasks/common/pre.yml
new file mode 100644
index 000000000..990fe66da
--- /dev/null
+++ b/roles/container_runtime/tasks/common/pre.yml
@@ -0,0 +1,12 @@
+---
+- include_tasks: udev_workaround.yml
+ when: docker_udev_workaround | default(False) | bool
+
+- name: Add enterprise registry, if necessary
+ set_fact:
+ l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}"
+ when:
+ - openshift.common.deployment_type == 'openshift-enterprise'
+ - openshift_docker_ent_reg != ''
+ - openshift_docker_ent_reg not in l2_docker_additional_registries
+ - not openshift_use_crio_only | bool
diff --git a/roles/container_runtime/tasks/common/setup_docker_symlink.yml b/roles/container_runtime/tasks/common/setup_docker_symlink.yml
new file mode 100644
index 000000000..d7aeb192e
--- /dev/null
+++ b/roles/container_runtime/tasks/common/setup_docker_symlink.yml
@@ -0,0 +1,38 @@
+---
+- block:
+ - name: stop the current running docker
+ systemd:
+ state: stopped
+ name: "{{ openshift_docker_service_name }}"
+
+ - name: copy "{{ docker_default_storage_path }}" to "{{ docker_alt_storage_path }}"
+ command: "cp -r {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
+ register: results
+ failed_when:
+ - results.rc != 0
+
+ - name: "Set the selinux context on {{ docker_alt_storage_path }}"
+ command: "semanage fcontext -a -e {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
+ register: results
+ failed_when:
+ - results.rc == 1
+ - "'already exists' not in results.stderr"
+
+ - name: "restorecon the {{ docker_alt_storage_path }}"
+ command: "restorecon -r {{ docker_alt_storage_path }}"
+
+ - name: Remove the old docker location
+ file:
+ state: absent
+ path: "{{ docker_default_storage_path }}"
+
+ - name: Setup the link
+ file:
+ state: link
+ src: "{{ docker_alt_storage_path }}"
+ path: "{{ docker_default_storage_path }}"
+
+ - name: start docker
+ systemd:
+ state: started
+ name: "{{ openshift_docker_service_name }}"
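After this block runs, /var/lib/docker should be a symlink into /var/lib/containers/docker. A minimal verification sketch (not part of the role; it uses the same stat fields post.yml consults):

    - name: Verify docker storage was relocated
      stat:
        path: /var/lib/docker
      register: docker_dir

    - assert:
        that:
          - docker_dir.stat.islnk
          - docker_dir.stat.lnk_source == '/var/lib/containers/docker'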
diff --git a/roles/container_runtime/tasks/common/syscontainer_packages.yml b/roles/container_runtime/tasks/common/syscontainer_packages.yml
new file mode 100644
index 000000000..715ed492d
--- /dev/null
+++ b/roles/container_runtime/tasks/common/syscontainer_packages.yml
@@ -0,0 +1,28 @@
+---
+
+- name: Ensure container-selinux is installed
+ package:
+ name: container-selinux
+ state: present
+ when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
+
+# Used to pull and install the system container
+- name: Ensure atomic is installed
+ package:
+ name: atomic
+ state: present
+ when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
+
+# At the time of writing the atomic command requires runc for its own use. This
+# task is here in the event that the atomic package ever removes the dependency.
+- name: Ensure runc is installed
+ package:
+ name: runc
+ state: present
+ when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
diff --git a/roles/docker/tasks/udev_workaround.yml b/roles/container_runtime/tasks/common/udev_workaround.yml
index 257c3123d..257c3123d 100644
--- a/roles/docker/tasks/udev_workaround.yml
+++ b/roles/container_runtime/tasks/common/udev_workaround.yml
diff --git a/roles/docker/tasks/crio_firewall.yml b/roles/container_runtime/tasks/crio_firewall.yml
index fbd1ff515..fbd1ff515 100644
--- a/roles/docker/tasks/crio_firewall.yml
+++ b/roles/container_runtime/tasks/crio_firewall.yml
diff --git a/roles/container_runtime/tasks/docker_sanity.yml b/roles/container_runtime/tasks/docker_sanity.yml
new file mode 100644
index 000000000..e62cf5505
--- /dev/null
+++ b/roles/container_runtime/tasks/docker_sanity.yml
@@ -0,0 +1,27 @@
+---
+# Sanity checks to ensure the role will complete and provide helpful error
+# messages for common problems.
+
+- name: Error out if Docker pre-installed but too old
+ fail:
+ msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined
+
+- name: Error out if requested Docker is too old
+ fail:
+ msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required."
+ when: docker_version is defined and docker_version | version_compare('1.9.1', '<')
+
+# If a docker_version was requested, sanity check that we can install or upgrade to it, and
+# no downgrade is required.
+- name: Fail if Docker version requested but downgrade is required
+ fail:
+ msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>')
+
+# This involves an extremely slow migration process, users should instead run the
+# Docker 1.10 upgrade playbook to accomplish this.
+- name: Error out if attempting to upgrade Docker across the 1.10 boundary
+ fail:
+ msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=')
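These checks all key off docker_version and the currently installed version. A hypothetical inventory pin that satisfies them on a fresh host:

    # inventory sketch (hypothetical version): must be >= 1.9.1, must not
    # require a downgrade, and must not cross the 1.10 boundary on a host
    # with Docker < 1.10 already installed
    docker_version: "1.12.6"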
diff --git a/roles/container_runtime/tasks/main.yml b/roles/container_runtime/tasks/main.yml
new file mode 100644
index 000000000..96d8606c6
--- /dev/null
+++ b/roles/container_runtime/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+# This role is meant to be used with include_role and tasks_from.
diff --git a/roles/docker/tasks/package_docker.yml b/roles/container_runtime/tasks/package_docker.yml
index 044b04478..89899c9cf 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/container_runtime/tasks/package_docker.yml
@@ -1,4 +1,6 @@
---
+- include_tasks: common/pre.yml
+
- name: Get current installed Docker version
command: "{{ repoquery_installed }} --qf '%{version}' docker"
when: not openshift.common.is_atomic | bool
@@ -7,35 +9,16 @@
until: curr_docker_version | succeeded
changed_when: false
-- name: Error out if Docker pre-installed but too old
- fail:
- msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined
-
-- name: Error out if requested Docker is too old
- fail:
- msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required."
- when: docker_version is defined and docker_version | version_compare('1.9.1', '<')
-
-# If a docker_version was requested, sanity check that we can install or upgrade to it, and
-# no downgrade is required.
-- name: Fail if Docker version requested but downgrade is required
- fail:
- msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>')
-
-# This involves an extremely slow migration process, users should instead run the
-# Docker 1.10 upgrade playbook to accomplish this.
-- name: Error out if attempting to upgrade Docker across the 1.10 boundary
- fail:
- msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=')
+# Some basic checks to ensure the role will complete
+- include_tasks: docker_sanity.yml
# Make sure Docker is installed, but does not update a running version.
# Docker upgrades are handled by a separate playbook.
# Note: The curr_docker_version.stdout check can be removed when https://github.com/ansible/ansible/issues/33187 gets fixed.
- name: Install Docker
- package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
+ package:
+ name: "docker{{ '-' + docker_version if docker_version is defined else '' }}"
+ state: present
when: not openshift.common.is_atomic | bool and not curr_docker_version | skipped and not curr_docker_version.stdout != ''
register: result
until: result | success
@@ -52,7 +35,7 @@
dest: "{{ docker_systemd_dir }}/custom.conf"
src: custom.conf.j2
notify:
- - restart docker
+ - restart container runtime
when: not (os_firewall_use_firewalld | default(False)) | bool
- stat: path=/etc/sysconfig/docker
@@ -78,7 +61,7 @@
reg_fact_val: "{{ l2_docker_insecure_registries }}"
reg_flag: --insecure-registry
notify:
- - restart docker
+ - restart container runtime
- name: Place additional/blocked/insecure registries in /etc/containers/registries.conf
template:
@@ -86,7 +69,7 @@
src: registries.conf
when: openshift_docker_use_etc_containers | bool
notify:
- - restart docker
+ - restart container runtime
- name: Set Proxy Settings
lineinfile:
@@ -96,30 +79,34 @@
state: "{{ 'present' if item.reg_fact_val != '' else 'absent'}}"
with_items:
- reg_conf_var: HTTP_PROXY
- reg_fact_val: "{{ docker_http_proxy | default('') }}"
+ reg_fact_val: "{{ docker_http_proxy }}"
- reg_conf_var: HTTPS_PROXY
- reg_fact_val: "{{ docker_https_proxy | default('') }}"
+ reg_fact_val: "{{ docker_https_proxy }}"
- reg_conf_var: NO_PROXY
- reg_fact_val: "{{ docker_no_proxy | default('') }}"
+ reg_fact_val: "{{ docker_no_proxy }}"
notify:
- - restart docker
+ - restart container runtime
when:
- - docker_check.stat.isreg is defined and docker_check.stat.isreg and '"http_proxy" in openshift.common or "https_proxy" in openshift.common'
+ - docker_check.stat.isreg is defined
+ - docker_check.stat.isreg
+ - docker_http_proxy != '' or docker_https_proxy != ''
- name: Set various Docker options
lineinfile:
dest: /etc/sysconfig/docker
regexp: '^OPTIONS=.*$'
line: "OPTIONS='\
- {% if ansible_selinux.status | default(None) == 'enabled' and docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %} \
- {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %} \
- {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \
+ {% if ansible_selinux.status | default(None) == 'enabled' and openshift_docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %} \
+ {% if openshift_docker_log_driver | bool %} --log-driver {{ openshift_docker_log_driver }}{% endif %} \
+ {% if l2_docker_log_options != [] %} {{ l2_docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \
+ {% if openshift_docker_hosted_registry_insecure and (openshift_docker_hosted_registry_network | bool) %} --insecure-registry={{ openshift_docker_hosted_registry_network }} {% endif %} \
{% if docker_options is defined %} {{ docker_options }}{% endif %} \
- {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %} \
+ {% if openshift_docker_options %} {{ openshift_docker_options }}{% endif %} \
+ {% if openshift_docker_disable_push_dockerhub %} --confirm-def-push={{ openshift_docker_disable_push_dockerhub | bool }}{% endif %} \
--signature-verification={{ openshift_docker_signature_verification | bool }}'"
when: docker_check.stat.isreg is defined and docker_check.stat.isreg
notify:
- - restart docker
+ - restart container runtime
- stat: path=/etc/sysconfig/docker-network
register: sysconfig_docker_network_check
@@ -134,7 +121,7 @@
- sysconfig_docker_network_check.stat.isreg is defined
- sysconfig_docker_network_check.stat.isreg
notify:
- - restart docker
+ - restart container runtime
# The following task is needed as the systemd module may report a change in
# state even though docker is already running.
@@ -157,7 +144,4 @@
- set_fact:
docker_service_status_changed: "{{ (r_docker_package_docker_start_result | changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}"
-- meta: flush_handlers
-
-# This needs to run after docker is restarted to account for proxy settings.
-- include_tasks: registry_auth.yml
+- include_tasks: common/post.yml
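For illustration, the "Set various Docker options" task above renders /etc/sysconfig/docker from the inventory. With SELinux enabled and the hypothetical values openshift_docker_log_driver=json-file and openshift_docker_log_options=['max-size=50m'], the result is roughly:

    # /etc/sysconfig/docker (illustrative rendering; spacing approximate)
    OPTIONS=' --selinux-enabled  --log-driver json-file  --log-opt max-size=50m --signature-verification=False'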
diff --git a/roles/docker/tasks/registry_auth.yml b/roles/container_runtime/tasks/registry_auth.yml
index 2c7bc5711..2c7bc5711 100644
--- a/roles/docker/tasks/registry_auth.yml
+++ b/roles/container_runtime/tasks/registry_auth.yml
diff --git a/roles/container_runtime/tasks/systemcontainer_crio.yml b/roles/container_runtime/tasks/systemcontainer_crio.yml
new file mode 100644
index 000000000..5ea7df650
--- /dev/null
+++ b/roles/container_runtime/tasks/systemcontainer_crio.yml
@@ -0,0 +1,96 @@
+---
+# TODO: Much of this file is shared with container engine tasks
+- name: Check we are not using node as a Docker container with CRI-O
+ fail: msg='Cannot use CRI-O with node configured as a Docker container'
+ when:
+ - openshift.common.is_containerized | bool
+ - not openshift.common.is_node_system_container | bool
+
+- include_tasks: common/pre.yml
+
+- include_tasks: common/syscontainer_packages.yml
+
+- name: Check that overlay is in the kernel
+ shell: lsmod | grep overlay
+ register: l_has_overlay_in_kernel
+ ignore_errors: yes
+ failed_when: false
+
+- when: l_has_overlay_in_kernel.rc != 0
+ block:
+
+ - name: Add overlay to modprobe.d
+ template:
+ dest: /etc/modules-load.d/overlay.conf
+ src: overlay.conf.j2
+ backup: yes
+
+ - name: Manually modprobe overlay into the kernel
+ command: modprobe overlay
+
+ - name: Enable and start systemd-modules-load
+ service:
+ name: systemd-modules-load
+ enabled: yes
+ state: restarted
+
+- name: Ensure proxies are in the atomic.conf
+ include_tasks: common/atomic_proxy.yml
+
+# Be nice and let the user see the variable result
+- debug:
+ var: l_crio_image
+
+# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
+- name: Pre-pull CRI-O System Container image
+ command: "atomic pull --storage ostree {{ l_crio_image }}"
+ changed_when: false
+ environment:
+ NO_PROXY: "{{ openshift.common.no_proxy | default('') }}"
+
+- name: Install CRI-O System Container
+ oc_atomic_container:
+ name: "cri-o"
+ image: "{{ l_crio_image }}"
+ state: latest
+
+- name: Remove CRI-O default configuration files
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /etc/cni/net.d/200-loopback.conf
+ - /etc/cni/net.d/100-crio-bridge.conf
+
+- name: Create the CRI-O configuration
+ template:
+ dest: /etc/crio/crio.conf
+ src: crio.conf.j2
+ backup: yes
+
+- name: Ensure CNI configuration directory exists
+ file:
+ path: /etc/cni/net.d/
+ state: directory
+
+- name: setup firewall for CRI-O
+ import_tasks: crio_firewall.yml
+
+- name: Configure the CNI network
+ template:
+ dest: /etc/cni/net.d/openshift-sdn.conf
+ src: 80-openshift-sdn.conf.j2
+
+- name: Start the CRI-O service
+ systemd:
+ name: "cri-o"
+ enabled: yes
+ state: started
+ daemon_reload: yes
+ register: start_result
+
+# If we are using crio only, docker.service might not be available for
+# 'docker login'
+- include_tasks: common/post.yml
+ vars:
+ openshift_docker_alternative_creds: "{{ openshift_use_crio_only }}"
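Selecting this code path is driven from the inventory; a minimal sketch using the switches defined in defaults/main.yml (the override value is hypothetical):

    openshift_use_crio: True
    # optionally skip the docker package entirely:
    openshift_use_crio_only: False
    # optional image override (hypothetical value):
    # openshift_crio_systemcontainer_image_override: "mirror.example.com/cri-o:latest"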
diff --git a/roles/container_runtime/tasks/systemcontainer_docker.yml b/roles/container_runtime/tasks/systemcontainer_docker.yml
new file mode 100644
index 000000000..10570fe34
--- /dev/null
+++ b/roles/container_runtime/tasks/systemcontainer_docker.yml
@@ -0,0 +1,101 @@
+---
+# If docker_options are provided we should fail. We should not install docker and ignore
+# the user's configuration. NOTE: docker_options == inventory:openshift_docker_options
+- name: Fail quickly if openshift_docker_options are set
+ assert:
+ that:
+ - "{% if not openshift_docker_options %}1{% else %}0{% endif %}"
+ msg: |
+ Docker via System Container does not allow for the use of the openshift_docker_options
+ variable. If you want to use openshift_docker_options you will need to use the
+ traditional docker package install. Otherwise, comment out openshift_docker_options
+ in your inventory file.
+
+- include_tasks: common/pre.yml
+
+- include_tasks: common/syscontainer_packages.yml
+
+# Make sure Docker is installed so we are able to use the client
+- name: Install Docker so we can use the client
+ package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
+ when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
+
+# Make sure docker is disabled. Errors are ignored.
+- name: Disable Docker
+ systemd:
+ name: docker
+ enabled: no
+ state: stopped
+ daemon_reload: yes
+ ignore_errors: True
+ register: r_docker_systemcontainer_docker_stop_result
+ until: not r_docker_systemcontainer_docker_stop_result | failed
+ retries: 3
+ delay: 30
+
+- name: Ensure proxies are in the atomic.conf
+ include_tasks: common/atomic_proxy.yml
+
+# Be nice and let the user see the variable result
+- debug:
+ var: l_docker_image
+
+# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
+- name: Pre-pull Container Engine System Container image
+ command: "atomic pull --storage ostree {{ l_docker_image }}"
+ changed_when: false
+ environment:
+ NO_PROXY: "{{ docker_no_proxy }}"
+
+
+- name: Ensure container-engine.service.d directory exists
+ file:
+ path: "{{ container_engine_systemd_dir }}"
+ state: directory
+
+- name: Ensure /etc/docker directory exists
+ file:
+ path: "{{ docker_conf_dir }}"
+ state: directory
+
+- name: Install Container Engine System Container
+ oc_atomic_container:
+ name: "{{ openshift_docker_service_name }}"
+ image: "{{ l_docker_image }}"
+ state: latest
+
+- name: Configure Container Engine Service File
+ template:
+ dest: "{{ container_engine_systemd_dir }}/custom.conf"
+ src: systemcontainercustom.conf.j2
+
+# Configure container-engine using the container-daemon.json file
+# NOTE: daemon.json and container-daemon.json have been separated to avoid
+# collision.
+- name: Configure Container Engine
+ template:
+ dest: "{{ docker_conf_dir }}/container-daemon.json"
+ src: daemon.json
+
+# Enable and start the container-engine service
+- name: Start the Container Engine service
+ systemd:
+ name: "{{ openshift_docker_service_name }}"
+ enabled: yes
+ state: started
+ daemon_reload: yes
+ register: r_docker_systemcontainer_docker_start_result
+ until: not r_docker_systemcontainer_docker_start_result | failed
+ retries: 3
+ delay: 30
+
+- set_fact:
+ docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}"
+
+# Since docker is running as a system container, docker login will fail to create
+# credentials. Use alternate method if requiring authenticated registries.
+- include_tasks: common/post.yml
+ vars:
+ openshift_docker_alternative_creds: True
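The system-container docker path has a single inventory switch; per the assert at the top of the file, openshift_docker_options must stay unset. An inventory sketch:

    # run docker as the 'container-engine' system container
    # (openshift_docker_service_name resolves accordingly in defaults/main.yml)
    openshift_docker_use_system_container: True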
diff --git a/roles/docker/templates/80-openshift-sdn.conf.j2 b/roles/container_runtime/templates/80-openshift-sdn.conf.j2
index a693aea5f..a693aea5f 100644
--- a/roles/docker/templates/80-openshift-sdn.conf.j2
+++ b/roles/container_runtime/templates/80-openshift-sdn.conf.j2
diff --git a/roles/docker/templates/crio.conf.j2 b/roles/container_runtime/templates/crio.conf.j2
index 3f066a17f..3f066a17f 100644
--- a/roles/docker/templates/crio.conf.j2
+++ b/roles/container_runtime/templates/crio.conf.j2
diff --git a/roles/docker/templates/custom.conf.j2 b/roles/container_runtime/templates/custom.conf.j2
index 713412473..713412473 100644
--- a/roles/docker/templates/custom.conf.j2
+++ b/roles/container_runtime/templates/custom.conf.j2
diff --git a/roles/docker/templates/daemon.json b/roles/container_runtime/templates/daemon.json
index a41b7cdbd..383963bd3 100644
--- a/roles/docker/templates/daemon.json
+++ b/roles/container_runtime/templates/daemon.json
@@ -5,8 +5,8 @@
"disable-legacy-registry": false,
"exec-opts": ["native.cgroupdriver=systemd"],
"insecure-registries": {{ l_docker_insecure_registries }},
-{% if docker_log_driver is defined %}
- "log-driver": "{{ docker_log_driver }}",
+{% if openshift_docker_log_driver is defined %}
+ "log-driver": "{{ openshift_docker_log_driver }}",
{%- endif %}
"log-opts": {{ l_docker_log_options }},
"runtimes": {
diff --git a/roles/docker/templates/overlay.conf.j2 b/roles/container_runtime/templates/overlay.conf.j2
index 782f46c2e..782f46c2e 100644
--- a/roles/docker/templates/overlay.conf.j2
+++ b/roles/container_runtime/templates/overlay.conf.j2
diff --git a/roles/docker/templates/registries.conf b/roles/container_runtime/templates/registries.conf
index d379b2be0..d379b2be0 100644
--- a/roles/docker/templates/registries.conf
+++ b/roles/container_runtime/templates/registries.conf
diff --git a/roles/docker/templates/systemcontainercustom.conf.j2 b/roles/container_runtime/templates/systemcontainercustom.conf.j2
index 86eebfba6..86eebfba6 100644
--- a/roles/docker/templates/systemcontainercustom.conf.j2
+++ b/roles/container_runtime/templates/systemcontainercustom.conf.j2
diff --git a/roles/docker/vars/main.yml b/roles/container_runtime/vars/main.yml
index 4e940b7f5..4e940b7f5 100644
--- a/roles/docker/vars/main.yml
+++ b/roles/container_runtime/vars/main.yml
diff --git a/roles/contiv/defaults/main.yml b/roles/contiv/defaults/main.yml
index b5d2f7c6e..aa976d921 100644
--- a/roles/contiv/defaults/main.yml
+++ b/roles/contiv/defaults/main.yml
@@ -119,3 +119,5 @@ contiv_h1_gw_default: "10.129.0.1"
# contiv default private subnet for ext access
contiv_private_ext_subnet: "10.130.0.0/16"
+
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml
index 0b2f91bab..cf92a8cc0 100644
--- a/roles/contiv/tasks/netplugin.yml
+++ b/roles/contiv/tasks/netplugin.yml
@@ -105,7 +105,7 @@
- name: Docker | Restart docker
service:
- name: "{{ openshift.docker.service_name }}"
+ name: "{{ openshift_docker_service_name }}"
state: restarted
when: docker_updated|changed
register: l_docker_restart_docker_in_contiv_result
diff --git a/roles/contiv/templates/aci-gw.service b/roles/contiv/templates/aci-gw.service
index 4506d2231..90bb98001 100644
--- a/roles/contiv/templates/aci-gw.service
+++ b/roles/contiv/templates/aci-gw.service
@@ -1,6 +1,6 @@
[Unit]
Description=Contiv ACI gw
-After=auditd.service systemd-user-sessions.service time-sync.target {{ openshift.docker.service_name }}.service
+After=auditd.service systemd-user-sessions.service time-sync.target {{ openshift_docker_service_name }}.service
[Service]
ExecStart={{ bin_dir }}/aci_gw.sh start
diff --git a/roles/docker/README.md b/roles/docker/README.md
deleted file mode 100644
index 19908c036..000000000
--- a/roles/docker/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-Docker
-=========
-
-Ensures docker package or system container is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes.
-
-container-daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
-
-Requirements
-------------
-
-Ansible 2.2
-
-Role Variables
---------------
-
-docker_conf_dir: location of the Docker configuration directory
-docker_systemd_dir location of the systemd directory for Docker
-docker_udev_workaround: raises udevd timeout to 5 minutes (https://bugzilla.redhat.com/show_bug.cgi?id=1272446)
-udevw_udevd_dir: location of systemd config for systemd-udevd.service
-
-Dependencies
-------------
-
-Depends on the os_firewall role.
-
-Example Playbook
-----------------
-
- - hosts: servers
- roles:
- - role: docker
- docker_udev_workaround: "true"
- docker_use_system_container: False
-
-License
--------
-
-ASL 2.0
-
-Author Information
-------------------
-
-OpenShift operations, Red Hat, Inc
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
deleted file mode 100644
index 224844a06..000000000
--- a/roles/docker/defaults/main.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-docker_cli_auth_config_path: '/root/.docker'
-openshift_docker_signature_verification: False
-
-openshift_docker_alternative_creds: False
-
-# oreg_url is defined by user input.
-oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
-oreg_auth_credentials_replace: False
-
-openshift_docker_additional_registries: []
-openshift_docker_blocked_registries: []
-openshift_docker_insecure_registries: []
-
-openshift_docker_ent_reg: 'registry.access.redhat.com'
-
-# The l2_docker_* variables convert csv strings to lists, if
-# necessary. These variables should be used in place of their respective
-# openshift_docker_* counterparts to ensure the properly formatted lists are
-# utilized.
-l2_docker_additional_registries: "{% if openshift_docker_additional_registries is string %}{% if openshift_docker_additional_registries == '' %}[]{% elif ',' in openshift_docker_additional_registries %}{{ openshift_docker_additional_registries.split(',') | list }}{% else %}{{ [ openshift_docker_additional_registries ] }}{% endif %}{% else %}{{ openshift_docker_additional_registries }}{% endif %}"
-l2_docker_blocked_registries: "{% if openshift_docker_blocked_registries is string %}{% if openshift_docker_blocked_registries == '' %}[]{% elif ',' in openshift_docker_blocked_registries %}{{ openshift_docker_blocked_registries.split(',') | list }}{% else %}{{ [ openshift_docker_blocked_registries ] }}{% endif %}{% else %}{{ openshift_docker_blocked_registries }}{% endif %}"
-l2_docker_insecure_registries: "{% if openshift_docker_insecure_registries is string %}{% if openshift_docker_insecure_registries == '' %}[]{% elif ',' in openshift_docker_insecure_registries %}{{ openshift_docker_insecure_registries.split(',') | list }}{% else %}{{ [ openshift_docker_insecure_registries ] }}{% endif %}{% else %}{{ openshift_docker_insecure_registries }}{% endif %}"
-
-openshift_docker_use_etc_containers: False
-containers_registries_conf_path: /etc/containers/registries.conf
-
-r_crio_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
-r_crio_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
-
-r_crio_os_firewall_deny: []
-r_crio_os_firewall_allow:
-- service: crio
- port: 10010/tcp
-
-
-openshift_docker_is_node_or_master: "{{ True if inventory_hostname in (groups['oo_masters_to_config']|default([])) or inventory_hostname in (groups['oo_nodes_to_config']|default([])) else False | bool }}"
-
-docker_alt_storage_path: /var/lib/containers/docker
-docker_default_storage_path: /var/lib/docker
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
deleted file mode 100644
index b02a74711..000000000
--- a/roles/docker/tasks/main.yml
+++ /dev/null
@@ -1,93 +0,0 @@
----
-# These tasks dispatch to the proper set of docker tasks based on the
-# inventory:openshift_docker_use_system_container variable
-
-- include_tasks: udev_workaround.yml
- when: docker_udev_workaround | default(False) | bool
-
-- set_fact:
- l_use_system_container: "{{ openshift.docker.use_system_container | default(False) }}"
- l_use_crio: "{{ openshift_use_crio | default(False) }}"
- l_use_crio_only: "{{ openshift_use_crio_only | default(False) }}"
-
-- name: Add enterprise registry, if necessary
- set_fact:
- l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}"
- when:
- - openshift.common.deployment_type == 'openshift-enterprise'
- - openshift_docker_ent_reg != ''
- - openshift_docker_ent_reg not in l2_docker_additional_registries
- - not l_use_crio_only
-
-- name: Use Package Docker if Requested
- include_tasks: package_docker.yml
- when:
- - not l_use_system_container
- - not l_use_crio_only
-
-- name: Ensure /var/lib/containers exists
- file:
- path: /var/lib/containers
- state: directory
-
-- name: Fix SELinux Permissions on /var/lib/containers
- command: "restorecon -R /var/lib/containers/"
- changed_when: false
-
-- name: Use System Container Docker if Requested
- include_tasks: systemcontainer_docker.yml
- when:
- - l_use_system_container
- - not l_use_crio_only
-
-- name: Add CRI-O usage Requested
- include_tasks: systemcontainer_crio.yml
- when:
- - l_use_crio
- - openshift_docker_is_node_or_master | bool
-
-- name: stat the docker data dir
- stat:
- path: "{{ docker_default_storage_path }}"
- register: dockerstat
-
-- when:
- - l_use_crio
- - dockerstat.stat.islnk is defined and not (dockerstat.stat.islnk | bool)
- block:
- - name: stop the current running docker
- systemd:
- state: stopped
- name: "{{ openshift.docker.service_name }}"
-
- - name: copy "{{ docker_default_storage_path }}" to "{{ docker_alt_storage_path }}"
- command: "cp -r {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
- register: results
- failed_when:
- - results.rc != 0
-
- - name: "Set the selinux context on {{ docker_alt_storage_path }}"
- command: "semanage fcontext -a -e {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
- register: results
- failed_when:
- - results.rc == 1
- - "'already exists' not in results.stderr"
-
- - name: "restorecon the {{ docker_alt_storage_path }}"
- command: "restorecon -r {{ docker_alt_storage_path }}"
-
- - name: Remove the old docker location
- file:
- state: absent
- path: "{{ docker_default_storage_path }}"
-
- - name: Setup the link
- file:
- state: link
- src: "{{ docker_alt_storage_path }}"
- path: "{{ docker_default_storage_path }}"
-
- - name: start docker
- systemd:
- state: started
- name: "{{ openshift.docker.service_name }}"
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
deleted file mode 100644
index 3439aa353..000000000
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ /dev/null
@@ -1,187 +0,0 @@
----
-
-# TODO: Much of this file is shared with container engine tasks
-- set_fact:
- l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}"
-- set_fact:
- l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}"
-- set_fact:
- l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}"
-
-- set_fact:
- l_openshift_image_tag: "{{ openshift_image_tag | string }}"
- when: openshift_image_tag is defined
-
-- set_fact:
- l_openshift_image_tag: "latest"
- when:
- - openshift_image_tag is not defined
- - openshift_release == "latest"
-
-- set_fact:
- l_openshift_image_tag: "{{ openshift_release | string }}"
- when:
- - openshift_image_tag is not defined
- - openshift_release != "latest"
-
-- name: Ensure container-selinux is installed
- package:
- name: container-selinux
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
-
-- name: Check we are not using node as a Docker container with CRI-O
- fail: msg='Cannot use CRI-O with node configured as a Docker container'
- when:
- - openshift.common.is_containerized | bool
- - not openshift.common.is_node_system_container | bool
-
-# Used to pull and install the system container
-- name: Ensure atomic is installed
- package:
- name: atomic
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
-
-# At the time of writing the atomic command requires runc for it's own use. This
-# task is here in the even that the atomic package ever removes the dependency.
-- name: Ensure runc is installed
- package:
- name: runc
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
-
-
-- name: Check that overlay is in the kernel
- shell: lsmod | grep overlay
- register: l_has_overlay_in_kernel
- ignore_errors: yes
- failed_when: false
-
-- when: l_has_overlay_in_kernel.rc != 0
- block:
-
- - name: Add overlay to modprobe.d
- template:
- dest: /etc/modules-load.d/overlay.conf
- src: overlay.conf.j2
- backup: yes
-
- - name: Manually modprobe overlay into the kernel
- command: modprobe overlay
-
- - name: Enable and start systemd-modules-load
- service:
- name: systemd-modules-load
- enabled: yes
- state: restarted
-
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
-
-- block:
-
- - name: Set CRI-O image defaults
- set_fact:
- l_crio_image_prepend: "docker.io/gscrivano"
- l_crio_image_name: "cri-o-fedora"
- l_crio_image_tag: "latest"
-
- - name: Use Centos based image when distribution is CentOS
- set_fact:
- l_crio_image_name: "cri-o-centos"
- when: ansible_distribution == "CentOS"
-
- - name: Set CRI-O image tag
- set_fact:
- l_crio_image_tag: "{{ l_openshift_image_tag }}"
- when:
- - openshift_deployment_type == 'openshift-enterprise'
-
- - name: Use RHEL based image when distribution is Red Hat
- set_fact:
- l_crio_image_prepend: "registry.access.redhat.com/openshift3"
- l_crio_image_name: "cri-o"
- when: ansible_distribution == "RedHat"
-
- - name: Set the full image name
- set_fact:
- l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}"
-
- # For https://github.com/openshift/aos-cd-jobs/pull/624#pullrequestreview-61816548
- - name: Use a specific image if requested
- set_fact:
- l_crio_image: "{{ openshift_crio_systemcontainer_image_override }}"
- when:
- - openshift_crio_systemcontainer_image_override is defined
- - openshift_crio_systemcontainer_image_override != ""
-
- # Be nice and let the user see the variable result
- - debug:
- var: l_crio_image
-
-# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
-- name: Pre-pull CRI-O System Container image
- command: "atomic pull --storage ostree {{ l_crio_image }}"
- changed_when: false
- environment:
- NO_PROXY: "{{ openshift.common.no_proxy | default('') }}"
-
-
-- name: Install CRI-O System Container
- oc_atomic_container:
- name: "cri-o"
- image: "{{ l_crio_image }}"
- state: latest
-
-- name: Remove CRI-O default configuration files
- file:
- path: "{{ item }}"
- state: absent
- with_items:
- - /etc/cni/net.d/200-loopback.conf
- - /etc/cni/net.d/100-crio-bridge.conf
-
-- name: Create the CRI-O configuration
- template:
- dest: /etc/crio/crio.conf
- src: crio.conf.j2
- backup: yes
-
-- name: Ensure CNI configuration directory exists
- file:
- path: /etc/cni/net.d/
- state: directory
-
-- name: setup firewall for CRI-O
- include_tasks: crio_firewall.yml
- static: yes
-
-- name: Configure the CNI network
- template:
- dest: /etc/cni/net.d/openshift-sdn.conf
- src: 80-openshift-sdn.conf.j2
-
-- name: Start the CRI-O service
- systemd:
- name: "cri-o"
- enabled: yes
- state: started
- daemon_reload: yes
- register: start_result
-
-- meta: flush_handlers
-
-# If we are using crio only, docker.service might not be available for
-# 'docker login'
-- include_tasks: registry_auth.yml
- vars:
- openshift_docker_alternative_creds: "{{ l_use_crio_only }}"
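The deleted tasks above resolve the CRI-O system-container image name from distribution facts before pulling it with atomic. A minimal standalone sketch of that resolution, assuming a CentOS host, a non-enterprise deployment, and no image override (the localhost play wrapper is illustrative only):

```yaml
---
# Minimal sketch of the CRI-O image resolution removed above; the
# registry prefix and image names come from the deleted task file.
- hosts: localhost
  gather_facts: true
  tasks:
    - name: Set CRI-O image defaults
      set_fact:
        l_crio_image_prepend: "docker.io/gscrivano"
        l_crio_image_name: "cri-o-fedora"
        l_crio_image_tag: "latest"

    - name: Use CentOS based image when distribution is CentOS
      set_fact:
        l_crio_image_name: "cri-o-centos"
      when: ansible_distribution == "CentOS"

    - name: Set the full image name
      set_fact:
        l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}"

    # On a CentOS host this prints docker.io/gscrivano/cri-o-centos:latest
    - debug:
        var: l_crio_image
```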
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
deleted file mode 100644
index 881d83f50..000000000
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ /dev/null
@@ -1,190 +0,0 @@
----
-
-- set_fact:
- l_openshift_image_tag: "{{ openshift_image_tag | string }}"
- when: openshift_image_tag is defined
-
-- set_fact:
- l_openshift_image_tag: "latest"
- when:
- - openshift_image_tag is not defined
- - openshift_release == "latest"
-
-- set_fact:
- l_openshift_image_tag: "{{ openshift_release | string }}"
- when:
- - openshift_image_tag is not defined
- - openshift_release != "latest"
-
-# If docker_options are provided, we should fail. We should not install docker and ignore
-# the user's configuration. NOTE: docker_options == inventory:openshift_docker_options
-- name: Fail quickly if openshift_docker_options are set
- assert:
- that:
- - docker_options is defined
- - docker_options != ""
- msg: |
- Docker via System Container does not allow for the use of the openshift_docker_options
- variable. If you want to use openshift_docker_options you will need to use the
- traditional docker package install. Otherwise, comment out openshift_docker_options
- in your inventory file.
-
-- name: Ensure container-selinux is installed
- package:
- name: container-selinux
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
-
-# Used to pull and install the system container
-- name: Ensure atomic is installed
- package:
- name: atomic
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
-
-# At the time of writing the atomic command requires runc for its own use. This
-# task is here in the event that the atomic package ever removes the dependency.
-- name: Ensure runc is installed
- package:
- name: runc
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
-
-# Make sure Docker is installed so we are able to use the client
-- name: Install Docker so we can use the client
- package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
-
-# Make sure docker is disabled. Errors are ignored.
-- name: Disable Docker
- systemd:
- name: docker
- enabled: no
- state: stopped
- daemon_reload: yes
- ignore_errors: True
- register: r_docker_systemcontainer_docker_stop_result
- until: not r_docker_systemcontainer_docker_stop_result | failed
- retries: 3
- delay: 30
-
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
-
-- block:
-
- - name: Set to default prepend
- set_fact:
- l_docker_image_prepend: "gscrivano"
- l_docker_image_tag: "latest"
-
- - name: Set container engine image tag
- set_fact:
- l_docker_image_tag: "{{ l_openshift_image_tag }}"
- when:
- - openshift_deployment_type == 'openshift-enterprise'
-
- - name: Use Red Hat Registry for image when distribution is Red Hat
- set_fact:
- l_docker_image_prepend: "registry.access.redhat.com/openshift3"
- when: ansible_distribution == 'RedHat'
-
- - name: Use Fedora Registry for image when distribution is Fedora
- set_fact:
- l_docker_image_prepend: "registry.fedoraproject.org/f25"
- when: ansible_distribution == 'Fedora'
-
- - name: Set the full image name
- set_fact:
- l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:{{ l_docker_image_tag }}"
-
- # For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959
- - name: Use a specific image if requested
- set_fact:
- l_docker_image: "{{ openshift_docker_systemcontainer_image_override }}"
- when:
- - openshift_docker_systemcontainer_image_override is defined
- - openshift_docker_systemcontainer_image_override != ""
-
- # Be nice and let the user see the variable result
- - debug:
- var: l_docker_image
-
-# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
-- name: Pre-pull Container Engine System Container image
- command: "atomic pull --storage ostree {{ l_docker_image }}"
- changed_when: false
- environment:
- NO_PROXY: "{{ openshift.common.no_proxy | default('') }}"
-
-
-- name: Ensure container-engine.service.d directory exists
- file:
- path: "{{ container_engine_systemd_dir }}"
- state: directory
-
-- name: Ensure /etc/docker directory exists
- file:
- path: "{{ docker_conf_dir }}"
- state: directory
-
-- name: Install Container Engine System Container
- oc_atomic_container:
- name: "{{ openshift.docker.service_name }}"
- image: "{{ l_docker_image }}"
- state: latest
-
-- name: Configure Container Engine Service File
- template:
- dest: "{{ container_engine_systemd_dir }}/custom.conf"
- src: systemcontainercustom.conf.j2
-
-# Set local versions of facts that must be in json format for container-daemon.json
-# NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson
-- set_fact:
- l_docker_insecure_registries: "{{ l2_docker_insecure_registries | default([]) | to_json }}"
- l_docker_log_options: "{{ docker_log_options | default({}) | to_json }}"
- l_docker_additional_registries: "{{ l2_docker_additional_registries | default([]) | to_json }}"
- l_docker_blocked_registries: "{{ l2_docker_blocked_registries | default([]) | to_json }}"
- l_docker_selinux_enabled: "{{ docker_selinux_enabled | default(true) | to_json }}"
-
-# Configure container-engine using the container-daemon.json file
-# NOTE: daemon.json and container-daemon.json have been separated to avoid
-# collision.
-- name: Configure Container Engine
- template:
- dest: "{{ docker_conf_dir }}/container-daemon.json"
- src: daemon.json
-
-# Enable and start the container-engine service
-- name: Start the Container Engine service
- systemd:
- name: "{{ openshift.docker.service_name }}"
- enabled: yes
- state: started
- daemon_reload: yes
- register: r_docker_systemcontainer_docker_start_result
- until: not r_docker_systemcontainer_docker_start_result | failed
- retries: 3
- delay: 30
-
-- set_fact:
- docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}"
-
-- meta: flush_handlers
-
-# Since docker is running as a system container, docker login will fail to create
-# credentials. Use an alternate method when authenticated registries are required.
-- include_tasks: registry_auth.yml
- vars:
- openshift_docker_alternative_creds: True
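The set_fact block above serializes lists and dicts into JSON strings because the container-daemon.json template predates Jinja 2.9's tojson. A minimal sketch of that conversion, with illustrative registry and log-option values (not taken from this diff):

```yaml
---
# Sketch of the to_json conversion removed above; sample values are
# illustrative only.
- hosts: localhost
  gather_facts: false
  vars:
    l2_docker_insecure_registries: ['172.30.0.0/16']
    docker_log_options: {max-size: '50m'}
  tasks:
    - set_fact:
        l_docker_insecure_registries: "{{ l2_docker_insecure_registries | default([]) | to_json }}"
        l_docker_log_options: "{{ docker_log_options | default({}) | to_json }}"

    # Renders the JSON strings ["172.30.0.0/16"] and {"max-size": "50m"},
    # which the daemon.json template can embed verbatim.
    - debug:
        msg: "{{ l_docker_insecure_registries }} {{ l_docker_log_options }}"
```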
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index a069e4d87..3038ed9f6 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -97,3 +97,5 @@ r_etcd_os_firewall_allow:
# set the backend quota to 4GB by default
etcd_quota_backend_bytes: 4294967296
+
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
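The new openshift_docker_service_name default (added here and in the flannel defaults below) derives the systemd unit name from openshift_docker_use_system_container instead of the removed openshift.docker.service_name fact. A minimal sketch of how it resolves; the localhost wrapper and sample value are illustrative:

```yaml
---
# Sketch of the new service-name default; with the flag unset or false
# it resolves to "docker", with it true to "container-engine".
- hosts: localhost
  gather_facts: false
  vars:
    openshift_docker_use_system_container: true
    openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
  tasks:
    - debug:
        var: openshift_docker_service_name
```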
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index 82ac4fc84..ca8b6a707 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -1,9 +1,4 @@
---
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
-
- name: Pull etcd system container
command: atomic pull --storage=ostree {{ etcd_image }}
register: pull_result
diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service
index 99ae37319..4c25a9955 100644
--- a/roles/etcd/templates/etcd.docker.service
+++ b/roles/etcd/templates/etcd.docker.service
@@ -1,8 +1,8 @@
[Unit]
Description=The Etcd Server container
-After={{ openshift.docker.service_name }}.service
-Requires={{ openshift.docker.service_name }}.service
-PartOf={{ openshift.docker.service_name }}.service
+After={{ openshift_docker_service_name }}.service
+Requires={{ openshift_docker_service_name }}.service
+PartOf={{ openshift_docker_service_name }}.service
[Service]
EnvironmentFile={{ etcd_conf_file }}
@@ -14,4 +14,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy={{ openshift.docker.service_name }}.service
+WantedBy={{ openshift_docker_service_name }}.service
diff --git a/roles/flannel/defaults/main.yaml b/roles/flannel/defaults/main.yaml
index 988731ef2..488b6b0bc 100644
--- a/roles/flannel/defaults/main.yaml
+++ b/roles/flannel/defaults/main.yaml
@@ -5,3 +5,5 @@ etcd_hosts: "{{ etcd_urls }}"
etcd_peer_ca_file: "{{ openshift.common.config_base }}/node/{{ 'ca' if (embedded_etcd | bool) else 'flannel.etcd-ca' }}.crt"
etcd_peer_cert_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.crt"
etcd_peer_key_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.key"
+
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
index 889069485..80e4d391d 100644
--- a/roles/flannel/handlers/main.yml
+++ b/roles/flannel/handlers/main.yml
@@ -6,7 +6,7 @@
- name: restart docker
become: yes
systemd:
- name: "{{ openshift.docker.service_name }}"
+ name: "{{ openshift_docker_service_name }}"
state: restarted
register: l_docker_restart_docker_in_flannel_result
until: not l_docker_restart_docker_in_flannel_result | failed
diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
index 3cb1fa8d0..83ca83350 100644
--- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
+++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
@@ -86,7 +86,7 @@ class CallbackModule(CallbackBase):
},
'installer_phase_logging': {
'title': 'Logging Install',
- 'playbook': 'playbooks/byo/openshift-cluster/openshift-logging.yml'
+ 'playbook': 'playbooks/openshift-logging/config.yml'
},
'installer_phase_prometheus': {
'title': 'Prometheus Install',
diff --git a/roles/kuryr/templates/configmap.yaml.j2 b/roles/kuryr/templates/configmap.yaml.j2
index 6bf6c1db2..96c215f00 100644
--- a/roles/kuryr/templates/configmap.yaml.j2
+++ b/roles/kuryr/templates/configmap.yaml.j2
@@ -229,6 +229,7 @@ data:
# TODO (apuimedo): Remove the duplicated line just after this one once the
# RDO packaging contains the upstream patch
worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+ external_svc_subnet = {{ kuryr_openstack_external_svc_subnet_id }}
[pod_vif_nested]
worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml
index 410b739e9..cb83c8ead 100644
--- a/roles/nuage_master/handlers/main.yaml
+++ b/roles/nuage_master/handlers/main.yaml
@@ -3,8 +3,7 @@
systemd: name={{ openshift.common.service_type }}-master-api state=restarted
when: >
(openshift_master_ha | bool) and
- (not master_api_service_status_changed | default(false)) and
- openshift.master.cluster_method == 'native'
+ (not master_api_service_status_changed | default(false))
# TODO: need to fix up ignore_errors here
# We retry the controllers because the API may not be 100% initialized yet.
@@ -16,6 +15,5 @@
until: result.rc == 0
when: >
(openshift_master_ha | bool) and
- (not master_controllers_service_status_changed | default(false)) and
- openshift.master.cluster_method == 'native'
+ (not master_controllers_service_status_changed | default(false))
ignore_errors: yes
diff --git a/roles/openshift_atomic/README.md b/roles/openshift_atomic/README.md
deleted file mode 100644
index 8c10c9991..000000000
--- a/roles/openshift_atomic/README.md
+++ /dev/null
@@ -1,28 +0,0 @@
-OpenShift Atomic
-================
-
-This role houses atomic specific tasks.
-
-Requirements
-------------
-
-Role Variables
---------------
-
-Dependencies
-------------
-
-Example Playbook
-----------------
-
-```
-- name: Ensure atomic proxies are defined
- hosts: localhost
- roles:
- - role: openshift_atomic
-```
-
-License
--------
-
-Apache License Version 2.0
diff --git a/roles/openshift_atomic/meta/main.yml b/roles/openshift_atomic/meta/main.yml
deleted file mode 100644
index ea129f514..000000000
--- a/roles/openshift_atomic/meta/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-galaxy_info:
- author: OpenShift
- description: Atomic related tasks
- company: Red Hat, Inc
- license: ASL 2.0
- min_ansible_version: 2.2
- platforms:
- - name: EL
- versions:
- - 7
-dependencies:
-- role: lib_openshift
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
index 7e8e9b679..2c1e88cfb 100644
--- a/roles/openshift_aws/tasks/build_node_group.yml
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -28,10 +28,10 @@
l_epoch_time: "{{ ansible_date_time.epoch }}"
- when: openshift_aws_create_iam_role
- include: iam_role.yml
+ include_tasks: iam_role.yml
- when: openshift_aws_create_launch_config
- include: launch_config.yml
+ include_tasks: launch_config.yml
- when: openshift_aws_create_scale_group
- include: scale_group.yml
+ include_tasks: scale_group.yml
diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml
index 0dbeba5a0..fed80b7eb 100644
--- a/roles/openshift_aws/tasks/launch_config.yml
+++ b/roles/openshift_aws/tasks/launch_config.yml
@@ -9,7 +9,7 @@
when:
- openshift_deployment_type is undefined
-- include: launch_config_create.yml
+- include_tasks: launch_config_create.yml
with_dict: "{{ l_nodes_to_build }}"
loop_control:
loop_var: launch_config_item
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
index 91538ed5c..06f649343 100644
--- a/roles/openshift_aws/tasks/provision.yml
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -1,16 +1,16 @@
---
- when: openshift_aws_create_iam_cert | bool
name: create the iam_cert for elb certificate
- include: iam_cert.yml
+ include_tasks: iam_cert.yml
- when: openshift_aws_create_s3 | bool
name: create s3 bucket for registry
- include: s3.yml
+ include_tasks: s3.yml
-- include: vpc_and_subnet_id.yml
+- include_tasks: vpc_and_subnet_id.yml
- name: create elbs
- include: elb.yml
+ include_tasks: elb.yml
with_dict: "{{ openshift_aws_elb_dict }}"
vars:
l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
@@ -19,7 +19,7 @@
loop_var: l_elb_dict_item
- name: include scale group creation for master
- include: build_node_group.yml
+ include_tasks: build_node_group.yml
vars:
l_nodes_to_build: "{{ openshift_aws_master_group_config }}"
l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml
index 3349acb7a..8cc75cd0c 100644
--- a/roles/openshift_aws/tasks/provision_instance.yml
+++ b/roles/openshift_aws/tasks/provision_instance.yml
@@ -3,7 +3,7 @@
set_fact:
openshift_node_bootstrap: True
-- include: vpc_and_subnet_id.yml
+- include_tasks: vpc_and_subnet_id.yml
- name: create instance for ami creation
ec2:
diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml
index 3e84666a2..041ed0791 100644
--- a/roles/openshift_aws/tasks/provision_nodes.yml
+++ b/roles/openshift_aws/tasks/provision_nodes.yml
@@ -27,17 +27,17 @@
set_fact:
openshift_aws_launch_config_bootstrap_token: "{{ bootstrap['content'] | b64decode }}"
-- include: vpc_and_subnet_id.yml
+- include_tasks: vpc_and_subnet_id.yml
- name: include build compute and infra node groups
- include: build_node_group.yml
+ include_tasks: build_node_group.yml
vars:
l_nodes_to_build: "{{ openshift_aws_node_group_config }}"
l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
l_aws_ami_map: "{{ openshift_aws_ami_map }}"
- name: include build node group for extra nodes
- include: build_node_group.yml
+ include_tasks: build_node_group.yml
when: openshift_aws_node_group_config_extra is defined
vars:
l_nodes_to_build: "{{ openshift_aws_node_group_config_extra | default({}) }}"
@@ -47,4 +47,4 @@
# instances aren't scaling fast enough here, we need to wait for them
- when: openshift_aws_wait_for_ssh | bool
name: wait for our new nodes to come up
- include: wait_for_groups.yml
+ include_tasks: wait_for_groups.yml
diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml
index 0cb749dcc..7a3d0fb68 100644
--- a/roles/openshift_aws/tasks/seal_ami.yml
+++ b/roles/openshift_aws/tasks/seal_ami.yml
@@ -31,7 +31,7 @@
source-ami: "{{ amioutput.image_id }}"
- name: copy the ami for encrypted disks
- include: ami_copy.yml
+ include_tasks: ami_copy.yml
vars:
openshift_aws_ami_copy_name: "{{ openshift_aws_ami_name }}-encrypted"
openshift_aws_ami_copy_src_ami: "{{ amioutput.image_id }}"
diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml
index 5cc7ae537..43834079e 100644
--- a/roles/openshift_aws/tasks/security_group.yml
+++ b/roles/openshift_aws/tasks/security_group.yml
@@ -6,11 +6,11 @@
"tag:Name": "{{ openshift_aws_clusterid }}"
register: vpcout
-- include: security_group_create.yml
+- include_tasks: security_group_create.yml
vars:
l_security_groups: "{{ openshift_aws_node_security_groups }}"
-- include: security_group_create.yml
+- include_tasks: security_group_create.yml
when: openshift_aws_node_security_groups_extra is defined
vars:
l_security_groups: "{{ openshift_aws_node_security_groups_extra | default({}) }}"
diff --git a/roles/openshift_aws/tasks/upgrade_node_group.yml b/roles/openshift_aws/tasks/upgrade_node_group.yml
index d7851d887..c3f86f523 100644
--- a/roles/openshift_aws/tasks/upgrade_node_group.yml
+++ b/roles/openshift_aws/tasks/upgrade_node_group.yml
@@ -4,13 +4,13 @@
when:
- openshift_aws_current_version == openshift_aws_new_version
-- include: provision_nodes.yml
+- include_tasks: provision_nodes.yml
-- include: accept_nodes.yml
+- include_tasks: accept_nodes.yml
-- include: setup_scale_group_facts.yml
+- include_tasks: setup_scale_group_facts.yml
-- include: setup_master_group.yml
+- include_tasks: setup_master_group.yml
vars:
# we do not set etcd here as its limited to 1 or 3
openshift_aws_masters_groups: masters,nodes
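The openshift_aws hunks above all replace the deprecated bare include: with include_tasks: (elsewhere in this commit, import_tasks: is used where static behavior is wanted, as in the openshift_hosted firewall tasks below). A sketch contrasting the two forms, using a hypothetical example.yml task file:

```yaml
---
# Contrast of the two replacements used in this commit; example.yml is
# a hypothetical pre-existing task file, named only for illustration.
- hosts: localhost
  gather_facts: false
  tasks:
    # Dynamic: the file is loaded when the task runs, so the when:
    # condition is evaluated once, before any of the included tasks.
    - include_tasks: example.yml
      when: some_flag | default(False) | bool

    # Static: the file is inlined at parse time and the when: condition
    # is copied onto every task inside it.
    - import_tasks: example.yml
      when: some_flag | default(False) | bool
```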
diff --git a/roles/openshift_cli/defaults/main.yml b/roles/openshift_cli/defaults/main.yml
index 82da0639e..631a0455e 100644
--- a/roles/openshift_cli/defaults/main.yml
+++ b/roles/openshift_cli/defaults/main.yml
@@ -4,3 +4,8 @@ system_images_registry_dict:
origin: "docker.io"
system_images_registry: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}"
+
+openshift_use_crio_only: False
+
+l_is_system_container_image: "{{ openshift_use_master_system_container | default(openshift_use_system_containers | default(False)) | bool }}"
+l_use_cli_atomic_image: "{{ openshift_use_crio_only or l_is_system_container_image }}"
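These defaults replace the two set_fact tasks deleted from openshift_cli/tasks/main.yml below; the nested default() chain means an explicit openshift_use_master_system_container wins, then openshift_use_system_containers, then False. A minimal sketch of that precedence with an illustrative sample value:

```yaml
---
# Sketch of the default() precedence behind l_is_system_container_image;
# the localhost wrapper and sample value are illustrative.
- hosts: localhost
  gather_facts: false
  vars:
    openshift_use_system_containers: true
    l_is_system_container_image: "{{ openshift_use_master_system_container | default(openshift_use_system_containers | default(False)) | bool }}"
  tasks:
    # openshift_use_master_system_container is unset here, so the
    # fallback openshift_use_system_containers (true) is used.
    - debug:
        var: l_is_system_container_image
```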
diff --git a/roles/openshift_cli/meta/main.yml b/roles/openshift_cli/meta/main.yml
index 29ed82783..5d2b6abed 100644
--- a/roles/openshift_cli/meta/main.yml
+++ b/roles/openshift_cli/meta/main.yml
@@ -12,6 +12,4 @@ galaxy_info:
categories:
- cloud
dependencies:
-- role: openshift_docker
- when: not skip_docker_role | default(False) | bool
- role: openshift_facts
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 7b046b2c4..140c6ea26 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -1,10 +1,4 @@
---
-- set_fact:
- l_use_crio_only: "{{ openshift_use_crio_only | default(false) }}"
- l_is_system_container_image: "{{ openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool }}"
-- set_fact:
- l_use_cli_atomic_image: "{{ l_use_crio_only or l_is_system_container_image }}"
-
- name: Install clients
package: name={{ openshift.common.service_type }}-clients state=present
when: not openshift.common.is_containerized | bool
diff --git a/roles/openshift_cluster_autoscaler/tasks/main.yml b/roles/openshift_cluster_autoscaler/tasks/main.yml
index 173dcf044..ca7dfb885 100644
--- a/roles/openshift_cluster_autoscaler/tasks/main.yml
+++ b/roles/openshift_cluster_autoscaler/tasks/main.yml
@@ -31,7 +31,7 @@
type: role
name: "{{ openshift_cluster_autoscaler_name }}"
-- include: aws.yml
+- include_tasks: aws.yml
when: openshift_cluster_autoscaler_cloud_provider == 'aws'
- name: create the policies
diff --git a/roles/openshift_docker/defaults/main.yml b/roles/openshift_docker/defaults/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_docker/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_docker/meta/main.yml b/roles/openshift_docker/meta/main.yml
deleted file mode 100644
index 60efd4e45..000000000
--- a/roles/openshift_docker/meta/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-galaxy_info:
- author: Jason DeTiberus
- description: OpenShift Docker
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 1.9
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
-dependencies:
-- role: openshift_docker_facts
-- role: docker
diff --git a/roles/openshift_docker/tasks/main.yml b/roles/openshift_docker/tasks/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_docker/tasks/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_docker_facts/defaults/main.yml b/roles/openshift_docker_facts/defaults/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_docker_facts/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_docker_facts/meta/main.yml b/roles/openshift_docker_facts/meta/main.yml
deleted file mode 100644
index 5b1be7a8d..000000000
--- a/roles/openshift_docker_facts/meta/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-galaxy_info:
- author: Jason DeTiberus
- description: OpenShift Docker Facts
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 1.9
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
-dependencies:
-- { role: openshift_facts }
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
deleted file mode 100644
index 5a3e50678..000000000
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-- name: Set docker facts
- openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: docker
- local_facts:
- selinux_enabled: "{{ openshift_docker_selinux_enabled | default(None) }}"
- log_driver: "{{ openshift_docker_log_driver | default(None) }}"
- log_options: "{{ openshift_docker_log_options | default(None) }}"
- options: "{{ openshift_docker_options | default(None) }}"
- disable_push_dockerhub: "{{ openshift_disable_push_dockerhub | default(None) }}"
- hosted_registry_insecure: "{{ openshift_docker_hosted_registry_insecure | default(openshift.docker.hosted_registry_insecure | default(False)) }}"
- hosted_registry_network: "{{ openshift_docker_hosted_registry_network | default(None) }}"
- use_system_container: "{{ openshift_docker_use_system_container | default(False) }}"
- use_crio: "{{ openshift_use_crio | default(False) }}"
- - role: node
- local_facts:
- sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
-
-- set_fact:
- docker_selinux_enabled: "{{ openshift.docker.selinux_enabled | default(omit) }}"
- docker_log_driver: "{{ openshift.docker.log_driver | default(omit) }}"
- docker_log_options: "{{ openshift.docker.log_options | default(omit) }}"
- docker_push_dockerhub: "{{ openshift.docker.disable_push_dockerhub
- | default(omit) }}"
- docker_http_proxy: "{{ openshift.common.http_proxy | default(omit) }}"
- docker_https_proxy: "{{ openshift.common.https_proxy | default(omit) }}"
- docker_no_proxy: "{{ openshift.common.no_proxy | default(omit) }}"
-
-- set_fact:
- docker_options: "--insecure-registry={{ openshift.docker.hosted_registry_network }} {{ openshift.docker.options | default ('') }}"
- when: openshift.docker.hosted_registry_insecure | default(False) | bool and openshift.docker.hosted_registry_network is defined
- register: hosted_registry_options
-
-- set_fact:
- docker_options: "{{ openshift.docker.options | default(omit) }}"
- when: hosted_registry_options | skipped
diff --git a/roles/openshift_docker_facts/vars/main.yml b/roles/openshift_docker_facts/vars/main.yml
deleted file mode 100644
index 55c04b0c1..000000000
--- a/roles/openshift_docker_facts/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
diff --git a/roles/openshift_etcd/meta/main.yml b/roles/openshift_etcd/meta/main.yml
index 7cc548f69..0e28fec03 100644
--- a/roles/openshift_etcd/meta/main.yml
+++ b/roles/openshift_etcd/meta/main.yml
@@ -13,6 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_etcd_facts
-- role: openshift_docker
- when: openshift.common.is_containerized | bool
- role: etcd
diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml
index 7064d727a..a182d23c5 100644
--- a/roles/openshift_facts/defaults/main.yml
+++ b/roles/openshift_facts/defaults/main.yml
@@ -3,4 +3,98 @@ openshift_cli_image_dict:
origin: 'openshift/origin'
openshift-enterprise: 'openshift3/ose'
+openshift_hosted_images_dict:
+ origin: 'openshift/origin-${component}:${version}'
+ openshift-enterprise: 'openshift3/ose-${component}:${version}'
+
openshift_cli_image: "{{ osm_image | default(openshift_cli_image_dict[openshift_deployment_type]) }}"
+
+# osm_default_subdomain is an old migrated fact; it can probably be removed.
+osm_default_subdomain: "router.default.svc.cluster.local"
+openshift_master_default_subdomain: "{{ osm_default_subdomain }}"
+
+openshift_hosted_etcd_storage_nfs_directory: '/exports'
+openshift_hosted_etcd_storage_nfs_options: '*(rw,root_squash)'
+openshift_hosted_etcd_storage_volume_name: 'etcd'
+openshift_hosted_etcd_storage_volume_size: '1Gi'
+openshift_hosted_etcd_storage_create_pv: True
+openshift_hosted_etcd_storage_create_pvc: False
+openshift_hosted_etcd_storage_access_modes:
+ - 'ReadWriteOnce'
+
+openshift_hosted_registry_namespace: 'default'
+openshift_hosted_registry_storage_volume_name: 'registry'
+openshift_hosted_registry_storage_volume_size: '5Gi'
+openshift_hosted_registry_storage_create_pv: True
+openshift_hosted_registry_storage_create_pvc: True
+openshift_hosted_registry_storage_nfs_directory: '/exports'
+openshift_hosted_registry_storage_nfs_options: '*(rw,root_squash)'
+openshift_hosted_registry_storage_glusterfs_endpoints: 'glusterfs-registry-endpoints'
+openshift_hosted_registry_storage_glusterfs_path: glusterfs-registry-volume
+openshift_hosted_registry_storage_glusterfs_readOnly: False
+openshift_hosted_registry_storage_glusterfs_swap: False
+openshift_hosted_registry_storage_glusterfs_swapcopy: True
+openshift_hosted_registry_storage_glusterfs_ips: []
+openshift_hosted_registry_storage_access_modes:
+ - 'ReadWriteMany'
+
+openshift_logging_storage_nfs_directory: '/exports'
+openshift_logging_storage_nfs_options: '*(rw,root_squash)'
+openshift_logging_storage_volume_name: 'logging-es'
+openshift_logging_storage_create_pv: True
+openshift_logging_storage_create_pvc: False
+openshift_logging_storage_access_modes:
+ - 'ReadWriteOnce'
+
+openshift_loggingops_storage_volume_name: 'logging-es-ops'
+openshift_loggingops_storage_volume_size: '10Gi'
+openshift_loggingops_storage_create_pv: True
+openshift_loggingops_storage_create_pvc: False
+openshift_loggingops_storage_nfs_directory: '/exports'
+openshift_loggingops_storage_nfs_options: '*(rw,root_squash)'
+openshift_loggingops_storage_access_modes:
+ - 'ReadWriteOnce'
+
+openshift_metrics_deploy: False
+openshift_metrics_duration: 7
+openshift_metrics_resolution: '10s'
+openshift_metrics_storage_volume_name: 'metrics'
+openshift_metrics_storage_volume_size: '10Gi'
+openshift_metrics_storage_create_pv: True
+openshift_metrics_storage_create_pvc: False
+openshift_metrics_storage_nfs_directory: '/exports'
+openshift_metrics_storage_nfs_options: '*(rw,root_squash)'
+openshift_metrics_storage_access_modes:
+ - 'ReadWriteOnce'
+
+openshift_prometheus_storage_volume_name: 'prometheus'
+openshift_prometheus_storage_volume_size: '10Gi'
+openshift_prometheus_storage_nfs_directory: '/exports'
+openshift_prometheus_storage_nfs_options: '*(rw,root_squash)'
+openshift_prometheus_storage_access_modes:
+ - 'ReadWriteOnce'
+openshift_prometheus_storage_create_pv: True
+openshift_prometheus_storage_create_pvc: False
+
+openshift_prometheus_alertmanager_storage_volume_name: 'prometheus-alertmanager'
+openshift_prometheus_alertmanager_storage_volume_size: '10Gi'
+openshift_prometheus_alertmanager_storage_nfs_directory: '/exports'
+openshift_prometheus_alertmanager_storage_nfs_options: '*(rw,root_squash)'
+openshift_prometheus_alertmanager_storage_access_modes:
+ - 'ReadWriteOnce'
+openshift_prometheus_alertmanager_storage_create_pv: True
+openshift_prometheus_alertmanager_storage_create_pvc: False
+
+openshift_prometheus_alertbuffer_storage_volume_name: 'prometheus-alertbuffer'
+openshift_prometheus_alertbuffer_storage_volume_size: '10Gi'
+openshift_prometheus_alertbuffer_storage_nfs_directory: '/exports'
+openshift_prometheus_alertbuffer_storage_nfs_options: '*(rw,root_squash)'
+openshift_prometheus_alertbuffer_storage_access_modes:
+ - 'ReadWriteOnce'
+openshift_prometheus_alertbuffer_storage_create_pv: True
+openshift_prometheus_alertbuffer_storage_create_pvc: False
+
+
+openshift_router_selector: "region=infra"
+openshift_hosted_router_selector: "{{ openshift_router_selector }}"
+openshift_hosted_registry_selector: "{{ openshift_router_selector }}"
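These defaults flatten the nested openshift.hosted.* facts (deleted from openshift_facts.py below) into plain, inventory-overridable variables; the ${component} and ${version} placeholders in openshift_hosted_images_dict are left for the oc adm tooling to substitute. A before/after sketch of the reference style, both tasks illustrative and only the second valid after this change:

```yaml
# Before: nested fact populated by the openshift_facts module
- debug:
    msg: "{{ openshift.hosted.registry.storage.volume.name }}"

# After: flat role default from openshift_facts/defaults/main.yml
- debug:
    msg: "{{ openshift_hosted_registry_storage_volume_name }}"
```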
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 9424f3cde..f57b59085 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -11,14 +11,13 @@ import copy
import errno
import json
import re
-import io
import os
import yaml
import struct
import socket
from distutils.util import strtobool
from distutils.version import LooseVersion
-from ansible.module_utils.six import string_types, text_type
+from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves import configparser
# ignore pylint errors related to the module_utils import
@@ -51,39 +50,6 @@ EXAMPLES = '''
'''
-def migrate_docker_facts(facts):
- """ Apply migrations for docker facts """
- params = {
- 'common': (
- 'options'
- ),
- 'node': (
- 'log_driver',
- 'log_options'
- )
- }
- if 'docker' not in facts:
- facts['docker'] = {}
- # pylint: disable=consider-iterating-dictionary
- for role in params.keys():
- if role in facts:
- for param in params[role]:
- old_param = 'docker_' + param
- if old_param in facts[role]:
- facts['docker'][param] = facts[role].pop(old_param)
-
- if 'node' in facts and 'portal_net' in facts['node']:
- facts['docker']['hosted_registry_network'] = facts['node'].pop('portal_net')
-
- # log_options was originally meant to be a comma separated string, but
- # we now prefer an actual list, with backward compatibility:
- if 'log_options' in facts['docker'] and \
- isinstance(facts['docker']['log_options'], string_types):
- facts['docker']['log_options'] = facts['docker']['log_options'].split(",")
-
- return facts
-
-
# TODO: We should add a generic migration function that takes source and destination
# paths and does the right thing rather than one function for common, one for node, etc.
def migrate_common_facts(facts):
@@ -119,24 +85,6 @@ def migrate_node_facts(facts):
return facts
-def migrate_hosted_facts(facts):
- """ Apply migrations for master facts """
- if 'master' in facts:
- if 'router_selector' in facts['master']:
- if 'hosted' not in facts:
- facts['hosted'] = {}
- if 'router' not in facts['hosted']:
- facts['hosted']['router'] = {}
- facts['hosted']['router']['selector'] = facts['master'].pop('router_selector')
- if 'registry_selector' in facts['master']:
- if 'hosted' not in facts:
- facts['hosted'] = {}
- if 'registry' not in facts['hosted']:
- facts['hosted']['registry'] = {}
- facts['hosted']['registry']['selector'] = facts['master'].pop('registry_selector')
- return facts
-
-
def migrate_admission_plugin_facts(facts):
""" Apply migrations for admission plugin facts """
if 'master' in facts:
@@ -155,10 +103,8 @@ def migrate_admission_plugin_facts(facts):
def migrate_local_facts(facts):
""" Apply migrations of local facts """
migrated_facts = copy.deepcopy(facts)
- migrated_facts = migrate_docker_facts(migrated_facts)
migrated_facts = migrate_common_facts(migrated_facts)
migrated_facts = migrate_node_facts(migrated_facts)
- migrated_facts = migrate_hosted_facts(migrated_facts)
migrated_facts = migrate_admission_plugin_facts(migrated_facts)
return migrated_facts
@@ -445,58 +391,6 @@ def normalize_provider_facts(provider, metadata):
return facts
-# pylint: disable=too-many-branches
-def set_selectors(facts):
- """ Set selectors facts if not already present in facts dict
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with the generated selectors
- facts if they were not already present
-
- """
- selector = "region=infra"
-
- if 'hosted' not in facts:
- facts['hosted'] = {}
- if 'router' not in facts['hosted']:
- facts['hosted']['router'] = {}
- if 'selector' not in facts['hosted']['router'] or facts['hosted']['router']['selector'] in [None, 'None']:
- facts['hosted']['router']['selector'] = selector
- if 'registry' not in facts['hosted']:
- facts['hosted']['registry'] = {}
- if 'selector' not in facts['hosted']['registry'] or facts['hosted']['registry']['selector'] in [None, 'None']:
- facts['hosted']['registry']['selector'] = selector
- if 'metrics' not in facts['hosted']:
- facts['hosted']['metrics'] = {}
- if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']:
- facts['hosted']['metrics']['selector'] = None
- if 'logging' not in facts or not isinstance(facts['logging'], dict):
- facts['logging'] = {}
- if 'selector' not in facts['logging'] or facts['logging']['selector'] in [None, 'None']:
- facts['logging']['selector'] = None
- if 'etcd' not in facts['hosted']:
- facts['hosted']['etcd'] = {}
- if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']:
- facts['hosted']['etcd']['selector'] = None
- if 'prometheus' not in facts:
- facts['prometheus'] = {}
- if 'selector' not in facts['prometheus'] or facts['prometheus']['selector'] in [None, 'None']:
- facts['prometheus']['selector'] = None
- if 'alertmanager' not in facts['prometheus']:
- facts['prometheus']['alertmanager'] = {}
- # pylint: disable=line-too-long
- if 'selector' not in facts['prometheus']['alertmanager'] or facts['prometheus']['alertmanager']['selector'] in [None, 'None']:
- facts['prometheus']['alertmanager']['selector'] = None
- if 'alertbuffer' not in facts['prometheus']:
- facts['prometheus']['alertbuffer'] = {}
- # pylint: disable=line-too-long
- if 'selector' not in facts['prometheus']['alertbuffer'] or facts['prometheus']['alertbuffer']['selector'] in [None, 'None']:
- facts['prometheus']['alertbuffer']['selector'] = None
-
- return facts
-
-
def set_identity_providers_if_unset(facts):
""" Set identity_providers fact if not already present in facts dict
@@ -641,60 +535,6 @@ def set_aggregate_facts(facts):
return facts
-def set_etcd_facts_if_unset(facts):
- """
- If using embedded etcd, loads the data directory from master-config.yaml.
-
- If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf.
-
- If anything goes wrong parsing these, the fact will not be set.
- """
- if 'master' in facts and safe_get_bool(facts['master']['embedded_etcd']):
- etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
-
- if 'etcd_data_dir' not in etcd_facts:
- try:
- # Parse master config to find actual etcd data dir:
- master_cfg_path = os.path.join(facts['common']['config_base'],
- 'master/master-config.yaml')
- master_cfg_f = open(master_cfg_path, 'r')
- config = yaml.safe_load(master_cfg_f.read())
- master_cfg_f.close()
-
- etcd_facts['etcd_data_dir'] = \
- config['etcdConfig']['storageDirectory']
-
- facts['etcd'] = etcd_facts
-
- # We don't want exceptions bubbling up here:
- # pylint: disable=broad-except
- except Exception:
- pass
- else:
- etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
-
- # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
- try:
- # Add a fake section for parsing:
- ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
- ini_fp = io.StringIO(ini_str)
- config = configparser.RawConfigParser()
- config.readfp(ini_fp)
- etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
- if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
- etcd_data_dir = etcd_data_dir[1:-1]
-
- etcd_facts['etcd_data_dir'] = etcd_data_dir
- facts['etcd'] = etcd_facts
-
- # We don't want exceptions bubbling up here:
- # pylint: disable=broad-except
- except Exception:
- pass
-
- return facts
-
-
def set_deployment_facts_if_unset(facts):
""" Set Facts that vary based on deployment_type. This currently
includes common.service_type, master.registry_url, node.registry_url,
@@ -1104,6 +944,7 @@ def get_version_output(binary, version_cmd):
return output
+# We may need this in the future.
def get_docker_version_info():
""" Parses and returns the docker version info """
result = None
@@ -1117,25 +958,6 @@ def get_docker_version_info():
return result
-def get_hosted_registry_insecure():
- """ Parses OPTIONS from /etc/sysconfig/docker to determine if the
- registry is currently insecure.
- """
- hosted_registry_insecure = None
- if os.path.exists('/etc/sysconfig/docker'):
- try:
- ini_str = text_type('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8')
- ini_fp = io.StringIO(ini_str)
- config = configparser.RawConfigParser()
- config.readfp(ini_fp)
- options = config.get('root', 'OPTIONS')
- if 'insecure-registry' in options:
- hosted_registry_insecure = True
- except Exception: # pylint: disable=broad-except
- pass
- return hosted_registry_insecure
-
-
def get_openshift_version(facts):
""" Get current version of openshift on the host.
@@ -1565,13 +1387,6 @@ def set_container_facts_if_unset(facts):
deployer_image = 'openshift/origin-deployer'
facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')
- # If openshift_docker_use_system_container is set and is True...
- if 'use_system_container' in list(facts['docker'].keys()):
- # use safe_get_bool as the inventory variable may not be a
- # valid boolean on its own.
- if safe_get_bool(facts['docker']['use_system_container']):
- # ... set the service name to container-engine
- facts['docker']['service_name'] = 'container-engine'
if 'is_containerized' not in facts['common']:
facts['common']['is_containerized'] = facts['common']['is_atomic']
@@ -1664,15 +1479,9 @@ class OpenShiftFacts(object):
'buildoverrides',
'cloudprovider',
'common',
- 'docker',
'etcd',
- 'hosted',
'master',
- 'node',
- 'logging',
- 'loggingops',
- 'metrics',
- 'prometheus']
+ 'node']
# Disabling too-many-arguments, this should be cleaned up as a TODO item.
# pylint: disable=too-many-arguments,no-value-for-parameter
@@ -1745,7 +1554,6 @@ class OpenShiftFacts(object):
facts = migrate_oauth_template_facts(facts)
facts['current_config'] = get_current_config(facts)
facts = set_url_facts_if_unset(facts)
- facts = set_selectors(facts)
facts = set_identity_providers_if_unset(facts)
facts = set_deployment_facts_if_unset(facts)
facts = set_sdn_facts_if_unset(facts, self.system_facts)
@@ -1755,7 +1563,6 @@ class OpenShiftFacts(object):
facts = build_api_server_args(facts)
facts = set_version_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
- facts = set_etcd_facts_if_unset(facts)
facts = set_proxy_facts(facts)
facts = set_builddefaults_facts(facts)
facts = set_buildoverrides_facts(facts)
@@ -1818,200 +1625,9 @@ class OpenShiftFacts(object):
local_quota_per_fsgroup="",
set_node_ip=False)
- if 'docker' in roles:
- docker = dict(disable_push_dockerhub=False,
- options='--log-driver=journald')
- # NOTE: This is a workaround for a dnf output race condition that can occur in
- # some situations. See https://bugzilla.redhat.com/show_bug.cgi?id=918184
- if self.system_facts['ansible_pkg_mgr'] == 'dnf':
- rpm_rebuilddb()
-
- version_info = get_docker_version_info()
- if version_info is not None:
- docker['api_version'] = version_info['api_version']
- docker['version'] = version_info['version']
- docker['gte_1_10'] = LooseVersion(version_info['version']) >= LooseVersion('1.10')
- hosted_registry_insecure = get_hosted_registry_insecure()
- if hosted_registry_insecure is not None:
- docker['hosted_registry_insecure'] = hosted_registry_insecure
- docker['service_name'] = 'docker'
- defaults['docker'] = docker
-
if 'cloudprovider' in roles:
defaults['cloudprovider'] = dict(kind=None)
- if 'hosted' in roles or self.role == 'hosted':
- defaults['hosted'] = dict(
- etcd=dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='etcd',
- size='1Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- ),
- registry=dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='registry',
- size='5Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'),
- glusterfs=dict(
- endpoints='glusterfs-registry-endpoints',
- path='glusterfs-registry-volume',
- ips=[],
- readOnly=False,
- swap=False,
- swapcopy=True),
- host=None,
- access=dict(
- modes=['ReadWriteMany']
- ),
- create_pv=True,
- create_pvc=True
- )
- ),
- router=dict()
- )
-
- defaults['logging'] = dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='logging-es',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- )
-
- defaults['loggingops'] = dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='logging-es-ops',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- )
-
- defaults['metrics'] = dict(
- deploy=False,
- duration=7,
- resolution='10s',
- storage=dict(
- kind=None,
- volume=dict(
- name='metrics',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- )
-
- defaults['prometheus'] = dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='prometheus',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- )
-
- defaults['prometheus']['alertmanager'] = dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='prometheus-alertmanager',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- )
-
- defaults['prometheus']['alertbuffer'] = dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='prometheus-alertbuffer',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- )
-
return defaults
def guess_host_provider(self):
@@ -2189,12 +1805,6 @@ class OpenShiftFacts(object):
facts_to_set,
additive_facts_to_overwrite)
- if 'docker' in new_local_facts:
- # Convert legacy log_options comma sep string to a list if present:
- if 'log_options' in new_local_facts['docker'] and \
- isinstance(new_local_facts['docker']['log_options'], string_types):
- new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',')
-
new_local_facts = self.remove_empty_facts(new_local_facts)
if new_local_facts != local_facts:
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index e70c0c420..b6501d288 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -27,6 +27,9 @@ openshift_cluster_domain: 'cluster.local'
r_openshift_hosted_router_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+openshift_hosted_router_selector: "{{ openshift_router_selector | default(openshift_hosted_infra_selector) }}"
+openshift_hosted_router_namespace: 'default'
+
openshift_hosted_router_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
openshift_hosted_router_edits:
@@ -40,13 +43,14 @@ openshift_hosted_router_edits:
value: 21600
action: put
+openshift_hosted_router_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}"
openshift_hosted_routers:
- name: router
replicas: "{{ replicas | default(1) }}"
namespace: default
serviceaccount: router
selector: "{{ openshift_hosted_router_selector | default(None) }}"
- images: "{{ openshift_hosted_router_image | default(None) }}"
+ images: "{{ openshift_hosted_router_registryurl }}"
edits: "{{ openshift_hosted_router_edits }}"
stats_port: 1936
ports:
@@ -64,6 +68,11 @@ r_openshift_hosted_router_os_firewall_allow: []
# Registry #
############
+openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}"
+openshift_hosted_registry_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}"
+openshift_hosted_registry_routecertificates: {}
+openshift_hosted_registry_routetermination: "passthrough"
+
r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
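The registry defaults above seed routecertificates and routetermination so that secure.yml (below) no longer derives them from openshift.hosted.registry. An illustrative group_vars override; the certificate paths are placeholders, and the certfile/keyfile/cafile key names are assumed rather than taken from this diff:

```yaml
# Hypothetical group_vars snippet overriding the new route defaults.
openshift_hosted_registry_routetermination: reencrypt
openshift_hosted_registry_routecertificates:
  certfile: /etc/origin/master/named_certificates/registry.crt
  keyfile: /etc/origin/master/named_certificates/registry.key
  cafile: /etc/origin/master/named_certificates/ca.crt
```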
diff --git a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py
index 7f41529ac..003ce5f9e 100644
--- a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py
+++ b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py
@@ -12,7 +12,7 @@ class FilterModule(object):
def get_router_replicas(replicas=None, router_nodes=None):
''' This function will return the number of replicas
based on the results from the defined
- openshift.hosted.router.replicas OR
+ openshift_hosted_router_replicas OR
the query from oc_obj on openshift nodes with a selector OR
default to 1
diff --git a/roles/openshift_hosted/tasks/registry.yml b/roles/openshift_hosted/tasks/registry.yml
index 9f2ef4e40..de302c740 100644
--- a/roles/openshift_hosted/tasks/registry.yml
+++ b/roles/openshift_hosted/tasks/registry.yml
@@ -6,20 +6,20 @@
check_mode: no
- name: setup firewall
- include: firewall.yml
+ import_tasks: firewall.yml
vars:
l_openshift_hosted_firewall_enabled: "{{ r_openshift_hosted_registry_firewall_enabled }}"
l_openshift_hosted_use_firewalld: "{{ r_openshift_hosted_registry_use_firewalld }}"
l_openshift_hosted_fw_allow: "{{ r_openshift_hosted_registry_os_firewall_allow }}"
l_openshift_hosted_fw_deny: "{{ r_openshift_hosted_registry_os_firewall_deny }}"
-- when: openshift.hosted.registry.replicas | default(none) is none
+- when: openshift_hosted_registry_replicas | default(none) is none
block:
- name: Retrieve list of openshift nodes matching registry selector
oc_obj:
state: list
kind: node
- selector: "{{ openshift.hosted.registry.selector | default(omit) }}"
+ selector: "{{ openshift_hosted_registry_selector }}"
register: registry_nodes
- name: set_fact l_node_count to number of nodes matching registry selector
@@ -39,16 +39,13 @@
# just 1:
- name: set_fact l_default_replicas when l_node_count > 0
set_fact:
- l_default_replicas: "{{ l_node_count if openshift.hosted.registry.storage.kind | default(none) is not none else 1 }}"
+ l_default_replicas: "{{ l_node_count if openshift_hosted_registry_storage_kind | default(none) is not none else 1 }}"
when: l_node_count | int > 0
- name: set openshift_hosted facts
set_fact:
- openshift_hosted_registry_replicas: "{{ openshift.hosted.registry.replicas | default(l_default_replicas) }}"
- openshift_hosted_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
- openshift_hosted_registry_selector: "{{ openshift.hosted.registry.selector }}"
- openshift_hosted_registry_images: "{{ openshift.hosted.registry.registryurl | default('openshift3/ose-${component}:${version}')}}"
- openshift_hosted_registry_storage_glusterfs_ips: "{%- set gluster_ips = [] %}{% if groups.glusterfs_registry is defined %}{% for node in groups.glusterfs_registry %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% elif groups.glusterfs is defined %}{% for node in groups.glusterfs %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% else %}{{ openshift.hosted.registry.storage.glusterfs.ips }}{% endif %}"
+ # This determines the gluster_ips to use for the registry by looping over the glusterfs_registry group
+ openshift_hosted_registry_storage_glusterfs_ips: "{%- set gluster_ips = [] %}{% if groups.glusterfs_registry is defined %}{% for node in groups.glusterfs_registry %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% elif groups.glusterfs is defined %}{% for node in groups.glusterfs %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% else %}{{ openshift_hosted_registry_storage_glusterfs_ips }}{% endif %}"
- name: Update registry environment variables when pushing via dns
set_fact:
@@ -97,16 +94,14 @@
service_type: ClusterIP
clusterip: '{{ openshift_hosted_registry_clusterip | default(omit) }}'
-- include: secure.yml
- static: no
+- include_tasks: secure.yml
run_once: true
when:
- - not (openshift.docker.hosted_registry_insecure | default(false) | bool)
+ - not (openshift_docker_hosted_registry_insecure | default(False)) | bool
-- include: storage/object_storage.yml
- static: no
+- include_tasks: storage/object_storage.yml
when:
- - openshift.hosted.registry.storage.kind | default(none) == 'object'
+ - openshift_hosted_registry_storage_kind | default(none) == 'object'
- name: Update openshift_hosted facts for persistent volumes
set_fact:
@@ -115,23 +110,23 @@
pvc_volume_mounts:
- name: registry-storage
type: persistentVolumeClaim
- claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
+ claim_name: "{{ openshift_hosted_registry_storage_volume_name }}-claim"
when:
- - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack', 'glusterfs']
+ - openshift_hosted_registry_storage_kind | default(none) in ['nfs', 'openstack', 'glusterfs']
-- include: storage/glusterfs_endpoints.yml
+- include_tasks: storage/glusterfs_endpoints.yml
when:
- openshift_hosted_registry_storage_glusterfs_ips|length > 0
- - openshift.hosted.registry.storage.kind | default(none) in ['glusterfs']
+ - openshift_hosted_registry_storage_kind | default(none) in ['glusterfs']
- name: Create OpenShift registry
oc_adm_registry:
name: "{{ openshift_hosted_registry_name }}"
namespace: "{{ openshift_hosted_registry_namespace }}"
selector: "{{ openshift_hosted_registry_selector }}"
- replicas: "{{ openshift_hosted_registry_replicas }}"
+ replicas: "{{ openshift_hosted_registry_replicas | default(l_default_replicas) }}"
service_account: "{{ openshift_hosted_registry_serviceaccount }}"
- images: "{{ openshift_hosted_registry_images }}"
+ images: "{{ penshift_hosted_registry_registryurl }}"
env_vars: "{{ openshift_hosted_registry_env_vars }}"
volume_mounts: "{{ openshift_hosted_registry_volumes }}"
edits: "{{ openshift_hosted_registry_edits }}"
@@ -144,14 +139,14 @@
namespace: "{{ openshift_hosted_registry_namespace }}"
- name: Wait for pod (Registry)
- include: wait_for_pod.yml
+ include_tasks: wait_for_pod.yml
vars:
l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_registry_wait }}"
l_openshift_hosted_wfp_items: "{{ r_openshift_hosted_registry_list }}"
-- include: storage/glusterfs.yml
+- include_tasks: storage/glusterfs.yml
when:
- - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap
+ - openshift_hosted_registry_storage_kind | default(none) == 'glusterfs' or openshift_hosted_registry_storage_glusterfs_swap
- name: Delete temp directory
file:
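With the set_fact for openshift_hosted_registry_replicas removed above, the replica count is now resolved lazily: l_default_replicas is the matching node count when persistent storage is configured (else 1), and it is consulted only when no explicit count was set in the inventory. A sketch of that fallback, with the surrounding task context (l_node_count, the storage kind) assumed:

```yaml
# Sketch of the replica defaulting in registry.yml; l_node_count and
# openshift_hosted_registry_storage_kind come from earlier tasks.
- set_fact:
    l_default_replicas: "{{ l_node_count if openshift_hosted_registry_storage_kind | default(none) is not none else 1 }}"
  when: l_node_count | int > 0

# Later consumers fall back to it only when no explicit count was given:
- debug:
    msg: "{{ openshift_hosted_registry_replicas | default(l_default_replicas) }}"
```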
diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml
index dd7053656..4e9219477 100644
--- a/roles/openshift_hosted/tasks/router.yml
+++ b/roles/openshift_hosted/tasks/router.yml
@@ -1,6 +1,6 @@
---
- name: setup firewall
- include: firewall.yml
+ import_tasks: firewall.yml
vars:
l_openshift_hosted_firewall_enabled: "{{ r_openshift_hosted_router_firewall_enabled }}"
l_openshift_hosted_use_firewalld: "{{ r_openshift_hosted_router_use_firewalld }}"
@@ -11,16 +11,14 @@
oc_obj:
state: list
kind: node
- namespace: "{{ openshift.hosted.router.namespace | default('default') }}"
- selector: "{{ openshift.hosted.router.selector | default(omit) }}"
+ namespace: "{{ openshift_hosted_router_namespace }}"
+ selector: "{{ openshift_hosted_router_selector }}"
register: router_nodes
- when: openshift.hosted.router.replicas | default(none) is none
+ when: openshift_hosted_router_replicas | default(none) is none
- name: set_fact replicas
set_fact:
- replicas: "{{ openshift.hosted.router.replicas|default(None) | get_router_replicas(router_nodes) }}"
- openshift_hosted_router_selector: "{{ openshift.hosted.router.selector | default(None) }}"
- openshift_hosted_router_image: "{{ openshift.hosted.router.registryurl }}"
+ replicas: "{{ openshift_hosted_router_replicas | default(None) | get_router_replicas(router_nodes) }}"
- name: Get the certificate contents for router
copy:
@@ -42,8 +40,8 @@
signer_key: "{{ openshift_master_config_dir }}/ca.key"
signer_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
hostnames:
- - "{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}"
- - "*.{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}"
+ - "{{ openshift_master_default_subdomain }}"
+ - "*.{{ openshift_master_default_subdomain }}"
cert: "{{ openshift_master_config_dir ~ '/openshift-router.crt' }}"
key: "{{ openshift_master_config_dir ~ '/openshift-router.key' }}"
with_items: "{{ openshift_hosted_routers }}"
@@ -102,7 +100,7 @@
with_items: "{{ openshift_hosted_routers }}"
- name: Wait for pod (Routers)
- include: wait_for_pod.yml
+ include_tasks: wait_for_pod.yml
vars:
l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_router_wait }}"
l_openshift_hosted_wfp_items: "{{ openshift_hosted_routers }}"
diff --git a/roles/openshift_hosted/tasks/secure.yml b/roles/openshift_hosted/tasks/secure.yml
index 174bc39a4..378ae32dc 100644
--- a/roles/openshift_hosted/tasks/secure.yml
+++ b/roles/openshift_hosted/tasks/secure.yml
@@ -1,18 +1,10 @@
---
-- name: Configure facts for docker-registry
- set_fact:
- openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift_hosted_registry_routecertificates, {}) }}"
- openshift_hosted_registry_routehost: "{{ ('routehost' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routehost, False) }}"
- openshift_hosted_registry_routetermination: "{{ ('routetermination' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routetermination, 'passthrough') }}"
-
- name: Include reencrypt route configuration
- include: secure/reencrypt.yml
- static: no
+ include_tasks: secure/reencrypt.yml
when: openshift_hosted_registry_routetermination == 'reencrypt'
- name: Include passthrough route configuration
- include: secure/passthrough.yml
- static: no
+ include_tasks: secure/passthrough.yml
when: openshift_hosted_registry_routetermination == 'passthrough'
- name: Fetch the docker-registry route
@@ -39,7 +31,7 @@
- "{{ docker_registry_route.results[0].spec.host }}"
- "{{ openshift_hosted_registry_name }}.default.svc"
- "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift_cluster_domain }}"
- - "{{ openshift_hosted_registry_routehost }}"
+ - "{{ openshift_hosted_registry_routehost | default(omit) }}"
cert: "{{ docker_registry_cert_path }}"
key: "{{ docker_registry_key_path }}"
expire_days: "{{ openshift_hosted_registry_cert_expire_days }}"
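
For context: `default(omit)` yields Ansible's omit placeholder, so an optional value can simply be left out when the variable is undefined, instead of the literal `False` the deleted `set_fact` used to produce. A minimal sketch on a plain module parameter (module and `file_mode` variable chosen for illustration):

```
- name: Touch a file, passing mode only when file_mode is defined
  file:
    path: /tmp/example
    state: touch
    # the mode argument is dropped entirely when file_mode is undefined
    mode: "{{ file_mode | default(omit) }}"
```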
diff --git a/roles/openshift_hosted/tasks/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml
index 7cae67baa..18b2edcc6 100644
--- a/roles/openshift_hosted/tasks/storage/glusterfs.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml
@@ -17,7 +17,7 @@
until:
- "registry_pods.results.results[0]['items'] | count > 0"
# There must be as many matching pods with 'Ready' status True as there are expected replicas
- - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | int"
+ - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | default(l_default_replicas) | int"
delay: 10
retries: "{{ (600 / 10) | int }}"
@@ -35,7 +35,7 @@
mount:
state: mounted
fstype: glusterfs
- src: "{% if 'glusterfs_registry' in groups %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift.hosted.registry.storage.glusterfs.path }}"
+ src: "{% if 'glusterfs_registry' in groups %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift_hosted_registry_storage_glusterfs_path }}"
name: "{{ mktemp.stdout }}"
- name: Set registry volume permissions
@@ -60,7 +60,7 @@
- name: Copy current registry contents to new GlusterFS volume
command: "oc rsync {{ registry_pod_name }}:/registry/ {{ mktemp.stdout }}/"
- when: openshift.hosted.registry.storage.glusterfs.swapcopy
+ when: openshift_hosted_registry_storage_glusterfs_swapcopy
- name: Swap new GlusterFS registry volume
oc_volume:
@@ -68,7 +68,7 @@
name: "{{ openshift_hosted_registry_name }}"
vol_name: registry-storage
mount_type: pvc
- claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
+ claim_name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-claim"
- name: Deactivate registry maintenance mode
oc_env:
@@ -77,7 +77,7 @@
state: absent
env_vars:
- REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true'
- when: openshift.hosted.registry.storage.glusterfs.swap
+ when: openshift_hosted_registry_storage_glusterfs_swap
- name: Unmount registry volume and clean up mount point/fstab
mount:
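
For context: the reworked `src:` expression above resolves the GlusterFS server in a fixed priority order. A sketch of that order and of the mount it ultimately renders (host and path values are illustrative):

```
# Resolution order for the server half of src:
#   1. openshift_hosted_registry_storage_glusterfs_ips[0], when the list is non-empty
#   2. hostvars[node].glusterfs_hostname
#   3. hostvars[node].openshift.node.nodename
#   4. the inventory name of node itself
# where node is the first host in groups.glusterfs_registry, falling back
# to the first host in groups.glusterfs.
- name: Example of the mount the expression renders
  mount:
    state: mounted
    fstype: glusterfs
    src: "gluster-0.example.com:/registry-volume"
    name: "/tmp/openshift-registry-XXXXXX"
```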
diff --git a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
index 0f4381748..bd7181c17 100644
--- a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
@@ -10,7 +10,7 @@
dest: "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
- name: Create GlusterFS registry service and endpoint
- command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift.hosted.registry.namespace | default('default') }}"
+ command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}"
with_items:
- "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
- "{{ mktempHosted.stdout }}/glusterfs-registry-endpoints.yml"
diff --git a/roles/openshift_hosted/tasks/storage/object_storage.yml b/roles/openshift_hosted/tasks/storage/object_storage.yml
index 8553a8098..a8c26fb51 100644
--- a/roles/openshift_hosted/tasks/storage/object_storage.yml
+++ b/roles/openshift_hosted/tasks/storage/object_storage.yml
@@ -1,6 +1,6 @@
---
-- include: s3.yml
- when: openshift.hosted.registry.storage.provider == 's3'
+- include_tasks: s3.yml
+ when: openshift_hosted_registry_storage_provider == 's3'
- name: Ensure the registry secret exists
oc_secret:
diff --git a/roles/openshift_hosted/tasks/storage/s3.yml b/roles/openshift_hosted/tasks/storage/s3.yml
index 8e905d905..4c100ee4e 100644
--- a/roles/openshift_hosted/tasks/storage/s3.yml
+++ b/roles/openshift_hosted/tasks/storage/s3.yml
@@ -2,8 +2,8 @@
- name: Assert that S3 variables are provided for registry_config template
assert:
that:
- - openshift.hosted.registry.storage.s3.bucket | default(none) is not none
- openshift.hosted.registry.storage.s3.region | default(none) is not none
+ - openshift_hosted_registry_storage_s3_bucket | default(none) is not none
+ - openshift_hosted_registry_storage_s3_region | default(none) is not none
msg: |
When using S3 storage, the following variables are required:
openshift_hosted_registry_storage_s3_bucket
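
For context: with the `openshift.hosted.*` fact tree retired, the S3 settings named in this assert are supplied as ordinary inventory variables. A minimal group_vars sketch (bucket and region values are placeholders):

```
# group_vars/OSEv3.yml
openshift_hosted_registry_storage_kind: object
openshift_hosted_registry_storage_provider: s3
openshift_hosted_registry_storage_s3_bucket: my-registry-bucket
openshift_hosted_registry_storage_s3_region: us-east-1
```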
diff --git a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2
index 607d25533..3c874d910 100644
--- a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2
+++ b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2
@@ -2,7 +2,7 @@
apiVersion: v1
kind: Endpoints
metadata:
- name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }}
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
subsets:
- addresses:
{% for ip in openshift_hosted_registry_storage_glusterfs_ips %}
diff --git a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2
index 452c7c3e1..f18c94a4f 100644
--- a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2
+++ b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2
@@ -2,7 +2,7 @@
apiVersion: v1
kind: Service
metadata:
- name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }}
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
spec:
ports:
- port: 1
diff --git a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2
index 607d25533..3c874d910 100644
--- a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2
+++ b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2
@@ -2,7 +2,7 @@
apiVersion: v1
kind: Endpoints
metadata:
- name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }}
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
subsets:
- addresses:
{% for ip in openshift_hosted_registry_storage_glusterfs_ips %}
diff --git a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2
index 452c7c3e1..f18c94a4f 100644
--- a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2
+++ b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2
@@ -2,7 +2,7 @@
apiVersion: v1
kind: Service
metadata:
- name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }}
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
spec:
ports:
- port: 1
diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml
index 8fc70cecb..ed97d539c 100644
--- a/roles/openshift_hosted_facts/tasks/main.yml
+++ b/roles/openshift_hosted_facts/tasks/main.yml
@@ -1,19 +1 @@
---
-# openshift_*_selector variables have been deprecated in favor of
-# openshift_hosted_*_selector variables.
-- set_fact:
- openshift_hosted_router_selector: "{{ openshift_router_selector | default(openshift_hosted_infra_selector) }}"
- when: openshift_hosted_router_selector is not defined and openshift_hosted_infra_selector is defined
-- set_fact:
- openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}"
- when: openshift_hosted_registry_selector is not defined and openshift_hosted_infra_selector is defined
-
-- name: Set hosted facts
- openshift_facts:
- role: "{{ item }}"
- openshift_env: "{{ hostvars
- | oo_merge_hostvars(vars, inventory_hostname)
- | oo_openshift_env }}"
- openshift_env_structures:
- - 'openshift.hosted.router.*'
- with_items: [hosted, logging, loggingops, metrics, prometheus]
diff --git a/roles/openshift_hosted_metrics/README.md b/roles/openshift_hosted_metrics/README.md
deleted file mode 100644
index c2af3c494..000000000
--- a/roles/openshift_hosted_metrics/README.md
+++ /dev/null
@@ -1,54 +0,0 @@
-OpenShift Metrics with Hawkular
-====================
-
-OpenShift Metrics Installation
-
-Requirements
-------------
-
-* Ansible 2.2
-* It requires subdomain fqdn to be set.
-* If persistence is enabled, then it also requires NFS.
-
-Role Variables
---------------
-
-From this role:
-
-| Name | Default value | |
-|-------------------------------------------------|-----------------------|-------------------------------------------------------------|
-| openshift_hosted_metrics_deploy | `False` | If metrics should be deployed |
-| openshift_hosted_metrics_public_url | null | Hawkular metrics public url |
-| openshift_hosted_metrics_storage_nfs_directory | `/exports` | Root export directory. |
-| openshift_hosted_metrics_storage_volume_name | `metrics` | Metrics volume within openshift_hosted_metrics_volume_dir |
-| openshift_hosted_metrics_storage_volume_size | `10Gi` | Metrics volume size |
-| openshift_hosted_metrics_storage_nfs_options | `*(rw,root_squash)` | NFS options for configured exports. |
-| openshift_hosted_metrics_duration | `7` | Metrics query duration |
-| openshift_hosted_metrics_resolution | `10s` | Metrics resolution |
-
-
-Dependencies
-------------
-openshift_facts
-openshift_examples
-openshift_master_facts
-
-Example Playbook
-----------------
-
-```
-- name: Configure openshift-metrics
- hosts: oo_first_master
- roles:
- - role: openshift_hosted_metrics
-```
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-Jose David Martín (j.david.nieto@gmail.com)
diff --git a/roles/openshift_hosted_metrics/defaults/main.yml b/roles/openshift_hosted_metrics/defaults/main.yml
deleted file mode 100644
index a01f24df8..000000000
--- a/roles/openshift_hosted_metrics/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted"
diff --git a/roles/openshift_hosted_metrics/handlers/main.yml b/roles/openshift_hosted_metrics/handlers/main.yml
deleted file mode 100644
index 074b72942..000000000
--- a/roles/openshift_hosted_metrics/handlers/main.yml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
- when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
- notify: Verify API Server
-
-# We retry the controllers because the API may not be 100% initialized yet.
-- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
- retries: 3
- delay: 5
- register: result
- until: result.rc == 0
- when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
-
-- name: Verify API Server
- # Using curl here since the uri module requires python-httplib2 and
- # wait_for port doesn't provide health information.
- command: >
- curl --silent --tlsv1.2
- --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {{ openshift.master.api_url }}/healthz/ready
- args:
- # Disables the following warning:
- # Consider using get_url or uri module rather than running curl
- warn: no
- register: api_available_output
- until: api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- changed_when: false
diff --git a/roles/openshift_hosted_metrics/meta/main.yaml b/roles/openshift_hosted_metrics/meta/main.yaml
deleted file mode 100644
index debca3ca6..000000000
--- a/roles/openshift_hosted_metrics/meta/main.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-galaxy_info:
- author: David Martín
- description:
- company:
- license: Apache License, Version 2.0
- min_ansible_version: 2.2
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
- - system
-dependencies:
-- { role: openshift_examples }
-- { role: openshift_facts }
-- { role: openshift_master_facts }
diff --git a/roles/openshift_hosted_metrics/tasks/install.yml b/roles/openshift_hosted_metrics/tasks/install.yml
deleted file mode 100644
index 15dd1bd54..000000000
--- a/roles/openshift_hosted_metrics/tasks/install.yml
+++ /dev/null
@@ -1,132 +0,0 @@
----
-
-- name: Test if metrics-deployer service account exists
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace=openshift-infra
- get serviceaccount metrics-deployer -o json
- register: serviceaccount
- changed_when: false
- failed_when: false
-
-- name: Create metrics-deployer Service Account
- shell: >
- echo {{ metrics_deployer_sa | to_json | quote }} |
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- create -f -
- when: serviceaccount.rc == 1
-
-- name: Test edit permissions
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- get rolebindings -o jsonpath='{.items[?(@.metadata.name == "edit")].userNames}'
- register: edit_rolebindings
- changed_when: false
-
-- name: Add edit permission to the openshift-infra project to metrics-deployer SA
- command: >
- {{ openshift.common.client_binary }} adm
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- policy add-role-to-user edit
- system:serviceaccount:openshift-infra:metrics-deployer
- when: "'system:serviceaccount:openshift-infra:metrics-deployer' not in edit_rolebindings.stdout"
-
-- name: Test hawkular view permissions
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- get rolebindings -o jsonpath='{.items[?(@.metadata.name == "view")].userNames}'
- register: view_rolebindings
- changed_when: false
-
-- name: Add view permissions to hawkular SA
- command: >
- {{ openshift.common.client_binary }} adm
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- policy add-role-to-user view
- system:serviceaccount:openshift-infra:hawkular
- when: "'system:serviceaccount:openshift-infra:hawkular' not in view_rolebindings"
-
-- name: Test cluster-reader permissions
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- get clusterrolebindings -o jsonpath='{.items[?(@.metadata.name == "cluster-reader")].userNames}'
- register: cluster_reader_clusterrolebindings
- changed_when: false
-
-- name: Add cluster-reader permission to the openshift-infra project to heapster SA
- command: >
- {{ openshift.common.client_binary }} adm
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- policy add-cluster-role-to-user cluster-reader
- system:serviceaccount:openshift-infra:heapster
- when: "'system:serviceaccount:openshift-infra:heapster' not in cluster_reader_clusterrolebindings.stdout"
-
-- name: Create metrics-deployer secret
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- secrets new metrics-deployer nothing=/dev/null
- register: metrics_deployer_secret
- changed_when: metrics_deployer_secret.rc == 0
- failed_when: metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr
-
-# TODO: extend this to allow user passed in certs or generating cert with
-# OpenShift CA
-- name: Build metrics deployer command
- set_fact:
- deployer_cmd: "{{ openshift.common.client_binary }} process -f \
- {{ hosted_base }}/metrics-deployer.yaml -v \
- HAWKULAR_METRICS_HOSTNAME={{ g_metrics_hostname }} \
- -v USE_PERSISTENT_STORAGE={{metrics_persistence | string | lower }} \
- -v DYNAMICALLY_PROVISION_STORAGE={{metrics_dynamic_vol | string | lower }} \
- -v METRIC_DURATION={{ openshift.hosted.metrics.duration }} \
- -v METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }}
- {{ image_prefix }} \
- {{ image_version }} \
- -v MODE={{ deployment_mode }} \
- | {{ openshift.common.client_binary }} --namespace openshift-infra \
- --config={{ openshift_hosted_metrics_kubeconfig }} \
- create -o name -f -"
-
-- name: Deploy Metrics
- shell: "{{ deployer_cmd }}"
- register: deploy_metrics
- failed_when: "'already exists' not in deploy_metrics.stderr and deploy_metrics.rc != 0"
- changed_when: deploy_metrics.rc == 0
-
-- set_fact:
- deployer_pod: "{{ deploy_metrics.stdout[1:2] }}"
-
-# TODO: re-enable this once the metrics deployer validation issue is fixed
-# when using dynamically provisioned volumes
-- name: "Wait for image pull and deployer pod"
- shell: >
- {{ openshift.common.client_binary }}
- --namespace openshift-infra
- --config={{ openshift_hosted_metrics_kubeconfig }}
- get {{ deploy_metrics.stdout }}
- register: deploy_result
- until: "{{ 'Completed' in deploy_result.stdout }}"
- failed_when: False
- retries: 60
- delay: 10
-
-- name: Configure master for metrics
- modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: assetConfig.metricsPublicURL
- yaml_value: "{{ openshift_hosted_metrics_deploy_url }}"
- notify: restart master
diff --git a/roles/openshift_hosted_metrics/tasks/main.yaml b/roles/openshift_hosted_metrics/tasks/main.yaml
deleted file mode 100644
index 5ce8aa92b..000000000
--- a/roles/openshift_hosted_metrics/tasks/main.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
----
-- name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
-
-- name: Record kubeconfig tmp dir
- set_fact:
- openshift_hosted_metrics_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
-
-- name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_hosted_metrics_kubeconfig }}
- changed_when: False
-
-- name: Set hosted metrics facts
- openshift_facts:
- role: hosted
- openshift_env: "{{ hostvars
- | oo_merge_hostvars(vars, inventory_hostname)
- | oo_openshift_env }}"
- openshift_env_structures:
- - 'openshift.hosted.metrics.*'
-
-- set_fact:
- metrics_persistence: "{{ openshift.hosted.metrics.storage_kind | default(none) is not none }}"
- metrics_dynamic_vol: "{{ openshift.hosted.metrics.storage_kind | default(none) == 'dynamic' }}"
- metrics_template_dir: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples/infrastructure-templates/{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
- image_prefix: "{{ '-v IMAGE_PREFIX=' ~ openshift.hosted.metrics.deployer.prefix if 'prefix' in openshift.hosted.metrics.deployer else '' }}"
- image_version: "{{ '-v IMAGE_VERSION=' ~ openshift.hosted.metrics.deployer.version if 'version' in openshift.hosted.metrics.deployer else '' }}"
-
-
-- name: Check for existing metrics pods
- shell: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- get pods -l {{ item }} | grep -q Running
- register: metrics_pods_status
- with_items:
- - metrics-infra=hawkular-metrics
- - metrics-infra=heapster
- - metrics-infra=hawkular-cassandra
- failed_when: false
- changed_when: false
-
-- name: Check for previous deployer
- shell: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- get pods -l metrics-infra=deployer --sort-by='{.metadata.creationTimestamp}' | tail -1 | grep metrics-deployer-
- register: metrics_deployer_status
- failed_when: false
- changed_when: false
-
-- name: Record current deployment status
- set_fact:
- greenfield: "{{ not metrics_deployer_status.rc == 0 }}"
- failed_error: "{{ True if 'Error' in metrics_deployer_status.stdout else False }}"
- metrics_running: "{{ metrics_pods_status.results | oo_collect(attribute='rc') == [0,0,0] }}"
-
-- name: Set deployment mode
- set_fact:
- deployment_mode: "{{ 'refresh' if (failed_error | bool or metrics_upgrade | bool) else 'deploy' }}"
-
-# TODO: handle non greenfield deployments in the future
-- include: install.yml
- when: greenfield
-
-- name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
diff --git a/roles/openshift_hosted_metrics/vars/main.yaml b/roles/openshift_hosted_metrics/vars/main.yaml
deleted file mode 100644
index 6c207d6ac..000000000
--- a/roles/openshift_hosted_metrics/vars/main.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-hawkular_permission_oc_commands:
- - policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra
- - policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster
-
-metrics_deployer_sa:
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: metrics-deployer
- secrets:
- - name: metrics-deployer
-
-
-hawkular_tmp_conf: /tmp/hawkular_admin.kubeconfig
-
-hawkular_persistence: "{% if openshift.hosted.metrics.storage.kind != None %}true{% else %}false{% endif %}"
-
-hawkular_type: "{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
-
-metrics_upgrade: openshift.hosted.metrics.upgrade | default(False)
diff --git a/roles/openshift_loadbalancer/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml
index 239b16427..f9c16ba40 100644
--- a/roles/openshift_loadbalancer/defaults/main.yml
+++ b/roles/openshift_loadbalancer/defaults/main.yml
@@ -26,6 +26,8 @@ r_openshift_loadbalancer_os_firewall_allow:
port: "{{ nuage_mon_rest_server_port | default(9443) }}/tcp"
cond: "{{ r_openshift_lb_use_nuage | bool }}"
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+
# NOTE
# r_openshift_lb_use_nuage_default may be defined external to this role.
# openshift_use_nuage, if defined, may affect other roles or play behavior.
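
For context: `openshift_docker_service_name` replaces the `openshift.docker.service_name` fact consumed by the unit template below, collapsing to one of two unit names from a single toggle. A sketch of how a play could inspect the result (task is illustrative):

```
- name: Show which runtime unit the haproxy service will be tied to
  debug:
    # "container-engine" when openshift_docker_use_system_container is true,
    # plain "docker" otherwise
    msg: "{{ openshift_docker_service_name }}.service"
```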
diff --git a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
index 72182fcdd..0343a7eb0 100644
--- a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
+++ b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
@@ -1,11 +1,11 @@
[Unit]
-After={{ openshift.docker.service_name }}.service
-Requires={{ openshift.docker.service_name }}.service
-PartOf={{ openshift.docker.service_name }}.service
+After={{ openshift_docker_service_name }}.service
+Requires={{ openshift_docker_service_name }}.service
+PartOf={{ openshift_docker_service_name }}.service
[Service]
ExecStartPre=-/usr/bin/docker rm -f openshift_loadbalancer
-ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer -p {{ openshift_master_api_port | default(8443) }}:{{ openshift_master_api_port | default(8443) }} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint=haproxy {{ openshift.common.router_image }}:{{ openshift_image_tag }} -f /etc/haproxy/haproxy.cfg
+ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer {% for frontend in openshift_loadbalancer_frontends %} {% for bind in frontend.binds %} -p {{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }}:{{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }} {% endfor %} {% endfor %} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint=haproxy {{ openshift.common.router_image }}:{{ openshift_image_tag }} -f /etc/haproxy/haproxy.cfg
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop openshift_loadbalancer
LimitNOFILE={{ openshift_loadbalancer_limit_nofile | default(100000) }}
@@ -14,4 +14,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy={{ openshift.docker.service_name }}.service
+WantedBy={{ openshift_docker_service_name }}.service
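
For context: the new ExecStart publishes one port mapping per frontend bind instead of hard-coding the API port; the `regex_replace` keeps only the port portion of each bind address. A sketch of what the filter yields (bind value is illustrative):

```
- name: Extract the port from a haproxy bind string
  debug:
    msg: "{{ '*:8443' | regex_replace('^[^:]*:(\\d+).*$', '\\1') }}"  # -> 8443
```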
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 497c6e0c5..2f1aa061f 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -28,7 +28,7 @@ openshift_logging_curator_ops_memory_limit: 256Mi
openshift_logging_curator_ops_cpu_request: 100m
openshift_logging_curator_ops_nodeselector: {}
-openshift_logging_kibana_hostname: "{{ 'kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_kibana_hostname: "{{ 'kibana.' ~ openshift_master_default_subdomain }}"
openshift_logging_kibana_cpu_limit: null
openshift_logging_kibana_memory_limit: 736Mi
openshift_logging_kibana_cpu_request: 100m
@@ -54,7 +54,7 @@ openshift_logging_kibana_key: ""
#for the public facing kibana certs
openshift_logging_kibana_ca: ""
-openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ openshift_master_default_subdomain }}"
openshift_logging_kibana_ops_cpu_limit: null
openshift_logging_kibana_ops_memory_limit: 736Mi
openshift_logging_kibana_ops_cpu_request: 100m
@@ -109,7 +109,7 @@ openshift_logging_es_config: {}
# for exposing es to external (outside of the cluster) clients
openshift_logging_es_allow_external: False
-openshift_logging_es_hostname: "{{ 'es.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_es_hostname: "{{ 'es.' ~ openshift_master_default_subdomain }}"
#The absolute path on the control node to the cert file to use
#for the public facing es certs
@@ -145,7 +145,7 @@ openshift_logging_es_ops_nodeselector: {}
# for exposing es-ops to external (outside of the cluster) clients
openshift_logging_es_ops_allow_external: False
-openshift_logging_es_ops_hostname: "{{ 'es-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_es_ops_hostname: "{{ 'es-ops.' ~ openshift_master_default_subdomain }}"
#The absolute path on the control node to the cert file to use
#for the public facing es-ops certs
@@ -165,7 +165,7 @@ openshift_logging_storage_access_modes: ['ReadWriteOnce']
# mux - secure_forward listener service
openshift_logging_mux_allow_external: False
openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
-openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_mux_hostname: "{{ 'mux.' ~ openshift_master_default_subdomain }}"
openshift_logging_mux_port: 24284
openshift_logging_mux_cpu_limit: null
openshift_logging_mux_memory_limit: 512Mi
diff --git a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml
index 074b72942..e0329ee7c 100644
--- a/roles/openshift_logging/handlers/main.yml
+++ b/roles/openshift_logging/handlers/main.yml
@@ -1,7 +1,7 @@
---
- name: restart master api
systemd: name={{ openshift.common.service_type }}-master-api state=restarted
- when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ when: (not (master_api_service_status_changed | default(false) | bool))
notify: Verify API Server
# We retry the controllers because the API may not be 100% initialized yet.
@@ -11,7 +11,7 @@
delay: 5
register: result
until: result.rc == 0
- when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ when: (not (master_controllers_service_status_changed | default(false) | bool))
- name: Verify API Server
# Using curl here since the uri module requires python-httplib2 and
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index f526fd734..082c0128f 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -36,7 +36,7 @@
- top_dir: '{{generated_certs_dir}}'
when: not signing_conf_file.stat.exists
-- include: procure_server_certs.yaml
+- include_tasks: procure_server_certs.yaml
loop_control:
loop_var: cert_info
with_items:
@@ -45,7 +45,7 @@
- procure_component: kibana-internal
hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}"
-- include: procure_server_certs.yaml
+- include_tasks: procure_server_certs.yaml
loop_control:
loop_var: cert_info
with_items:
@@ -53,14 +53,14 @@
hostnames: "logging-mux, {{openshift_logging_mux_hostname}}"
when: openshift_logging_use_mux | bool
-- include: procure_shared_key.yaml
+- include_tasks: procure_shared_key.yaml
loop_control:
loop_var: shared_key_info
with_items:
- procure_component: mux
when: openshift_logging_use_mux | bool
-- include: procure_server_certs.yaml
+- include_tasks: procure_server_certs.yaml
loop_control:
loop_var: cert_info
with_items:
@@ -68,7 +68,7 @@
hostnames: "es, {{openshift_logging_es_hostname}}"
when: openshift_logging_es_allow_external | bool
-- include: procure_server_certs.yaml
+- include_tasks: procure_server_certs.yaml
loop_control:
loop_var: cert_info
with_items:
@@ -109,7 +109,7 @@
- not ca_cert_srl_file.stat.exists
- name: Generate PEM certs
- include: generate_pems.yaml component={{node_name}}
+ include_tasks: generate_pems.yaml component={{node_name}}
with_items:
- system.logging.fluentd
- system.logging.kibana
@@ -119,7 +119,7 @@
loop_var: node_name
- name: Generate PEM cert for mux
- include: generate_pems.yaml component={{node_name}}
+ include_tasks: generate_pems.yaml component={{node_name}}
with_items:
- system.logging.mux
loop_control:
@@ -127,7 +127,7 @@
when: openshift_logging_use_mux | bool
- name: Generate PEM cert for Elasticsearch external route
- include: generate_pems.yaml component={{node_name}}
+ include_tasks: generate_pems.yaml component={{node_name}}
with_items:
- system.logging.es
loop_control:
@@ -135,7 +135,7 @@
when: openshift_logging_es_allow_external | bool
- name: Creating necessary JKS certs
- include: generate_jks.yaml
+ include_tasks: generate_jks.yaml
# TODO: make idempotent
- name: Generate proxy session
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 2fefdc894..bb8ebec6b 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -52,7 +52,7 @@
changed_when: False
check_mode: no
-- include: generate_certs.yaml
+- include_tasks: generate_certs.yaml
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -250,7 +250,7 @@
when:
- openshift_logging_use_ops | bool
-- include: annotate_ops_projects.yaml
+- include_tasks: annotate_ops_projects.yaml
## Curator
- include_role:
@@ -311,4 +311,4 @@
openshift_logging_install_eventrouter | default(false) | bool
-- include: update_master_config.yaml
+- include_tasks: update_master_config.yaml
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index 7f8e88036..91db457d1 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -19,11 +19,11 @@
check_mode: no
become: no
-- include: install_logging.yaml
+- include_tasks: install_logging.yaml
when:
- openshift_logging_install_logging | default(false) | bool
-- include: delete_logging.yaml
+- include_tasks: delete_logging.yaml
when:
- not openshift_logging_install_logging | default(false) | bool
diff --git a/roles/openshift_logging_curator/meta/main.yaml b/roles/openshift_logging_curator/meta/main.yaml
index 6752fb7f9..d4635aab0 100644
--- a/roles/openshift_logging_curator/meta/main.yaml
+++ b/roles/openshift_logging_curator/meta/main.yaml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml
index 7ddf57450..e7ef5ff22 100644
--- a/roles/openshift_logging_curator/tasks/main.yaml
+++ b/roles/openshift_logging_curator/tasks/main.yaml
@@ -12,7 +12,7 @@
openshift_logging_curator_image_prefix: "{{ openshift_logging_curator_image_prefix | default(__openshift_logging_curator_image_prefix) }}"
openshift_logging_curator_image_version: "{{ openshift_logging_curator_image_version | default(__openshift_logging_curator_image_version) }}"
-- include: determine_version.yaml
+- include_tasks: determine_version.yaml
# allow passing in a tempdir
- name: Create temp directory for doing work in
diff --git a/roles/openshift_logging_elasticsearch/meta/main.yaml b/roles/openshift_logging_elasticsearch/meta/main.yaml
index 097270772..6a9a6539c 100644
--- a/roles/openshift_logging_elasticsearch/meta/main.yaml
+++ b/roles/openshift_logging_elasticsearch/meta/main.yaml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 770892d52..8f2050043 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -30,7 +30,7 @@
openshift_logging_elasticsearch_image_prefix: "{{ openshift_logging_elasticsearch_image_prefix | default(__openshift_logging_elasticsearch_image_prefix) }}"
openshift_logging_elasticsearch_image_version: "{{ openshift_logging_elasticsearch_image_version | default(__openshift_logging_elasticsearch_image_version) }}"
-- include: determine_version.yaml
+- include_tasks: determine_version.yaml
# allow passing in a tempdir
- name: Create temp directory for doing work in
diff --git a/roles/openshift_logging_eventrouter/tasks/main.yaml b/roles/openshift_logging_eventrouter/tasks/main.yaml
index b1f93eeb9..96b181d61 100644
--- a/roles/openshift_logging_eventrouter/tasks/main.yaml
+++ b/roles/openshift_logging_eventrouter/tasks/main.yaml
@@ -12,8 +12,8 @@
openshift_logging_eventrouter_image_prefix: "{{ openshift_logging_eventrouter_image_prefix | default(__openshift_logging_eventrouter_image_prefix) }}"
openshift_logging_eventrouter_image_version: "{{ openshift_logging_eventrouter_image_version | default(__openshift_logging_eventrouter_image_version) }}"
-- include: "{{ role_path }}/tasks/install_eventrouter.yaml"
+- include_tasks: install_eventrouter.yaml
when: openshift_logging_install_eventrouter | default(false) | bool
-- include: "{{ role_path }}/tasks/delete_eventrouter.yaml"
+- include_tasks: delete_eventrouter.yaml
when: not openshift_logging_install_eventrouter | default(false) | bool
diff --git a/roles/openshift_logging_fluentd/meta/main.yaml b/roles/openshift_logging_fluentd/meta/main.yaml
index 2003aacb2..89c98204f 100644
--- a/roles/openshift_logging_fluentd/meta/main.yaml
+++ b/roles/openshift_logging_fluentd/meta/main.yaml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index f8683ab75..87eedfb4b 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -47,7 +47,7 @@
openshift_logging_fluentd_image_prefix: "{{ openshift_logging_fluentd_image_prefix | default(__openshift_logging_fluentd_image_prefix) }}"
openshift_logging_fluentd_image_version: "{{ openshift_logging_fluentd_image_version | default(__openshift_logging_fluentd_image_version) }}"
-- include: determine_version.yaml
+- include_tasks: determine_version.yaml
# allow passing in a tempdir
- name: Create temp directory for doing work in
@@ -216,7 +216,7 @@
openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
when: "'--all' in openshift_logging_fluentd_hosts"
-- include: label_and_wait.yaml
+- include_tasks: label_and_wait.yaml
vars:
node: "{{ fluentd_host }}"
with_items: "{{ openshift_logging_fluentd_hosts }}"
diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml
index 6cdf7c8f3..899193838 100644
--- a/roles/openshift_logging_kibana/defaults/main.yml
+++ b/roles/openshift_logging_kibana/defaults/main.yml
@@ -10,7 +10,7 @@ openshift_logging_kibana_cpu_limit: null
openshift_logging_kibana_cpu_request: 100m
openshift_logging_kibana_memory_limit: 736Mi
-openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}"
openshift_logging_kibana_es_host: "logging-es"
openshift_logging_kibana_es_port: 9200
diff --git a/roles/openshift_logging_kibana/meta/main.yaml b/roles/openshift_logging_kibana/meta/main.yaml
index 89e08abc0..d97586a37 100644
--- a/roles/openshift_logging_kibana/meta/main.yaml
+++ b/roles/openshift_logging_kibana/meta/main.yaml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
index 9d99114c5..77bf8042a 100644
--- a/roles/openshift_logging_kibana/tasks/main.yaml
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -15,7 +15,7 @@
openshift_logging_kibana_proxy_image_prefix: "{{ openshift_logging_kibana_proxy_image_prefix | default(__openshift_logging_kibana_proxy_image_prefix) }}"
openshift_logging_kibana_proxy_image_version: "{{ openshift_logging_kibana_proxy_image_version | default(__openshift_logging_kibana_proxy_image_version) }}"
-- include: determine_version.yaml
+- include_tasks: determine_version.yaml
# allow passing in a tempdir
- name: Create temp directory for doing work in
diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml
index cd15da939..1e6c501bf 100644
--- a/roles/openshift_logging_mux/defaults/main.yml
+++ b/roles/openshift_logging_mux/defaults/main.yml
@@ -28,7 +28,7 @@ openshift_logging_mux_journal_read_from_head: "{{ openshift_hosted_logging_journ
openshift_logging_mux_allow_external: False
openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
-openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_mux_hostname: "{{ 'mux.' ~ openshift_master_default_subdomain }}"
openshift_logging_mux_port: 24284
# the namespace to use for undefined projects should come first, followed by any
# additional namespaces to create by default - users will typically not need to set this
diff --git a/roles/openshift_logging_mux/meta/main.yaml b/roles/openshift_logging_mux/meta/main.yaml
index f40beb79d..f271d8d7d 100644
--- a/roles/openshift_logging_mux/meta/main.yaml
+++ b/roles/openshift_logging_mux/meta/main.yaml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
index 242d92188..68948bce2 100644
--- a/roles/openshift_logging_mux/tasks/main.yaml
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -20,7 +20,7 @@
openshift_logging_mux_image_prefix: "{{ openshift_logging_mux_image_prefix | default(__openshift_logging_mux_image_prefix) }}"
openshift_logging_mux_image_version: "{{ openshift_logging_mux_image_version | default(__openshift_logging_mux_image_version) }}"
-- include: determine_version.yaml
+- include_tasks: determine_version.yaml
# allow passing in a tempdir
- name: Create temp directory for doing work in
diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml
index 3bade9e8c..f212dba7c 100644
--- a/roles/openshift_management/tasks/main.yml
+++ b/roles/openshift_management/tasks/main.yml
@@ -3,7 +3,7 @@
# Users, projects, and privileges
- name: Run pre-install Management validation checks
- include: validate.yml
+ include_tasks: validate.yml
# This creates a service account allowing Container Provider
# integration (managing OCP/Origin via MIQ/Management)
@@ -18,18 +18,18 @@
display_name: "{{ openshift_management_project_description }}"
- name: Create and Authorize Management Accounts
- include: accounts.yml
+ include_tasks: accounts.yml
######################################################################
# STORAGE - Initialize basic storage class
- name: Determine the correct NFS host if required
- include: storage/nfs_server.yml
+ include_tasks: storage/nfs_server.yml
when: openshift_management_storage_class in ['nfs', 'nfs_external']
#---------------------------------------------------------------------
# * nfs - set up NFS shares on the first master for a proof of concept
- name: Create required NFS exports for Management app storage
- include: storage/nfs.yml
+ include_tasks: storage/nfs.yml
when: openshift_management_storage_class == 'nfs'
#---------------------------------------------------------------------
@@ -56,14 +56,14 @@
######################################################################
# APPLICATION TEMPLATE
- name: Install the Management app and PV templates
- include: template.yml
+ include_tasks: template.yml
######################################################################
# APP & DB Storage
# For local/external NFS backed installations
- name: "Create the required App and DB PVs using {{ openshift_management_storage_class }}"
- include: storage/create_nfs_pvs.yml
+ include_tasks: storage/create_nfs_pvs.yml
when:
- openshift_management_storage_class in ['nfs', 'nfs_external']
diff --git a/roles/openshift_management/tasks/storage/storage.yml b/roles/openshift_management/tasks/storage/storage.yml
index d8bf7aa3e..a3675b29b 100644
--- a/roles/openshift_management/tasks/storage/storage.yml
+++ b/roles/openshift_management/tasks/storage/storage.yml
@@ -1,3 +1,3 @@
---
-- include: nfs.yml
+- include_tasks: nfs.yml
when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 8e4a46ebb..38b2fd8b8 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -52,6 +52,8 @@ openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container |
containerized_svc_dir: "/usr/lib/systemd/system"
ha_svc_template_path: "native-cluster"
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+
# NOTE
# r_openshift_master_*_default may be defined external to this role.
# openshift_use_*, if defined, may affect other roles or play behavior.
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index 359536202..e6b8b8ac8 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -5,7 +5,6 @@
state: restarted
when:
- not (master_api_service_status_changed | default(false) | bool)
- - openshift.master.cluster_method == 'native'
notify:
- Verify API Server
@@ -18,7 +17,6 @@
until: result.rc == 0
when:
- not (master_controllers_service_status_changed | default(false) | bool)
- - openshift.master.cluster_method == 'native'
- name: Verify API Server
# Using curl here since the uri module requires python-httplib2 and
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index a1cda2ad4..bf0cbbf18 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -15,3 +15,4 @@ dependencies:
- role: lib_openshift
- role: lib_utils
- role: lib_os_firewall
+- role: openshift_facts
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index e52cd6231..5f4e6df71 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -11,25 +11,6 @@
- openshift_master_oauth_grant_method is defined
- openshift_master_oauth_grant_method not in openshift_master_valid_grant_methods
-# HA Variable Validation
-- fail:
- msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations"
- when:
- - openshift.master.ha | bool
- - (openshift.master.cluster_method is not defined) or (openshift.master.cluster_method is defined and openshift.master.cluster_method not in ["native", "pacemaker"])
-- fail:
- msg: "openshift_master_cluster_password must be set for multi-master installations"
- when:
- - openshift.master.ha | bool
- - openshift.master.cluster_method == "pacemaker"
- - openshift_master_cluster_password is not defined or not openshift_master_cluster_password
-- fail:
- msg: "Pacemaker based HA is not supported at this time when used with containerized installs"
- when:
- - openshift.master.ha | bool
- - openshift.master.cluster_method == "pacemaker"
- - openshift.common.is_containerized | bool
-
- name: Open up firewall ports
import_tasks: firewall.yml
@@ -226,7 +207,6 @@
enabled: yes
state: started
when:
- - openshift.master.cluster_method == 'native'
- inventory_hostname == openshift_master_hosts[0]
register: l_start_result
until: not l_start_result | failed
@@ -241,14 +221,12 @@
- set_fact:
master_api_service_status_changed: "{{ l_start_result | changed }}"
when:
- - openshift.master.cluster_method == 'native'
- inventory_hostname == openshift_master_hosts[0]
- pause:
seconds: 15
when:
- openshift.master.ha | bool
- - openshift.master.cluster_method == 'native'
- name: Start and enable master api all masters
systemd:
@@ -256,7 +234,6 @@
enabled: yes
state: started
when:
- - openshift.master.cluster_method == 'native'
- inventory_hostname != openshift_master_hosts[0]
register: l_start_result
until: not l_start_result | failed
@@ -271,14 +248,12 @@
- set_fact:
master_api_service_status_changed: "{{ l_start_result | changed }}"
when:
- - openshift.master.cluster_method == 'native'
- inventory_hostname != openshift_master_hosts[0]
# A separate wait is required here for native HA since notifies will
# be resolved after all tasks in the role.
- include_tasks: check_master_api_is_ready.yml
when:
- - openshift.master.cluster_method == 'native'
- master_api_service_status_changed | bool
- name: Start and enable master controller service
@@ -286,8 +261,6 @@
name: "{{ openshift.common.service_type }}-master-controllers"
enabled: yes
state: started
- when:
- - openshift.master.cluster_method == 'native'
register: l_start_result
until: not l_start_result | failed
retries: 1
@@ -301,30 +274,6 @@
- name: Set fact master_controllers_service_status_changed
set_fact:
master_controllers_service_status_changed: "{{ l_start_result | changed }}"
- when:
- - openshift.master.cluster_method == 'native'
-
-- name: Install cluster packages
- package: name=pcs state=present
- when:
- - openshift.master.cluster_method == 'pacemaker'
- - not openshift.common.is_containerized | bool
- register: l_install_result
- until: l_install_result | success
-
-- name: Start and enable cluster service
- systemd:
- name: pcsd
- enabled: yes
- state: started
- when:
- - openshift.master.cluster_method == 'pacemaker'
- - not openshift.common.is_containerized | bool
-
-- name: Set the cluster user password
- shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster
- when:
- - l_install_result | changed
- name: node bootstrap settings
include_tasks: bootstrap.yml
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
index c95f562d0..ca04d2243 100644
--- a/roles/openshift_master/tasks/registry_auth.yml
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -33,7 +33,7 @@
- openshift_docker_alternative_creds | default(False) | bool
- oreg_auth_user is defined
- (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- register: master_oreg_auth_credentials_create
+ register: master_oreg_auth_credentials_create_alt
notify:
- restart master api
- restart master controllers
@@ -45,4 +45,8 @@
when:
- openshift.common.is_containerized | bool
- oreg_auth_user is defined
- - (master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or master_oreg_auth_credentials_create.changed) | bool
+ - >
+ (master_oreg_auth_credentials_stat.stat.exists
+ or oreg_auth_credentials_replace
+ or master_oreg_auth_credentials_create.changed
+ or master_oreg_auth_credentials_create_alt.changed) | bool
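
For context: the appended `master_oreg_auth_credentials_create_alt.changed` term makes the containerized credential sync fire when either the standard or the alternative-credentials task reported a change, and the `>` folded block keeps the long boolean readable without changing its meaning. The equivalent unfolded form:

```
when:
  - openshift.common.is_containerized | bool
  - oreg_auth_user is defined
  - (master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or master_oreg_auth_credentials_create.changed or master_oreg_auth_credentials_create_alt.changed) | bool
```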
diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml
index 23386f11b..450f6d803 100644
--- a/roles/openshift_master/tasks/system_container.yml
+++ b/roles/openshift_master/tasks/system_container.yml
@@ -1,8 +1,4 @@
---
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
- name: Pre-pull master system container image
command: >
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 9d11ed574..ee76413e3 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -25,7 +25,6 @@
state: absent
ignore_errors: true
when:
- - openshift.master.cluster_method == "native"
- not l_is_master_system_container | bool
# This is the image used for both HA and non-HA clusters:
@@ -43,7 +42,6 @@
src: "{{ ha_svc_template_path }}/atomic-openshift-master-{{ item }}.service.j2"
dest: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master-{{ item }}.service"
when:
- - openshift.master.cluster_method == "native"
- not l_is_master_system_container | bool
with_items:
- api
@@ -63,22 +61,17 @@
- api
- controllers
when:
- - openshift.master.cluster_method == "native"
- not l_is_master_system_container | bool
- name: Preserve Master API Proxy Config options
command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api
register: l_master_api_proxy
- when:
- - openshift.master.cluster_method == "native"
failed_when: false
changed_when: false
- name: Preserve Master API AWS options
command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-api
register: master_api_aws
- when:
- - openshift.master.cluster_method == "native"
failed_when: false
changed_when: false
@@ -87,14 +80,11 @@
src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2"
dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
backup: true
- when:
- - openshift.master.cluster_method == "native"
notify:
- restart master api
- name: Restore Master API Proxy Config Options
when:
- - openshift.master.cluster_method == "native"
- l_master_api_proxy.rc == 0
- "'http_proxy' not in openshift.common"
- "'https_proxy' not in openshift.common"
@@ -105,7 +95,6 @@
- name: Restore Master API AWS Options
when:
- - openshift.master.cluster_method == "native"
- master_api_aws.rc == 0
- not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
lineinfile:
@@ -117,16 +106,12 @@
- name: Preserve Master Controllers Proxy Config options
command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
register: master_controllers_proxy
- when:
- - openshift.master.cluster_method == "native"
failed_when: false
changed_when: false
- name: Preserve Master Controllers AWS options
command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
register: master_controllers_aws
- when:
- - openshift.master.cluster_method == "native"
failed_when: false
changed_when: false
@@ -135,8 +120,6 @@
src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2"
dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
backup: true
- when:
- - openshift.master.cluster_method == "native"
notify:
- restart master controllers
@@ -146,7 +129,6 @@
line: "{{ item }}"
with_items: "{{ master_controllers_proxy.stdout_lines | default([]) }}"
when:
- - openshift.master.cluster_method == "native"
- master_controllers_proxy.rc == 0
- "'http_proxy' not in openshift.common"
- "'https_proxy' not in openshift.common"
@@ -157,6 +139,5 @@
line: "{{ item }}"
with_items: "{{ master_controllers_aws.stdout_lines | default([]) }}"
when:
- - openshift.master.cluster_method == "native"
- master_controllers_aws.rc == 0
- not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index 5d4a99c97..cec3d3fb1 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -4,9 +4,9 @@ Documentation=https://github.com/openshift/origin
After=etcd_container.service
Wants=etcd_container.service
Before={{ openshift.common.service_type }}-node.service
-After={{ openshift.docker.service_name }}.service
-PartOf={{ openshift.docker.service_name }}.service
-Requires={{ openshift.docker.service_name }}.service
+After={{ openshift_docker_service_name }}.service
+PartOf={{ openshift_docker_service_name }}.service
+Requires={{ openshift_docker_service_name }}.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
@@ -33,5 +33,5 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy={{ openshift.docker.service_name }}.service
+WantedBy={{ openshift_docker_service_name }}.service
WantedBy={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index f93f3b565..a0248151d 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -3,9 +3,9 @@ Description=Atomic OpenShift Master Controllers
Documentation=https://github.com/openshift/origin
Wants={{ openshift.common.service_type }}-master-api.service
After={{ openshift.common.service_type }}-master-api.service
-After={{ openshift.docker.service_name }}.service
-Requires={{ openshift.docker.service_name }}.service
-PartOf={{ openshift.docker.service_name }}.service
+After={{ openshift_docker_service_name }}.service
+Requires={{ openshift_docker_service_name }}.service
+PartOf={{ openshift_docker_service_name }}.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
@@ -32,4 +32,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy={{ openshift.docker.service_name }}.service
+WantedBy={{ openshift_docker_service_name }}.service
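These unit templates trade the `openshift.docker.service_name` fact for the plain `openshift_docker_service_name` variable. Its default, added later in this same diff (roles/openshift_node/defaults/main.yml and roles/openshift_node_certificates/defaults/main.yml), resolves to either docker or container-engine:

    openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"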
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index a0f00e545..92668b227 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -120,7 +120,7 @@ kubernetesMasterConfig:
- application/vnd.kubernetes.protobuf
{% endif %}
controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }}
- masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }}
+ masterCount: {{ openshift.master.master_count }}
masterIP: {{ openshift.common.ip }}
podEvictionTimeout: {{ openshift.master.pod_eviction_timeout | default("") }}
proxyClientInfo:
@@ -204,7 +204,7 @@ projectConfig:
mcsLabelsPerProject: {{ osm_mcs_labels_per_project }}
uidAllocatorRange: "{{ osm_uid_allocator_range }}"
routingConfig:
- subdomain: "{{ openshift_master_default_subdomain | default("") }}"
+ subdomain: "{{ openshift_master_default_subdomain }}"
serviceAccountConfig:
limitSecretReferences: {{ openshift_master_saconfig_limitsecretreferences | default(false) }}
managedNames:
diff --git a/roles/openshift_master_cluster/README.md b/roles/openshift_master_cluster/README.md
deleted file mode 100644
index 58dd19ac3..000000000
--- a/roles/openshift_master_cluster/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-OpenShift Master Cluster
-========================
-
-TODO
-
-Requirements
-------------
-
-* Ansible 2.2
-
-Role Variables
---------------
-
-TODO
-
-Dependencies
-------------
-
-TODO
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License Version 2.0
-
-Author Information
-------------------
-
-Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/openshift_master_cluster/meta/main.yml b/roles/openshift_master_cluster/meta/main.yml
deleted file mode 100644
index c452b165e..000000000
--- a/roles/openshift_master_cluster/meta/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-galaxy_info:
- author: Jason DeTiberus
- description:
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 2.2
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
- - system
-dependencies: []
diff --git a/roles/openshift_master_cluster/tasks/configure.yml b/roles/openshift_master_cluster/tasks/configure.yml
deleted file mode 100644
index 1b94598dd..000000000
--- a/roles/openshift_master_cluster/tasks/configure.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-- fail:
- msg: This role requires that openshift_master_cluster_vip is set
- when: openshift_master_cluster_vip is not defined or not openshift_master_cluster_vip
-- fail:
- msg: This role requires that openshift_master_cluster_public_vip is set
- when: openshift_master_cluster_public_vip is not defined or not openshift_master_cluster_public_vip
-
-- name: Authenticate to the cluster
- command: pcs cluster auth -u hacluster -p {{ openshift_master_cluster_password }} {{ omc_cluster_hosts }}
-
-- name: Create the cluster
- command: pcs cluster setup --name openshift_master {{ omc_cluster_hosts }}
-
-- name: Start the cluster
- command: pcs cluster start --all
-
-- name: Enable the cluster on all nodes
- command: pcs cluster enable --all
-
-- name: Set default resource stickiness
- command: pcs resource defaults resource-stickiness=100
-
-- name: Add the cluster VIP resource
- command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_vip }} --group {{ openshift.common.service_type }}-master
-
-- name: Add the cluster public VIP resource
- command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_public_vip }} --group {{ openshift.common.service_type }}-master
- when: openshift_master_cluster_public_vip != openshift_master_cluster_vip
-
-- name: Add the cluster master service resource
- command: pcs resource create master systemd:{{ openshift.common.service_type }}-master op start timeout=90s stop timeout=90s --group {{ openshift.common.service_type }}-master
-
-- name: Disable stonith
- command: pcs property set stonith-enabled=false
-
-- name: Wait for the clustered master service to be available
- wait_for:
- host: "{{ openshift_master_cluster_vip }}"
- port: "{{ openshift.master.api_port }}"
- state: started
- timeout: 180
- delay: 90
diff --git a/roles/openshift_master_cluster/tasks/main.yml b/roles/openshift_master_cluster/tasks/main.yml
deleted file mode 100644
index 41bfc72cb..000000000
--- a/roles/openshift_master_cluster/tasks/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- fail:
- msg: "Not possible on atomic hosts for now"
- when: openshift.common.is_containerized | bool
-
-- name: Test if cluster is already configured
- command: pcs status
- register: pcs_status
- changed_when: false
- failed_when: false
- when: openshift.master.cluster_method == "pacemaker"
-
-- include_tasks: configure.yml
- when: "pcs_status | failed and 'Error: cluster is not currently running on this node' in pcs_status.stderr"
diff --git a/roles/openshift_master_facts/defaults/main.yml b/roles/openshift_master_facts/defaults/main.yml
index d0dcdae4b..a89f48afa 100644
--- a/roles/openshift_master_facts/defaults/main.yml
+++ b/roles/openshift_master_facts/defaults/main.yml
@@ -1,5 +1,4 @@
---
-openshift_master_default_subdomain: "router.default.svc.cluster.local"
openshift_master_admission_plugin_config:
openshift.io/ImagePolicy:
configuration:
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index c827f2d26..ff15f693b 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -485,31 +485,6 @@ class FilterModule(object):
Dumper=AnsibleDumper))
@staticmethod
- def validate_pcs_cluster(data, masters=None):
- ''' Validates output from "pcs status", ensuring that each master
- provided is online.
- Ex: data = ('...',
- 'PCSD Status:',
- 'master1.example.com: Online',
- 'master2.example.com: Online',
- 'master3.example.com: Online',
- '...')
- masters = ['master1.example.com',
- 'master2.example.com',
- 'master3.example.com']
- returns True
- '''
- if not issubclass(type(data), string_types):
- raise errors.AnsibleFilterError("|failed expects data is a string or unicode")
- if not issubclass(type(masters), list):
- raise errors.AnsibleFilterError("|failed expects masters is a list")
- valid = True
- for master in masters:
- if "{0}: Online".format(master) not in data:
- valid = False
- return valid
-
- @staticmethod
def certificates_to_synchronize(hostvars, include_keys=True, include_ca=True):
''' Return certificates to synchronize based on facts. '''
if not issubclass(type(hostvars), dict):
@@ -553,6 +528,5 @@ class FilterModule(object):
def filters(self):
''' returns a mapping of filters to methods '''
return {"translate_idps": self.translate_idps,
- "validate_pcs_cluster": self.validate_pcs_cluster,
"certificates_to_synchronize": self.certificates_to_synchronize,
"oo_htpasswd_users_from_file": self.oo_htpasswd_users_from_file}
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 20cc5358e..0cb87dcaa 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -1,14 +1,8 @@
---
-# Ensure the default sub-domain is set:
-- name: Migrate legacy osm_default_subdomain fact
- set_fact:
- openshift_master_default_subdomain: "{{ osm_default_subdomain | default(None) }}"
- when: openshift_master_default_subdomain is not defined
-
- name: Verify required variables are set
fail:
msg: openshift_master_default_subdomain must be set to deploy metrics
- when: openshift_hosted_metrics_deploy | default(false) | bool and openshift_master_default_subdomain | default("") == ""
+ when: openshift_hosted_metrics_deploy | default(false) | bool and openshift_master_default_subdomain == ""
# NOTE: These metrics variables are unfortunately needed by both the master and the metrics roles
# to properly configure the master-config.yaml file.
@@ -20,7 +14,7 @@
- name: Set g_metrics_hostname
set_fact:
g_metrics_hostname: "{{ openshift_hosted_metrics_public_url
- | default('hawkular-metrics.' ~ (openshift_master_default_subdomain))
+ | default('hawkular-metrics.' ~ openshift_master_default_subdomain)
| oo_hostname_from_url }}"
- set_fact:
@@ -31,7 +25,6 @@
openshift_facts:
role: master
local_facts:
- cluster_method: "{{ openshift_master_cluster_method | default('native') }}"
cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
api_port: "{{ openshift_master_api_port | default(None) }}"
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml
index 074b72942..e0329ee7c 100644
--- a/roles/openshift_metrics/handlers/main.yml
+++ b/roles/openshift_metrics/handlers/main.yml
@@ -1,7 +1,7 @@
---
- name: restart master api
systemd: name={{ openshift.common.service_type }}-master-api state=restarted
- when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ when: (not (master_api_service_status_changed | default(false) | bool))
notify: Verify API Server
# We retry the controllers because the API may not be 100% initialized yet.
@@ -11,7 +11,7 @@
delay: 5
register: result
until: result.rc == 0
- when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ when: (not (master_controllers_service_status_changed | default(false) | bool))
- name: Verify API Server
# Using curl here since the uri module requires python-httplib2 and
diff --git a/roles/openshift_nfs/tasks/setup.yml b/roles/openshift_nfs/tasks/setup.yml
index edb854467..1aa7e7079 100644
--- a/roles/openshift_nfs/tasks/setup.yml
+++ b/roles/openshift_nfs/tasks/setup.yml
@@ -1,7 +1,6 @@
---
- name: setup firewall
- include: firewall.yml
- static: yes
+ import_tasks: firewall.yml
- name: Install nfs-utils
package: name=nfs-utils state=present
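The `include` → `import_tasks`/`include_tasks` swaps throughout this diff follow the Ansible 2.4 split: `import_tasks` is resolved statically at parse time (subsuming `include` with `static: yes`), while `include_tasks` is evaluated dynamically when the task runs. A minimal sketch of the two forms:

    - name: static import, resolved at parse time
      import_tasks: firewall.yml

    - name: dynamic include, resolved at run time
      include_tasks: "{{ task_file }}.yml"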
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 5a0c09f5c..f3867fe4a 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -101,8 +101,11 @@ oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_ur
oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
oreg_auth_credentials_replace: False
l_bind_docker_reg_auth: False
+openshift_use_crio: False
openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False)) or (openshift_use_crio_only | default(False)) }}"
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+
# NOTE
# r_openshift_node_*_default may be defined external to this role.
# openshift_use_*, if defined, may affect other roles or play behavior.
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 927d107c6..70057c7f3 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -17,7 +17,6 @@ dependencies:
- role: lib_openshift
- role: lib_os_firewall
when: not (openshift_node_upgrade_in_progress | default(False))
-- role: openshift_docker
- role: openshift_cloud_provider
when: not (openshift_node_upgrade_in_progress | default(False))
- role: lib_utils
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index d46b1f9c3..d9f3e920d 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -4,9 +4,9 @@
when:
- (not ansible_selinux or ansible_selinux.status != 'enabled')
- deployment_type == 'openshift-enterprise'
- - not openshift_use_crio | default(false)
+ - not openshift_use_crio
-- include: dnsmasq.yml
+- include_tasks: dnsmasq.yml
- name: setup firewall
import_tasks: firewall.yml
@@ -44,13 +44,6 @@
- name: include node installer
include_tasks: install.yml
-- name: Restart cri-o
- systemd:
- name: cri-o
- enabled: yes
- state: restarted
- when: openshift_use_crio | default(false)
-
- name: restart NetworkManager to ensure resolv.conf is present
systemd:
name: NetworkManager
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index 73dc9e130..98a391890 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -1,8 +1,4 @@
---
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
- name: Pre-pull node system container image
command: >
@@ -16,6 +12,6 @@
image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}"
values:
- "DNS_DOMAIN={{ openshift.common.dns_domain }}"
- - "DOCKER_SERVICE={{ openshift.docker.service_name }}.service"
+ - "DOCKER_SERVICE={{ openshift_docker_service_name }}.service"
- "MASTER_SERVICE={{ openshift.common.service_type }}.service"
state: latest
diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml
index 8c3548475..b61bc84c1 100644
--- a/roles/openshift_node/tasks/openvswitch_system_container.yml
+++ b/roles/openshift_node/tasks/openvswitch_system_container.yml
@@ -1,19 +1,11 @@
---
- set_fact:
- l_use_crio: "{{ openshift_use_crio | default(false) }}"
-
-- set_fact:
l_service_name: "cri-o"
- when: l_use_crio
+ when: openshift_use_crio
- set_fact:
- l_service_name: "{{ openshift.docker.service_name }}"
- when: not l_use_crio
-
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
+ l_service_name: "{{ openshift_docker_service_name }}"
+ when: not openshift_use_crio
- name: Pre-pull OpenVSwitch system container image
command: >
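The two set_fact branches above pick cri-o or the docker service name. Now that openshift_use_crio carries a real default, the same result could be collapsed into one inline conditional; a hedged alternative, not what the patch actually does:

    - set_fact:
        l_service_name: "{{ 'cri-o' if openshift_use_crio else openshift_docker_service_name }}"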
diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml
index f5428867a..ab43ec049 100644
--- a/roles/openshift_node/tasks/registry_auth.yml
+++ b/roles/openshift_node/tasks/registry_auth.yml
@@ -32,7 +32,7 @@
- openshift_docker_alternative_creds | bool
- oreg_auth_user is defined
- (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- register: node_oreg_auth_credentials_create
+ register: node_oreg_auth_credentials_create_alt
notify:
- restart node
@@ -43,4 +43,8 @@
when:
- openshift.common.is_containerized | bool
- oreg_auth_user is defined
- - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool
+ - >
+ (node_oreg_auth_credentials_stat.stat.exists
+ or oreg_auth_credentials_replace
+ or node_oreg_auth_credentials_create.changed
+ or node_oreg_auth_credentials_create_alt.changed) | bool
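The alternate-credentials task now registers its own result variable instead of clobbering node_oreg_auth_credentials_create, and the final restart condition ORs both results. The general pattern, as a sketch with hypothetical names:

    - command: /bin/true
      register: result_primary
      when: use_primary | bool   # use_primary is an illustrative flag

    - command: /bin/true
      register: result_alt
      when: not use_primary | bool

    - debug: msg="restart needed"
      when: (result_primary.changed | default(false)) or (result_alt.changed | default(false))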
diff --git a/roles/openshift_node/tasks/upgrade/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml
index a4fa51172..3f1abceab 100644
--- a/roles/openshift_node/tasks/upgrade/restart.yml
+++ b/roles/openshift_node/tasks/upgrade/restart.yml
@@ -13,19 +13,15 @@
- name: Reload systemd to ensure latest unit files
command: systemctl daemon-reload
-- name: Restart docker
+- name: Restart container runtime
service:
- name: "{{ openshift.docker.service_name }}"
+ name: "{{ openshift_docker_service_name }}"
state: started
register: docker_start_result
until: not docker_start_result | failed
retries: 3
delay: 30
-- name: Update docker facts
- openshift_facts:
- role: docker
-
- name: Start services
service: name={{ item }} state=started
with_items:
diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2
index 7602d8ee6..da751bd65 100644
--- a/roles/openshift_node/templates/node.service.j2
+++ b/roles/openshift_node/templates/node.service.j2
@@ -1,14 +1,14 @@
[Unit]
Description=OpenShift Node
-After={{ openshift.docker.service_name }}.service
+After={{ openshift_docker_service_name }}.service
Wants=openvswitch.service
After=ovsdb-server.service
After=ovs-vswitchd.service
-Wants={{ openshift.docker.service_name }}.service
+Wants={{ openshift_docker_service_name }}.service
Documentation=https://github.com/openshift/origin
Requires=dnsmasq.service
After=dnsmasq.service
-{% if openshift_use_crio|default(false) %}Wants=cri-o.service{% endif %}
+{% if openshift_use_crio %}Wants=cri-o.service{% endif %}
[Service]
Type=notify
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index d452cc45c..16fdde02e 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -16,7 +16,7 @@ imageConfig:
latest: {{ openshift_node_image_config_latest }}
kind: NodeConfig
kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }}
-{% if openshift_use_crio | default(False) %}
+{% if openshift_use_crio %}
container-runtime:
- remote
container-runtime-endpoint:
diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service
index fa7238849..5964ac095 100644
--- a/roles/openshift_node/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node/templates/openshift.docker.node.dep.service
@@ -1,9 +1,9 @@
[Unit]
-Requires={{ openshift.docker.service_name }}.service
-After={{ openshift.docker.service_name }}.service
+Requires={{ openshift_docker_service_name }}.service
+After={{ openshift_docker_service_name }}.service
PartOf={{ openshift.common.service_type }}-node.service
Before={{ openshift.common.service_type }}-node.service
-{% if openshift_use_crio|default(false) %}Wants=cri-o.service{% endif %}
+{% if openshift_use_crio %}Wants=cri-o.service{% endif %}
[Service]
ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 561aa01f4..3b33ca542 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -1,9 +1,9 @@
[Unit]
After={{ openshift.common.service_type }}-master.service
-After={{ openshift.docker.service_name }}.service
+After={{ openshift_docker_service_name }}.service
After=openvswitch.service
-PartOf={{ openshift.docker.service_name }}.service
-Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift_docker_service_name }}.service
+Requires={{ openshift_docker_service_name }}.service
{% if openshift_node_use_openshift_sdn %}
Wants=openvswitch.service
PartOf=openvswitch.service
@@ -26,7 +26,7 @@ ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
--rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \
-v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \
-e HOST=/rootfs -e HOST_ETC=/host-etc \
- -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} \
+ -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}:rslave \
-v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node \
{% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
-v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro \
@@ -48,4 +48,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy={{ openshift.docker.service_name }}.service
+WantedBy={{ openshift_docker_service_name }}.service
diff --git a/roles/openshift_node/templates/openvswitch.docker.service b/roles/openshift_node/templates/openvswitch.docker.service
index 34aaaabd6..37f091c76 100644
--- a/roles/openshift_node/templates/openvswitch.docker.service
+++ b/roles/openshift_node/templates/openvswitch.docker.service
@@ -1,7 +1,7 @@
[Unit]
-After={{ openshift.docker.service_name }}.service
-Requires={{ openshift.docker.service_name }}.service
-PartOf={{ openshift.docker.service_name }}.service
+After={{ openshift_docker_service_name }}.service
+Requires={{ openshift_docker_service_name }}.service
+PartOf={{ openshift_docker_service_name }}.service
[Service]
EnvironmentFile=/etc/sysconfig/openvswitch
@@ -14,4 +14,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy={{ openshift.docker.service_name }}.service
+WantedBy={{ openshift_docker_service_name }}.service
diff --git a/roles/openshift_node_certificates/defaults/main.yml b/roles/openshift_node_certificates/defaults/main.yml
index 455f26f30..b42b75be9 100644
--- a/roles/openshift_node_certificates/defaults/main.yml
+++ b/roles/openshift_node_certificates/defaults/main.yml
@@ -1,3 +1,5 @@
---
openshift_node_cert_expire_days: 730
openshift_ca_host: ''
+
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
diff --git a/roles/openshift_node_certificates/handlers/main.yml b/roles/openshift_node_certificates/handlers/main.yml
index ef66bf9ca..0686ac101 100644
--- a/roles/openshift_node_certificates/handlers/main.yml
+++ b/roles/openshift_node_certificates/handlers/main.yml
@@ -6,7 +6,7 @@
- name: check for container runtime after updating ca trust
command: >
- systemctl -q is-active {{ openshift.docker.service_name }}.service
+ systemctl -q is-active {{ openshift_docker_service_name }}.service
register: l_docker_installed
# An rc of 0 indicates that the container runtime service is
# running. We will restart it by notifying the restart handler since
@@ -18,7 +18,7 @@
- name: restart container runtime after updating ca trust
systemd:
- name: "{{ openshift.docker.service_name }}"
+ name: "{{ openshift_docker_service_name }}"
state: restarted
when: not openshift_certificates_redeploy | default(false) | bool
register: l_docker_restart_docker_in_cert_result
diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml
index d33d09980..c234a3000 100644
--- a/roles/openshift_node_facts/tasks/main.yml
+++ b/roles/openshift_node_facts/tasks/main.yml
@@ -15,7 +15,6 @@
kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
labels: "{{ openshift_node_labels | default(None) }}"
registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}"
- sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
set_node_ip: "{{ openshift_set_node_ip | default(None) }}"
node_image: "{{ osn_image | default(None) }}"
diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml
index 5f182e0d6..929b76f54 100644
--- a/roles/openshift_openstack/defaults/main.yml
+++ b/roles/openshift_openstack/defaults/main.yml
@@ -4,11 +4,9 @@ openshift_openstack_stack_state: 'present'
openshift_openstack_ssh_ingress_cidr: 0.0.0.0/0
openshift_openstack_node_ingress_cidr: 0.0.0.0/0
openshift_openstack_lb_ingress_cidr: 0.0.0.0/0
-openshift_openstack_bastion_ingress_cidr: 0.0.0.0/0
openshift_openstack_num_etcd: 0
openshift_openstack_num_masters: 1
openshift_openstack_num_nodes: 1
-openshift_openstack_num_dns: 0
openshift_openstack_num_infra: 1
openshift_openstack_dns_nameservers: []
openshift_openstack_nodes_to_remove: []
@@ -45,7 +43,6 @@ openshift_openstack_container_storage_setup:
# populate-dns
openshift_openstack_dns_records_add: []
-openshift_openstack_external_nsupdate_keys: {}
openshift_openstack_full_dns_domain: "{{ (openshift_openstack_clusterid|trim == '') | ternary(openshift_openstack_public_dns_domain, openshift_openstack_clusterid + '.' + openshift_openstack_public_dns_domain) }}"
openshift_openstack_app_subdomain: "apps"
@@ -60,20 +57,17 @@ openshift_openstack_infra_hostname: infra-node
openshift_openstack_node_hostname: app-node
openshift_openstack_lb_hostname: lb
openshift_openstack_etcd_hostname: etcd
-openshift_openstack_dns_hostname: dns
openshift_openstack_keypair_name: openshift
openshift_openstack_lb_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_etcd_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_master_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_node_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_infra_flavor: "{{ openshift_openstack_default_flavor }}"
-openshift_openstack_dns_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_master_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_infra_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_node_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_lb_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_etcd_image: "{{ openshift_openstack_default_image_name }}"
-openshift_openstack_dns_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_provider_network_name: null
openshift_openstack_external_network_name: null
openshift_openstack_private_network: >-
@@ -89,8 +83,5 @@ openshift_openstack_master_volume_size: "{{ openshift_openstack_docker_volume_si
openshift_openstack_infra_volume_size: "{{ openshift_openstack_docker_volume_size }}"
openshift_openstack_node_volume_size: "{{ openshift_openstack_docker_volume_size }}"
openshift_openstack_etcd_volume_size: 2
-openshift_openstack_dns_volume_size: 1
openshift_openstack_lb_volume_size: 5
-openshift_openstack_use_bastion: false
-openshift_openstack_ui_ssh_tunnel: false
openshift_openstack_ephemeral_volumes: false
diff --git a/roles/openshift_openstack/tasks/check-prerequisites.yml b/roles/openshift_openstack/tasks/check-prerequisites.yml
index 57c7238d1..30996cc47 100644
--- a/roles/openshift_openstack/tasks/check-prerequisites.yml
+++ b/roles/openshift_openstack/tasks/check-prerequisites.yml
@@ -32,10 +32,12 @@
command: python -c "import dns"
ignore_errors: yes
register: pythondns_result
+ when: openshift_openstack_external_nsupdate_keys is defined
- name: Check if python-dns is installed
assert:
that: 'pythondns_result.rc == 0'
msg: "Python module python-dns is not installed"
+ when: openshift_openstack_external_nsupdate_keys is defined
# Check jinja2
- name: Try to import jinja2 module
@@ -85,21 +87,19 @@
msg: "Keypair {{ openshift_openstack_keypair_name }} is not available"
# Check that custom images are available
-- include: custom_image_check.yaml
+- include_tasks: custom_image_check.yaml
with_items:
- "{{ openshift_openstack_master_image }}"
- "{{ openshift_openstack_infra_image }}"
- "{{ openshift_openstack_node_image }}"
- "{{ openshift_openstack_lb_image }}"
- "{{ openshift_openstack_etcd_image }}"
- - "{{ openshift_openstack_dns_image }}"
# Check that custom flavors are available
-- include: custom_flavor_check.yaml
+- include_tasks: custom_flavor_check.yaml
with_items:
- "{{ openshift_openstack_master_flavor }}"
- "{{ openshift_openstack_infra_flavor }}"
- "{{ openshift_openstack_node_flavor }}"
- "{{ openshift_openstack_lb_flavor }}"
- "{{ openshift_openstack_etcd_flavor }}"
- - "{{ openshift_openstack_dns_flavor }}"
diff --git a/roles/openshift_openstack/tasks/hostname.yml b/roles/openshift_openstack/tasks/hostname.yml
deleted file mode 100644
index e1a18425f..000000000
--- a/roles/openshift_openstack/tasks/hostname.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Setting Hostname Fact
- set_fact:
- new_hostname: "{{ custom_hostname | default(inventory_hostname_short) }}"
-
-- name: Setting FQDN Fact
- set_fact:
- new_fqdn: "{{ new_hostname }}.{{ openshift_openstack_full_dns_domain }}"
-
-- name: Setting hostname and DNS domain
- hostname: name="{{ new_fqdn }}"
-
-- name: Check for cloud.cfg
- stat: path=/etc/cloud/cloud.cfg
- register: cloud_cfg
-
-- name: Prevent cloud-init updates of hostname/fqdn (if applicable)
- lineinfile:
- dest: /etc/cloud/cloud.cfg
- state: present
- regexp: "{{ item.regexp }}"
- line: "{{ item.line }}"
- with_items:
- - { regexp: '^ - set_hostname', line: '# - set_hostname' }
- - { regexp: '^ - update_hostname', line: '# - update_hostname' }
- when: cloud_cfg.stat.exists == True
diff --git a/roles/openshift_openstack/tasks/node-configuration.yml b/roles/openshift_openstack/tasks/node-configuration.yml
index 89e58d830..59df2e396 100644
--- a/roles/openshift_openstack/tasks/node-configuration.yml
+++ b/roles/openshift_openstack/tasks/node-configuration.yml
@@ -4,8 +4,6 @@
msg: "SELinux is required for OpenShift and has been detected as '{{ ansible_selinux.config_mode }}'"
when: ansible_selinux.config_mode != "enforcing"
-- include: hostname.yml
+- include_tasks: container-storage-setup.yml
-- include: container-storage-setup.yml
-
-- include: node-network.yml
+- include_tasks: node-network.yml
diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml
index c03aceb94..eae4967f7 100644
--- a/roles/openshift_openstack/tasks/populate-dns.yml
+++ b/roles/openshift_openstack/tasks/populate-dns.yml
@@ -30,7 +30,6 @@
nsupdate_key_algorithm_private: "{{ openshift_openstack_external_nsupdate_keys['private']['key_algorithm'] }}"
nsupdate_private_key_name: "{{ openshift_openstack_external_nsupdate_keys['private']['key_name']|default('private-' + openshift_openstack_full_dns_domain) }}"
when:
- - openshift_openstack_external_nsupdate_keys is defined
- openshift_openstack_external_nsupdate_keys['private'] is defined
@@ -44,6 +43,8 @@
key_secret: "{{ nsupdate_key_secret_private }}"
key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}"
entries: "{{ private_records }}"
+ when:
+ - openshift_openstack_external_nsupdate_keys['private'] is defined
- name: "Generate list of public A records"
set_fact:
@@ -63,15 +64,6 @@
when:
- hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined
- openshift_openstack_num_masters == 1
- - not openshift_openstack_use_bastion|bool
-
-- name: "Add public master cluster hostname records to the public A records (single master behind a bastion)"
- set_fact:
- public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(openshift_openstack_full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}"
- when:
- - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined
- - openshift_openstack_num_masters == 1
- - openshift_openstack_use_bastion|bool
- name: "Add public master cluster hostname records to the public A records (multi-master)"
set_fact:
@@ -87,7 +79,6 @@
nsupdate_key_algorithm_public: "{{ openshift_openstack_external_nsupdate_keys['public']['key_algorithm'] }}"
nsupdate_public_key_name: "{{ openshift_openstack_external_nsupdate_keys['public']['key_name']|default('public-' + openshift_openstack_full_dns_domain) }}"
when:
- - openshift_openstack_external_nsupdate_keys is defined
- openshift_openstack_external_nsupdate_keys['public'] is defined
- name: "Generate the public Add section for DNS"
@@ -100,11 +91,13 @@
key_secret: "{{ nsupdate_key_secret_public }}"
key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}"
entries: "{{ public_records }}"
+ when:
+ - openshift_openstack_external_nsupdate_keys['public'] is defined
- name: "Generate the final openshift_openstack_dns_records_add"
set_fact:
- openshift_openstack_dns_records_add: "{{ private_named_records + public_named_records }}"
+ openshift_openstack_dns_records_add: "{{ private_named_records|default([]) + public_named_records|default([]) }}"
- name: "Add DNS A records"
@@ -120,7 +113,7 @@
# TODO(shadower): add a cleanup playbook that removes these records, too!
state: present
with_subelements:
- - "{{ openshift_openstack_dns_records_add | default({}) }}"
+ - "{{ openshift_openstack_dns_records_add | default([]) }}"
- entries
register: nsupdate_add_result
until: nsupdate_add_result|succeeded
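Two details here: the `is defined` guard on the top-level nsupdate dict moves onto the section-generating tasks, and the fallback for openshift_openstack_dns_records_add changes from `{}` to `[]`, since with_subelements walks a list of dicts that each carry an entries list. An illustrative shape (keys beyond the entries fields shown in this diff are assumptions):

    openshift_openstack_dns_records_add:
      - view: public
        entries:
          - { type: A, hostname: master-0, ip: 203.0.113.10 }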
diff --git a/roles/openshift_openstack/tasks/provision.yml b/roles/openshift_openstack/tasks/provision.yml
index dccbe334c..b774bd620 100644
--- a/roles/openshift_openstack/tasks/provision.yml
+++ b/roles/openshift_openstack/tasks/provision.yml
@@ -1,6 +1,6 @@
---
- name: Generate the templates
- include: generate-templates.yml
+ include_tasks: generate-templates.yml
when:
- openshift_openstack_stack_state == 'present'
@@ -17,7 +17,7 @@
meta: refresh_inventory
- name: CleanUp
- include: cleanup.yml
+ include_tasks: cleanup.yml
when:
- openshift_openstack_stack_state == 'present'
diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2
index 0e7538629..8d13eb81e 100644
--- a/roles/openshift_openstack/templates/heat_stack.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2
@@ -54,25 +54,8 @@ outputs:
description: Floating IPs of the nodes
value: { get_attr: [ infra_nodes, floating_ip ] }
-{% if openshift_openstack_num_dns|int > 0 %}
- dns_name:
- description: Name of the DNS
- value:
- get_attr:
- - dns
- - name
-
- dns_floating_ips:
- description: Floating IPs of the DNS
- value: { get_attr: [ dns, floating_ip ] }
-
- dns_private_ips:
- description: Private IPs of the DNS
- value: { get_attr: [ dns, private_ip ] }
-{% endif %}
-
conditions:
- no_floating: {% if openshift_openstack_provider_network_name or openshift_openstack_use_bastion|bool %}true{% else %}false{% endif %}
+ no_floating: {% if openshift_openstack_provider_network_name %}true{% else %}false{% endif %}
resources:
@@ -180,13 +163,6 @@ resources:
port_range_min: 22
port_range_max: 22
remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }}
-{% if openshift_openstack_use_bastion|bool %}
- - direction: ingress
- protocol: tcp
- port_range_min: 22
- port_range_max: 22
- remote_ip_prefix: {{ openshift_openstack_bastion_ingress_cidr }}
-{% endif %}
- direction: ingress
protocol: icmp
remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }}
@@ -443,44 +419,7 @@ resources:
port_range_min: 443
port_range_max: 443
-{% if openshift_openstack_num_dns|int > 0 %}
- dns-secgrp:
- type: OS::Neutron::SecurityGroup
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-dns-secgrp
- params:
- cluster_id: {{ openshift_openstack_stack_name }}
- description:
- str_replace:
- template: Security group for cluster_id cluster DNS
- params:
- cluster_id: {{ openshift_openstack_stack_name }}
- rules:
- - direction: ingress
- protocol: udp
- port_range_min: 53
- port_range_max: 53
- remote_ip_prefix: {{ openshift_openstack_node_ingress_cidr }}
- - direction: ingress
- protocol: udp
- port_range_min: 53
- port_range_max: 53
- remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24"
- - direction: ingress
- protocol: tcp
- port_range_min: 53
- port_range_max: 53
- remote_ip_prefix: {{ openshift_openstack_node_ingress_cidr }}
- - direction: ingress
- protocol: tcp
- port_range_min: 53
- port_range_max: 53
- remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24"
-{% endif %}
-
-{% if openshift_openstack_num_masters|int > 1 or openshift_openstack_ui_ssh_tunnel|bool %}
+{% if openshift_openstack_num_masters|int > 1 %}
lb-secgrp:
type: OS::Neutron::SecurityGroup
properties:
@@ -491,20 +430,13 @@ resources:
protocol: tcp
port_range_min: {{ openshift_master_api_port | default(8443) }}
port_range_max: {{ openshift_master_api_port | default(8443) }}
- remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr | default(openshift_openstack_bastion_ingress_cidr) }}
-{% if openshift_openstack_ui_ssh_tunnel|bool %}
- - direction: ingress
- protocol: tcp
- port_range_min: {{ openshift_master_api_port | default(8443) }}
- port_range_max: {{ openshift_master_api_port | default(8443) }}
- remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }}
-{% endif %}
+ remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr }}
{% if openshift_master_console_port is defined and openshift_master_console_port != openshift_master_api_port %}
- direction: ingress
protocol: tcp
port_range_min: {{ openshift_master_console_port | default(8443) }}
port_range_max: {{ openshift_master_console_port | default(8443) }}
- remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr | default(openshift_openstack_bastion_ingress_cidr) }}
+ remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr }}
{% endif %}
{% endif %}
@@ -553,7 +485,7 @@ resources:
- no_floating
- null
- {{ openshift_openstack_external_network_name }}
-{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %}
+{% if openshift_openstack_provider_network_name %}
attach_float_net: false
{% endif %}
volume_size: {{ openshift_openstack_etcd_volume_size }}
@@ -685,7 +617,7 @@ resources:
- no_floating
- null
- {{ openshift_openstack_external_network_name }}
-{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %}
+{% if openshift_openstack_provider_network_name %}
attach_float_net: false
{% endif %}
volume_size: {{ openshift_openstack_master_volume_size }}
@@ -755,7 +687,7 @@ resources:
- no_floating
- null
- {{ openshift_openstack_external_network_name }}
-{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %}
+{% if openshift_openstack_provider_network_name %}
attach_float_net: false
{% endif %}
volume_size: {{ openshift_openstack_node_volume_size }}
@@ -818,9 +750,6 @@ resources:
{% else %}
- { get_resource: node-secgrp }
{% endif %}
-{% if openshift_openstack_ui_ssh_tunnel|bool and openshift_openstack_num_masters|int < 2 %}
- - { get_resource: lb-secgrp }
-{% endif %}
- { get_resource: infra-secgrp }
- { get_resource: common-secgrp }
{% if not openshift_openstack_provider_network_name %}
@@ -835,54 +764,3 @@ resources:
depends_on:
- interface
{% endif %}
-
-{% if openshift_openstack_num_dns|int > 0 %}
- dns:
- type: OS::Heat::ResourceGroup
- properties:
- count: {{ openshift_openstack_num_dns }}
- resource_def:
- type: server.yaml
- properties:
- name:
- str_replace:
- template: k8s_type-%index%.cluster_id
- params:
- cluster_id: {{ openshift_openstack_stack_name }}
- k8s_type: {{ openshift_openstack_dns_hostname }}
- cluster_env: {{ openshift_openstack_public_dns_domain }}
- cluster_id: {{ openshift_openstack_stack_name }}
- group:
- str_replace:
- template: k8s_type.cluster_id
- params:
- k8s_type: dns
- cluster_id: {{ openshift_openstack_stack_name }}
- type: dns
- image: {{ openshift_openstack_dns_image }}
- flavor: {{ openshift_openstack_dns_flavor }}
- key_name: {{ openshift_openstack_keypair_name }}
-{% if openshift_openstack_provider_network_name %}
- net: {{ openshift_openstack_provider_network_name }}
- net_name: {{ openshift_openstack_provider_network_name }}
-{% else %}
- net: { get_resource: net }
- subnet: { get_resource: subnet }
- net_name:
- str_replace:
- template: openshift-ansible-cluster_id-net
- params:
- cluster_id: {{ openshift_openstack_stack_name }}
-{% endif %}
- secgrp:
- - { get_resource: dns-secgrp }
- - { get_resource: common-secgrp }
-{% if not openshift_openstack_provider_network_name %}
- floating_network: {{ openshift_openstack_external_network_name }}
-{% endif %}
- volume_size: {{ openshift_openstack_dns_volume_size }}
-{% if not openshift_openstack_provider_network_name %}
- depends_on:
- - interface
-{% endif %}
-{% endif %}
diff --git a/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py b/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py
new file mode 100644
index 000000000..eb13a58ba
--- /dev/null
+++ b/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py
@@ -0,0 +1,157 @@
+"""
+Ansible action plugin to generate lists of pv and pvc dictionaries
+"""
+
+from ansible.plugins.action import ActionBase
+from ansible import errors
+
+
+class ActionModule(ActionBase):
+ """Action plugin to execute health checks."""
+
+ def get_templated(self, var_to_template):
+ """Return a properly templated ansible variable"""
+ return self._templar.template(self.task_vars.get(var_to_template))
+
+ def build_common(self, varname=None):
+ """Retrieve common variables for each pv and pvc type"""
+ volume = self.get_templated(str(varname) + '_volume_name')
+ size = self.get_templated(str(varname) + '_volume_size')
+ labels = self.task_vars.get(str(varname) + '_labels')
+ if labels:
+ labels = self._templar.template(labels)
+ else:
+ labels = dict()
+ access_modes = self.get_templated(str(varname) + '_access_modes')
+ return (volume, size, labels, access_modes)
+
+ def build_pv_nfs(self, varname=None):
+ """Build pv dictionary for nfs storage type"""
+ host = self.task_vars.get(str(varname) + '_host')
+ if host:
+ host = self._templar.template(host)
+ elif host is None:
+ groups = self.task_vars.get('groups')
+ default_group_name = self.get_templated('openshift_persistent_volumes_default_nfs_group')
+ if groups and default_group_name and default_group_name in groups and len(groups[default_group_name]) > 0:
+ host = groups[default_group_name][0]
+ else:
+ raise errors.AnsibleModuleError("|failed no storage host detected")
+ volume, size, labels, access_modes = self.build_common(varname=varname)
+ directory = self.get_templated(str(varname) + '_nfs_directory')
+ path = directory + '/' + volume
+ return dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ nfs=dict(
+ server=host,
+ path=path)))
+
+ def build_pv_openstack(self, varname=None):
+ """Build pv dictionary for openstack storage type"""
+ volume, size, labels, access_modes = self.build_common(varname=varname)
+ filesystem = self.get_templated(str(varname) + '_openstack_filesystem')
+ volume_id = self.get_templated(str(varname) + '_openstack_volumeID')
+ return dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ cinder=dict(
+ fsType=filesystem,
+ volumeID=volume_id)))
+
+ def build_pv_glusterfs(self, varname=None):
+ """Build pv dictionary for glusterfs storage type"""
+ volume, size, labels, access_modes = self.build_common(varname=varname)
+ endpoints = self.get_templated(str(varname) + '_glusterfs_endpoints')
+ path = self.get_templated(str(varname) + '_glusterfs_path')
+ read_only = self.get_templated(str(varname) + '_glusterfs_readOnly')
+ return dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ glusterfs=dict(
+ endpoints=endpoints,
+ path=path,
+ readOnly=read_only)))
+
+ def build_pv_dict(self, varname=None):
+ """Check for the existence of PV variables"""
+ kind = self.task_vars.get(str(varname) + '_kind')
+ if kind:
+ kind = self._templar.template(kind)
+ create_pv = self.task_vars.get(str(varname) + '_create_pv')
+ if create_pv and self._templar.template(create_pv):
+ if kind == 'nfs':
+ return self.build_pv_nfs(varname=varname)
+
+ elif kind == 'openstack':
+ return self.build_pv_openstack(varname=varname)
+
+ elif kind == 'glusterfs':
+ return self.build_pv_glusterfs(varname=varname)
+
+ elif not (kind == 'object' or kind == 'dynamic'):
+ msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
+ kind,
+ varname)
+ raise errors.AnsibleModuleError(msg)
+ return None
+
+ def build_pvc_dict(self, varname=None):
+ """Check for the existence of PVC variables"""
+ kind = self.task_vars.get(str(varname) + '_kind')
+ if kind:
+ kind = self._templar.template(kind)
+ create_pv = self.task_vars.get(str(varname) + '_create_pv')
+ if create_pv:
+ create_pv = self._templar.template(create_pv)
+ create_pvc = self.task_vars.get(str(varname) + '_create_pvc')
+ if create_pvc:
+ create_pvc = self._templar.template(create_pvc)
+ if kind != 'object' and create_pv and create_pvc:
+ volume, size, _, access_modes = self.build_common(varname=varname)
+ return dict(
+ name="{0}-claim".format(volume),
+ capacity=size,
+ access_modes=access_modes)
+ return None
+
+ def run(self, tmp=None, task_vars=None):
+ """Run generate_pv_pvcs_list action plugin"""
+ result = super(ActionModule, self).run(tmp, task_vars)
+ # Ignore setting self.task_vars outside of init.
+ # pylint: disable=W0201
+ self.task_vars = task_vars or {}
+
+ result["changed"] = False
+ result["failed"] = False
+ result["msg"] = "persistent_volumes list and persistent_volume_claims list created"
+ vars_to_check = ['openshift_hosted_registry_storage',
+ 'openshift_hosted_router_storage',
+ 'openshift_hosted_etcd_storage',
+ 'openshift_logging_storage',
+ 'openshift_loggingops_storage',
+ 'openshift_metrics_storage',
+ 'openshift_prometheus_storage',
+ 'openshift_prometheus_alertmanager_storage',
+ 'openshift_prometheus_alertbuffer_storage']
+ persistent_volumes = []
+ persistent_volume_claims = []
+ for varname in vars_to_check:
+ pv_dict = self.build_pv_dict(varname)
+ if pv_dict:
+ persistent_volumes.append(pv_dict)
+ pvc_dict = self.build_pvc_dict(varname)
+ if pvc_dict:
+ persistent_volume_claims.append(pvc_dict)
+ result["persistent_volumes"] = persistent_volumes
+ result["persistent_volume_claims"] = persistent_volume_claims
+ return result
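The plugin is wired up in roles/openshift_persistent_volumes/tasks/main.yml below; a minimal sketch of the call and of reading its result keys:

    - name: create standard pv and pvc lists
      generate_pv_pvcs_list: {}
      register: l_pv_pvcs_list

    - debug:
        var: l_pv_pvcs_list.persistent_volumes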
diff --git a/roles/openshift_persistent_volumes/defaults/main.yml b/roles/openshift_persistent_volumes/defaults/main.yml
new file mode 100644
index 000000000..b16e164e6
--- /dev/null
+++ b/roles/openshift_persistent_volumes/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+
+openshift_persistent_volumes_default_nfs_group: 'oo_nfs_to_config'
+
+openshift_persistent_volume_extras: []
+openshift_persistent_volume_claims_extras: []
+
+glusterfs_pv: []
+glusterfs_pvc: []
diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml
index 19e9a56b7..48b0699ab 100644
--- a/roles/openshift_persistent_volumes/meta/main.yml
+++ b/roles/openshift_persistent_volumes/meta/main.yml
@@ -9,4 +9,5 @@ galaxy_info:
- name: EL
versions:
- 7
-dependencies: {}
+dependencies:
+- role: openshift_facts
diff --git a/roles/openshift_persistent_volumes/tasks/main.yml b/roles/openshift_persistent_volumes/tasks/main.yml
index e431e978c..0b4dd7d1f 100644
--- a/roles/openshift_persistent_volumes/tasks/main.yml
+++ b/roles/openshift_persistent_volumes/tasks/main.yml
@@ -9,39 +9,36 @@
cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
changed_when: False
-- name: Deploy PersistentVolume definitions
- template:
- dest: "{{ mktemp.stdout }}/persistent-volumes.yml"
- src: persistent-volume.yml.j2
- when: persistent_volumes | length > 0
- changed_when: False
+- set_fact:
+ glusterfs_pv:
+ - name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-volume"
+ capacity: "{{ openshift_hosted_registry_storage_volume_size }}"
+ access_modes: "{{ openshift_hosted_registry_storage_access_modes }}"
+ storage:
+ glusterfs:
+ endpoints: "{{ openshift_hosted_registry_storage_glusterfs_endpoints }}"
+ path: "{{ openshift_hosted_registry_storage_glusterfs_path }}"
+ readOnly: "{{ openshift_hosted_registry_storage_glusterfs_readOnly }}"
+ glusterfs_pvc:
+ - name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-claim"
+ capacity: "{{ openshift_hosted_registry_storage_volume_size }}"
+ access_modes: "{{ openshift_hosted_registry_storage_access_modes }}"
+ when: openshift_hosted_registry_storage_glusterfs_swap | default(False)
-- name: Create PersistentVolumes
- command: >
- {{ openshift.common.client_binary }} create
- -f {{ mktemp.stdout }}/persistent-volumes.yml
- --config={{ mktemp.stdout }}/admin.kubeconfig
- register: pv_create_output
- when: persistent_volumes | length > 0
- failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout)
- changed_when: ('created' in pv_create_output.stdout)
+- name: create standard pv and pvc lists
+ # generate_pv_pvcs_list is a custom action plugin defined in ../action_plugins
+ generate_pv_pvcs_list: {}
+ register: l_pv_pvcs_list
-- name: Deploy PersistentVolumeClaim definitions
- template:
- dest: "{{ mktemp.stdout }}/persistent-volume-claims.yml"
- src: persistent-volume-claim.yml.j2
- when: persistent_volume_claims | length > 0
- changed_when: False
+- include_tasks: pv.yml
+ vars:
+ l_extra_persistent_volumes: "{{ openshift_persistent_volume_extras | union(glusterfs_pv) }}"
+ persistent_volumes: "{{ l_pv_pvcs_list.persistent_volumes | union(l_extra_persistent_volumes) }}"
-- name: Create PersistentVolumeClaims
- command: >
- {{ openshift.common.client_binary }} create
- -f {{ mktemp.stdout }}/persistent-volume-claims.yml
- --config={{ mktemp.stdout }}/admin.kubeconfig
- register: pvc_create_output
- when: persistent_volume_claims | length > 0
- failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout)
- changed_when: ('created' in pvc_create_output.stdout)
+- include_tasks: pvc.yml
+ vars:
+ l_extra_persistent_volume_claims: "{{ openshift_persistent_volume_claims_extras | union(glusterfs_pvc) }}"
+ persistent_volume_claims: "{{ l_pv_pvcs_list.persistent_volume_claims | union(l_extra_persistent_volume_claims) }}"
- name: Delete temp directory
file:
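The generated lists are unioned with user-supplied extras whose empty defaults live in roles/openshift_persistent_volumes/defaults/main.yml above. A hedged sketch of feeding in an extra NFS-backed PV from inventory (all values are placeholders; the dict shape mirrors what the action plugin emits):

    openshift_persistent_volume_extras:
      - name: extra-volume
        capacity: 5Gi
        access_modes:
          - ReadWriteOnce
        storage:
          nfs:
            server: nfs.example.com
            path: /exports/extra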
diff --git a/roles/openshift_persistent_volumes/tasks/pv.yml b/roles/openshift_persistent_volumes/tasks/pv.yml
new file mode 100644
index 000000000..346605ff7
--- /dev/null
+++ b/roles/openshift_persistent_volumes/tasks/pv.yml
@@ -0,0 +1,17 @@
+---
+- name: Deploy PersistentVolume definitions
+ template:
+ dest: "{{ mktemp.stdout }}/persistent-volumes.yml"
+ src: persistent-volume.yml.j2
+ when: persistent_volumes | length > 0
+ changed_when: False
+
+- name: Create PersistentVolumes
+ command: >
+ {{ openshift.common.client_binary }} create
+ -f {{ mktemp.stdout }}/persistent-volumes.yml
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ register: pv_create_output
+ when: persistent_volumes | length > 0
+ failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout)
+ changed_when: ('created' in pv_create_output.stdout)
diff --git a/roles/openshift_persistent_volumes/tasks/pvc.yml b/roles/openshift_persistent_volumes/tasks/pvc.yml
new file mode 100644
index 000000000..e44f9b18f
--- /dev/null
+++ b/roles/openshift_persistent_volumes/tasks/pvc.yml
@@ -0,0 +1,17 @@
+---
+- name: Deploy PersistentVolumeClaim definitions
+ template:
+ dest: "{{ mktemp.stdout }}/persistent-volume-claims.yml"
+ src: persistent-volume-claim.yml.j2
+ when: persistent_volume_claims | length > 0
+ changed_when: False
+
+- name: Create PersistentVolumeClaims
+ command: >
+ {{ openshift.common.client_binary }} create
+ -f {{ mktemp.stdout }}/persistent-volume-claims.yml
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ register: pvc_create_output
+ when: persistent_volume_claims | length > 0
+ failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout)
+ changed_when: ('created' in pvc_create_output.stdout)
diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
index ee9dac7cb..9ec14208b 100644
--- a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
+++ b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
@@ -17,5 +17,5 @@ items:
capacity:
storage: "{{ volume.capacity }}"
accessModes: {{ volume.access_modes | to_padded_yaml(2, 2) }}
- {{ volume.storage.keys()[0] }}: {{ volume.storage[volume.storage.keys()[0]] | to_padded_yaml(3, 2) }}
+ {{ (volume.storage.keys() | list)[0] }}: {{ volume.storage[(volume.storage.keys() | list)[0]] | to_padded_yaml(3, 2) }}
{% endfor %}
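This template fix is Python 3 compatibility: `dict.keys()` returns a view there, which cannot be indexed, so the keys are run through the `list` filter before taking element 0. The same pattern in task form, with illustrative names:

    - set_fact:
        storage_kind: "{{ (volume.storage.keys() | list)[0] }}"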
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index 814d6ff28..b7b3c0db2 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -45,7 +45,7 @@ openshift_storage_glusterfs_heketi_fstab: "{{ '/var/lib/heketi/fstab' | quote if
openshift_storage_glusterfs_namespace: "{{ 'glusterfs' | quote if openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native else 'default' | quote }}"
openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
-openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default(openshift_storage_glusterfs_namespace) }}"
+openshift_storage_glusterfs_registry_namespace: "{{ openshift_hosted_registry_namespace | default(openshift_storage_glusterfs_namespace) }}"
openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
openshift_storage_glusterfs_registry_name: 'registry'
openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index 4b33e91b4..315bc5614 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -82,7 +82,7 @@
retries: "{{ (glusterfs_timeout | int / 10) | int }}"
when: glusterfs_heketi_wipe
-- include: glusterfs_deploy.yml
+- include_tasks: glusterfs_deploy.yml
when: glusterfs_is_native
- name: Create heketi service account
@@ -212,7 +212,7 @@
when:
- glusterfs_heketi_is_native
-- include: heketi_deploy_part1.yml
+- include_tasks: heketi_deploy_part1.yml
when:
- glusterfs_heketi_is_native
- glusterfs_heketi_deploy_is_missing
@@ -256,7 +256,7 @@
when:
- glusterfs_heketi_topology_load
-- include: heketi_deploy_part2.yml
+- include_tasks: heketi_deploy_part2.yml
when:
- glusterfs_heketi_is_native
- glusterfs_heketi_is_missing
@@ -312,8 +312,8 @@
when:
- glusterfs_storageclass or glusterfs_s3_deploy
-- include: glusterblock_deploy.yml
+- include_tasks: glusterblock_deploy.yml
when: glusterfs_block_deploy
-- include: gluster_s3_deploy.yml
+- include_tasks: gluster_s3_deploy.yml
when: glusterfs_s3_deploy
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 71c1311cd..73b9791eb 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -46,4 +46,4 @@
glusterfs_heketi_fstab: "{{ openshift_storage_glusterfs_heketi_fstab }}"
glusterfs_nodes: "{{ groups.glusterfs | default([]) }}"
-- include: glusterfs_common.yml
+- include_tasks: glusterfs_common.yml
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index d3cba61cf..7466702b8 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -46,7 +46,7 @@
glusterfs_heketi_fstab: "{{ openshift_storage_glusterfs_registry_heketi_fstab }}"
glusterfs_nodes: "{% if groups.glusterfs_registry is defined %}{% set nodes = groups.glusterfs_registry %}{% elif 'groups.glusterfs' is defined %}{% set nodes = groups.glusterfs %}{% else %}{% set nodes = '[]' %}{% endif %}{{ nodes }}"
-- include: glusterfs_common.yml
+- include_tasks: glusterfs_common.yml
when:
- glusterfs_nodes | default([]) | count > 0
- "'glusterfs' not in groups or glusterfs_nodes != groups.glusterfs"
@@ -56,5 +56,5 @@
register: registry_volume
- name: Create GlusterFS registry volume
- command: "{{ glusterfs_heketi_client }} volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
- when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout"
+ command: "{{ glusterfs_heketi_client }} volume create --size={{ openshift_hosted_registry_storage_volume_size | replace('Gi','') }} --name={{ openshift_hosted_registry_storage_glusterfs_path }}"
+ when: "openshift_hosted_registry_storage_glusterfs_path not in registry_volume.stdout"
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
index d2d8c6c10..b48bfc88e 100644
--- a/roles/openshift_storage_glusterfs/tasks/main.yml
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -5,13 +5,15 @@
changed_when: False
check_mode: no
-- include: glusterfs_config.yml
+- include_tasks: glusterfs_config.yml
when:
- groups.glusterfs | default([]) | count > 0
-- include: glusterfs_registry.yml
- when:
- - "groups.glusterfs_registry | default([]) | count > 0 or openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap"
+- include_tasks: glusterfs_registry.yml
+ when: >
+ groups.glusterfs_registry | default([]) | count > 0
+ or (openshift_hosted_registry_storage_kind | default(none) == 'glusterfs')
+ or (openshift_hosted_registry_storage_glusterfs_swap | default(False))
- name: Delete temp directory
file:
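
The rewritten condition uses a YAML folded scalar (when: >), which joins the indented lines with single spaces into one Jinja expression; the added default() filters also keep the check from failing when the hosted-registry variables are undefined. The resulting task, exactly as in the hunk:

    - include_tasks: glusterfs_registry.yml
      when: >
        groups.glusterfs_registry | default([]) | count > 0
        or (openshift_hosted_registry_storage_kind | default(none) == 'glusterfs')
        or (openshift_hosted_registry_storage_glusterfs_swap | default(False))
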
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index c25cad74c..55e4024ec 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -20,25 +20,25 @@
- name: Ensure exports directory exists
file:
- path: "{{ openshift.hosted.registry.storage.nfs.directory }}"
+ path: "{{ openshift_hosted_registry_storage_nfs_directory }}"
state: directory
- name: Ensure export directories exist
file:
- path: "{{ item.storage.nfs.directory }}/{{ item.storage.volume.name }}"
+ path: "{{ item }}"
state: directory
mode: 0777
owner: nfsnobody
group: nfsnobody
with_items:
- - "{{ openshift.hosted.registry }}"
- - "{{ openshift.metrics }}"
- - "{{ openshift.logging }}"
- - "{{ openshift.loggingops }}"
- - "{{ openshift.hosted.etcd }}"
- - "{{ openshift.prometheus }}"
- - "{{ openshift.prometheus.alertmanager }}"
- - "{{ openshift.prometheus.alertbuffer }}"
+ - "{{ openshift_hosted_registry_storage_nfs_directory }}/{{ openshift_hosted_registry_storage_volume_name }}"
+ - "{{ openshift_metrics_storage_nfs_directory }}/{{ openshift_metrics_storage_volume_name }}"
+ - "{{ openshift_logging_storage_nfs_directory }}/{{ openshift_logging_storage_volume_name }}"
+ - "{{ openshift_loggingops_storage_nfs_directory }}/{{ openshift_loggingops_storage_volume_name }}"
+ - "{{ openshift_hosted_etcd_storage_nfs_directory }}/{{ openshift_hosted_etcd_storage_volume_name }}"
+ - "{{ openshift_prometheus_storage_nfs_directory }}/{{ openshift_prometheus_storage_volume_name }}"
+ - "{{ openshift_prometheus_alertmanager_storage_nfs_directory }}/{{ openshift_prometheus_alertmanager_storage_volume_name }}"
+ - "{{ openshift_prometheus_alertbuffer_storage_nfs_directory }}/{{ openshift_prometheus_alertbuffer_storage_volume_name }}"
- name: Configure exports
template:
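
Because each list entry is now a fully-resolved path string, the file task can take {{ item }} directly instead of digging storage.nfs.directory and storage.volume.name out of a nested fact dict. A sketch of the new loop shape, trimmed to one entry (the example path is an assumption):

    - name: Ensure export directories exist
      file:
        path: "{{ item }}"   # e.g. /exports/registry, assuming defaults
        state: directory
        mode: 0777
        owner: nfsnobody
        group: nfsnobody
      with_items:
      - "{{ openshift_hosted_registry_storage_nfs_directory }}/{{ openshift_hosted_registry_storage_volume_name }}"
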
diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2
index c2a741035..2ec8db019 100644
--- a/roles/openshift_storage_nfs/templates/exports.j2
+++ b/roles/openshift_storage_nfs/templates/exports.j2
@@ -1,8 +1,8 @@
-{{ openshift.hosted.registry.storage.nfs.directory }}/{{ openshift.hosted.registry.storage.volume.name }} {{ openshift.hosted.registry.storage.nfs.options }}
-{{ openshift.metrics.storage.nfs.directory }}/{{ openshift.metrics.storage.volume.name }} {{ openshift.metrics.storage.nfs.options }}
-{{ openshift.logging.storage.nfs.directory }}/{{ openshift.logging.storage.volume.name }} {{ openshift.logging.storage.nfs.options }}
-{{ openshift.loggingops.storage.nfs.directory }}/{{ openshift.loggingops.storage.volume.name }} {{ openshift.loggingops.storage.nfs.options }}
-{{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }}
-{{ openshift.prometheus.storage.nfs.directory }}/{{ openshift.prometheus.storage.volume.name }} {{ openshift.prometheus.storage.nfs.options }}
-{{ openshift.prometheus.alertmanager.storage.nfs.directory }}/{{ openshift.prometheus.alertmanager.storage.volume.name }} {{ openshift.prometheus.alertmanager.storage.nfs.options }}
-{{ openshift.prometheus.alertbuffer.storage.nfs.directory }}/{{ openshift.prometheus.alertbuffer.storage.volume.name }} {{ openshift.prometheus.alertbuffer.storage.nfs.options }}
+{{ openshift_hosted_registry_storage_nfs_directory }}/{{ openshift_hosted_registry_storage_volume_name }} {{ openshift_hosted_registry_storage_nfs_options }}
+{{ openshift_metrics_storage_nfs_directory }}/{{ openshift_metrics_storage_volume_name }} {{ openshift_metrics_storage_nfs_options }}
+{{ openshift_logging_storage_nfs_directory }}/{{ openshift_logging_storage_volume_name }} {{ openshift_logging_storage_nfs_options }}
+{{ openshift_loggingops_storage_nfs_directory }}/{{ openshift_loggingops_storage_volume_name }} {{ openshift_loggingops_storage_nfs_options }}
+{{ openshift_hosted_etcd_storage_nfs_directory }}/{{ openshift_hosted_etcd_storage_volume_name }} {{ openshift_hosted_etcd_storage_nfs_options }}
+{{ openshift_prometheus_storage_nfs_directory }}/{{ openshift_prometheus_storage_volume_name }} {{ openshift_prometheus_storage_nfs_options }}
+{{ openshift_prometheus_alertmanager_storage_nfs_directory }}/{{ openshift_prometheus_alertmanager_storage_volume_name }} {{ openshift_prometheus_alertmanager_storage_nfs_options }}
+{{ openshift_prometheus_alertbuffer_storage_nfs_directory }}/{{ openshift_prometheus_alertbuffer_storage_volume_name }} {{ openshift_prometheus_alertbuffer_storage_nfs_options }}
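
Each template line renders to one exports(5) entry. With illustrative values (assumptions, not part of the diff) the first line becomes:

    # Assuming, for example:
    #   openshift_hosted_registry_storage_nfs_directory: /exports
    #   openshift_hosted_registry_storage_volume_name: registry
    #   openshift_hosted_registry_storage_nfs_options: "*(rw,root_squash)"
    # the rendered entry is:
    /exports/registry *(rw,root_squash)
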
diff --git a/roles/openshift_storage_nfs_lvm/tasks/main.yml b/roles/openshift_storage_nfs_lvm/tasks/main.yml
index 49dd657b5..c8e7b6d7c 100644
--- a/roles/openshift_storage_nfs_lvm/tasks/main.yml
+++ b/roles/openshift_storage_nfs_lvm/tasks/main.yml
@@ -20,7 +20,7 @@
file: path={{osnl_mount_dir}}/{{ item }} owner=nfsnobody group=nfsnobody mode=0700
with_sequence: start={{osnl_volume_num_start}} count={{osnl_number_of_volumes}} format={{osnl_volume_prefix}}{{osnl_volume_size}}g%04d
-- include: nfs.yml
+- include_tasks: nfs.yml
- name: Create volume json file
template: src=../templates/nfs.json.j2 dest=/root/persistent-volume.{{ item }}.json
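
For context on the loop feeding these tasks: with_sequence expands start/count into integers and passes each through the printf-style format string. With illustrative values (assumed, not from the diff):

    # osnl_volume_prefix: pv, osnl_volume_size: 5,
    # osnl_volume_num_start: 1, osnl_number_of_volumes: 3
    #
    # format=pv5g%04d yields items: pv5g0001, pv5g0002, pv5g0003,
    # so the file task above creates {{osnl_mount_dir}}/pv5g0001 and so on.
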
diff --git a/roles/openshift_version/meta/main.yml b/roles/openshift_version/meta/main.yml
index 38b398343..5d7683120 100644
--- a/roles/openshift_version/meta/main.yml
+++ b/roles/openshift_version/meta/main.yml
@@ -12,7 +12,4 @@ galaxy_info:
categories:
- cloud
dependencies:
-- role: openshift_docker_facts
-- role: docker
- when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool
- role: lib_utils
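
With the docker role folded into container_runtime (see the rename list at the top of this diff), the conditional dependency no longer belongs here. The trimmed meta file keeps only the utility library:

    dependencies:
    # Removed: docker was pulled in conditionally via its own when: guard.
    # - role: openshift_docker_facts
    # - role: docker
    #   when: openshift.common.is_containerized | default(False) | bool
    - role: lib_utils
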
diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml
index 574e89899..71f957b78 100644
--- a/roles/openshift_version/tasks/set_version_containerized.yml
+++ b/roles/openshift_version/tasks/set_version_containerized.yml
@@ -1,7 +1,4 @@
---
-- set_fact:
- l_use_crio_only: "{{ openshift_use_crio_only | default(false) }}"
-
- name: Set containerized version to configure if openshift_image_tag specified
set_fact:
# Expects a leading "v" in inventory, strip it off here unless
@@ -24,7 +21,7 @@
register: cli_image_version
when:
- openshift_version is not defined
- - not l_use_crio_only
+ - not openshift_use_crio_only
# Origin latest = pre-release version (i.e. v1.3.0-alpha.1-321-gb095e3a)
- set_fact:
@@ -33,7 +30,7 @@
- openshift_version is not defined
- openshift.common.deployment_type == 'origin'
- cli_image_version.stdout_lines[0].split('-') | length > 1
- - not l_use_crio_only
+ - not openshift_use_crio_only
- set_fact:
openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
@@ -48,14 +45,14 @@
when:
- openshift_version is defined
- openshift_version.split('.') | length == 2
- - not l_use_crio_only
+ - not openshift_use_crio_only
- set_fact:
openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2][1:] | join('-') if openshift.common.deployment_type == 'origin' else cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
when:
- openshift_version is defined
- openshift_version.split('.') | length == 2
- - not l_use_crio_only
+ - not openshift_use_crio_only
# TODO: figure out a way to check for the openshift_version when using CRI-O.
# We should do that using the images in the ostree storage so we don't have
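
Dropping the l_use_crio_only indirection assumes openshift_use_crio_only is always defined by the time these tasks run; presumably its default now lives in a central defaults file rather than a per-file set_fact (an assumption, not shown in this diff). The before/after shape:

    # Before: a throwaway local fact supplied the default.
    #   - set_fact:
    #       l_use_crio_only: "{{ openshift_use_crio_only | default(false) }}"
    #
    # After: conditions reference the variable directly, e.g.
    #   when:
    #   - openshift_version is not defined
    #   - not openshift_use_crio_only
    #
    # An undefined openshift_use_crio_only would now fail these tasks,
    # hence the assumption that its default is set centrally.
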
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index 9ca49b569..f83cf9157 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -69,7 +69,7 @@
until: subscribe_pool | succeeded
when: openshift_pool_id.stdout != ''
-- include: enterprise.yml
+- include_tasks: enterprise.yml
when:
- deployment_type == 'openshift-enterprise'
- not ostree_booted.stat.exists | bool