Diffstat (limited to 'roles')
-rw-r--r--  roles/ansible_service_broker/tasks/install.yml | 4
-rw-r--r--  roles/ansible_service_broker/tasks/main.yml | 4
-rw-r--r--  roles/cockpit/tasks/main.yml | 5
-rw-r--r--  roles/contiv/tasks/download_bins.yml | 2
-rw-r--r--  roles/contiv/tasks/main.yml | 6
-rw-r--r--  roles/contiv/tasks/netmaster.yml | 8
-rw-r--r--  roles/contiv/tasks/netplugin.yml | 6
-rw-r--r--  roles/contiv/tasks/ovs.yml | 2
-rw-r--r--  roles/contiv/tasks/packageManagerInstall.yml | 2
-rw-r--r--  roles/contiv/tasks/pkgMgrInstallers/centos-install.yml | 6
-rw-r--r--  roles/contiv_facts/tasks/fedora-install.yml | 2
-rw-r--r--  roles/contiv_facts/tasks/main.yml | 4
-rw-r--r--  roles/docker/tasks/main.yml | 17
-rw-r--r--  roles/docker/tasks/package_docker.yml | 9
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml | 16
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 10
-rw-r--r--  roles/etcd/defaults/main.yaml | 10
-rw-r--r--  roles/etcd/tasks/auxiliary/drop_etcdctl.yml | 2
-rw-r--r--  roles/etcd/tasks/backup.archive.yml | 4
-rw-r--r--  roles/etcd/tasks/backup.copy.yml | 4
-rw-r--r--  roles/etcd/tasks/backup.fetch.yml | 4
-rw-r--r--  roles/etcd/tasks/backup.force_new_cluster.yml | 4
-rw-r--r--  roles/etcd/tasks/backup.unarchive.yml | 4
-rw-r--r--  roles/etcd/tasks/backup.yml | 2
-rw-r--r--  roles/etcd/tasks/backup/backup.yml | 4
-rw-r--r--  roles/etcd/tasks/backup_ca_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/backup_generated_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/backup_master_etcd_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/backup_server_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/ca.yml | 2
-rw-r--r--  roles/etcd/tasks/certificates/deploy_ca.yml | 2
-rw-r--r--  roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml | 2
-rw-r--r--  roles/etcd/tasks/check_cluster_health.yml | 2
-rw-r--r--  roles/etcd/tasks/clean_data.yml | 2
-rw-r--r--  roles/etcd/tasks/client_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/disable_etcd.yml | 2
-rw-r--r--  roles/etcd/tasks/distribute_ca | 2
-rw-r--r--  roles/etcd/tasks/distribute_ca.yml | 2
-rw-r--r--  roles/etcd/tasks/drop_etcdctl.yml | 2
-rw-r--r--  roles/etcd/tasks/fetch_backup.yml | 8
-rw-r--r--  roles/etcd/tasks/main.yml | 19
-rw-r--r--  roles/etcd/tasks/migrate.add_ttls.yml | 2
-rw-r--r--  roles/etcd/tasks/migrate.configure_master.yml | 2
-rw-r--r--  roles/etcd/tasks/migrate.pre_check.yml | 2
-rw-r--r--  roles/etcd/tasks/migrate.yml | 2
-rw-r--r--  roles/etcd/tasks/migration/add_ttls.yml | 4
-rw-r--r--  roles/etcd/tasks/migration/check.yml | 8
-rw-r--r--  roles/etcd/tasks/remove_ca_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/remove_generated_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/restart.yml | 21
-rw-r--r--  roles/etcd/tasks/retrieve_ca_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/server_certificates.yml | 4
-rw-r--r--  roles/etcd/tasks/system_container.yml | 4
-rw-r--r--  roles/etcd/tasks/upgrade/upgrade_image.yml | 2
-rw-r--r--  roles/etcd/tasks/upgrade/upgrade_rpm.yml | 2
-rw-r--r--  roles/etcd/tasks/upgrade_image.yml | 2
-rw-r--r--  roles/etcd/tasks/upgrade_rpm.yml | 2
-rw-r--r--  roles/etcd/tasks/version_detect.yml | 55
-rw-r--r--  roles/etcd/templates/etcd.docker.service | 2
-rw-r--r--  roles/flannel/tasks/main.yml | 2
-rw-r--r--  roles/installer_checkpoint/README.md | 6
-rw-r--r--  roles/installer_checkpoint/callback_plugins/installer_checkpoint.py | 26
-rw-r--r--  roles/kuryr/tasks/master.yaml | 4
-rw-r--r--  roles/lib_openshift/library/oc_adm_router.py | 6
-rw-r--r--  roles/lib_openshift/src/ansible/oc_adm_router.py | 6
-rw-r--r--  roles/nickhammond.logrotate/tasks/main.yml | 2
-rw-r--r--  roles/nickhammond.logrotate/templates/logrotate.d.j2 | 2
-rw-r--r--  roles/nuage_ca/tasks/main.yaml | 2
-rw-r--r--  roles/nuage_master/tasks/main.yaml | 7
-rw-r--r--  roles/nuage_node/tasks/main.yaml | 7
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 1
-rw-r--r--  roles/openshift_certificate_expiry/README.md | 48
-rw-r--r--  roles/openshift_cli/defaults/main.yml | 5
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 12
-rw-r--r--  roles/openshift_clock/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_cloud_provider/tasks/main.yml | 6
-rw-r--r--  roles/openshift_etcd/meta/main.yml | 1
-rw-r--r--  roles/openshift_etcd_facts/tasks/main.yml | 4
-rw-r--r--  roles/openshift_excluder/tasks/disable.yml | 10
-rw-r--r--  roles/openshift_excluder/tasks/enable.yml | 4
-rw-r--r--  roles/openshift_excluder/tasks/install.yml | 8
-rw-r--r--  roles/openshift_excluder/tasks/main.yml | 2
-rw-r--r--  roles/openshift_excluder/tasks/verify_upgrade.yml | 4
-rw-r--r--  roles/openshift_expand_partition/tasks/main.yml | 2
-rw-r--r--  roles/openshift_facts/defaults/main.yml | 6
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 101
-rw-r--r--  roles/openshift_hosted/README.md | 22
-rw-r--r--  roles/openshift_hosted/defaults/main.yml | 1
-rw-r--r--  roles/openshift_hosted/tasks/registry.yml | 20
-rw-r--r--  roles/openshift_hosted/tasks/storage/glusterfs.yml | 15
-rw-r--r--  roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml | 16
-rw-r--r--  roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_loadbalancer/tasks/main.yml | 5
-rw-r--r--  roles/openshift_logging/tasks/generate_jks.yaml | 6
-rw-r--r--  roles/openshift_logging_curator/templates/curator.j2 | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/pvc.j2 | 4
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2 | 2
-rw-r--r--  roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 | 4
-rw-r--r--  roles/openshift_logging_fluentd/files/secure-forward.conf | 2
-rw-r--r--  roles/openshift_logging_kibana/templates/kibana.j2 | 2
-rw-r--r--  roles/openshift_logging_kibana/templates/route_reencrypt.j2 | 2
-rw-r--r--  roles/openshift_logging_mux/files/secure-forward.conf | 2
-rw-r--r--  roles/openshift_logging_mux/templates/mux.j2 | 4
-rw-r--r--  roles/openshift_manage_node/defaults/main.yml | 6
-rw-r--r--  roles/openshift_manage_node/tasks/main.yml | 2
-rw-r--r--  roles/openshift_management/tasks/add_container_provider.yml | 4
-rw-r--r--  roles/openshift_master/defaults/main.yml | 20
-rw-r--r--  roles/openshift_master/tasks/main.yml | 24
-rw-r--r--  roles/openshift_master/tasks/system_container.yml | 6
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 10
-rw-r--r--  roles/openshift_master/tasks/upgrade.yml | 10
-rw-r--r--  roles/openshift_master/tasks/upgrade/rpm_upgrade.yml | 2
-rw-r--r--  roles/openshift_master/templates/htpasswd.j2 | 2
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 14
-rw-r--r--  roles/openshift_master_cluster/tasks/main.yml | 2
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 8
-rw-r--r--  roles/openshift_metrics/tasks/generate_certificates.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml | 4
-rw-r--r--  roles/openshift_metrics/tasks/install_heapster.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_metrics.yaml | 18
-rw-r--r--  roles/openshift_metrics/tasks/install_support.yaml | 8
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 6
-rw-r--r--  roles/openshift_metrics/tasks/uninstall_metrics.yaml | 2
-rw-r--r--  roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 | 2
-rw-r--r--  roles/openshift_metrics/templates/hawkular_metrics_rc.j2 | 2
-rw-r--r--  roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 | 2
-rw-r--r--  roles/openshift_metrics/templates/heapster.j2 | 2
-rw-r--r--  roles/openshift_metrics/templates/pvc.j2 | 6
-rw-r--r--  roles/openshift_metrics/templates/rolebinding.j2 | 2
-rw-r--r--  roles/openshift_metrics/templates/route.j2 | 2
-rw-r--r--  roles/openshift_metrics/templates/secret.j2 | 6
-rw-r--r--  roles/openshift_metrics/templates/service.j2 | 8
-rw-r--r--  roles/openshift_metrics/templates/serviceaccount.j2 | 2
-rw-r--r--  roles/openshift_nfs/tasks/setup.yml | 2
-rw-r--r--  roles/openshift_node/defaults/main.yml | 16
-rwxr-xr-x  roles/openshift_node/files/networkmanager/99-origin-dns.sh (renamed from roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh) | 0
-rw-r--r--  roles/openshift_node/handlers/main.yml | 11
-rw-r--r--  roles/openshift_node/meta/main.yml | 3
-rw-r--r--  roles/openshift_node/tasks/bootstrap.yml | 5
-rw-r--r--  roles/openshift_node/tasks/config.yml | 5
-rw-r--r--  roles/openshift_node/tasks/dnsmasq.yml (renamed from roles/openshift_node_dnsmasq/tasks/main.yml) | 6
-rw-r--r--  roles/openshift_node/tasks/dnsmasq/network-manager.yml (renamed from roles/openshift_node_dnsmasq/tasks/network-manager.yml) | 0
-rw-r--r--  roles/openshift_node/tasks/dnsmasq/no-network-manager.yml (renamed from roles/openshift_node_dnsmasq/tasks/no-network-manager.yml) | 4
-rw-r--r--  roles/openshift_node/tasks/docker/upgrade.yml | 2
-rw-r--r--  roles/openshift_node/tasks/install.yml | 8
-rw-r--r--  roles/openshift_node/tasks/main.yml | 23
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 4
-rw-r--r--  roles/openshift_node/tasks/openvswitch_system_container.yml | 4
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/ceph.yml | 2
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/glusterfs.yml | 2
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/iscsi.yml | 2
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/nfs.yml | 2
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 22
-rw-r--r--  roles/openshift_node/tasks/upgrade.yml | 17
-rw-r--r--  roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml | 2
-rw-r--r--  roles/openshift_node/tasks/upgrade/rpm_upgrade.yml | 4
-rw-r--r--  roles/openshift_node/templates/node-dnsmasq.conf.j2 (renamed from roles/openshift_node_dnsmasq/templates/node-dnsmasq.conf.j2) | 0
-rw-r--r--  roles/openshift_node/templates/origin-dns.conf.j2 (renamed from roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2) | 0
-rw-r--r--  roles/openshift_node_dnsmasq/README.md | 27
-rw-r--r--  roles/openshift_node_dnsmasq/defaults/main.yml | 7
-rw-r--r--  roles/openshift_node_dnsmasq/handlers/main.yml | 11
-rw-r--r--  roles/openshift_node_dnsmasq/meta/main.yml | 15
-rw-r--r--  roles/openshift_node_facts/tasks/main.yml | 1
-rw-r--r--  roles/openshift_node_group/defaults/main.yml | 2
-rw-r--r--  roles/openshift_node_group/tasks/main.yml | 2
-rw-r--r--  roles/openshift_node_group/templates/node-config.yaml.j2 | 2
-rw-r--r--  roles/openshift_openstack/tasks/node-packages.yml | 4
-rw-r--r--  roles/openshift_openstack/templates/heat_stack.yaml.j2 | 4
-rw-r--r--  roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 | 2
-rw-r--r--  roles/openshift_prometheus/README.md | 11
-rw-r--r--  roles/openshift_prometheus/defaults/main.yaml | 7
-rw-r--r--  roles/openshift_prometheus/tasks/install_prometheus.yaml | 25
-rw-r--r--  roles/openshift_prometheus/tasks/main.yaml | 8
-rw-r--r--  roles/openshift_prometheus/tasks/uninstall_prometheus.yaml | 7
-rw-r--r--  roles/openshift_prometheus/templates/prometheus.j2 | 7
-rw-r--r--  roles/openshift_prometheus/vars/default_images.yml | 2
-rw-r--r--  roles/openshift_provisioners/tasks/install_provisioners.yaml | 8
-rw-r--r--  roles/openshift_provisioners/tasks/install_support.yaml | 6
-rw-r--r--  roles/openshift_provisioners/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_provisioners/tasks/uninstall_provisioners.yaml | 2
-rw-r--r--  roles/openshift_provisioners/templates/clusterrolebinding.j2 | 2
-rw-r--r--  roles/openshift_provisioners/templates/efs.j2 | 2
-rw-r--r--  roles/openshift_provisioners/templates/pv.j2 | 4
-rw-r--r--  roles/openshift_provisioners/templates/pvc.j2 | 4
-rw-r--r--  roles/openshift_provisioners/templates/secret.j2 | 2
-rw-r--r--  roles/openshift_provisioners/templates/serviceaccount.j2 | 2
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/deprecations.yml | 2
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/main.yml | 4
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/unsupported.yml | 24
-rw-r--r--  roles/openshift_service_catalog/tasks/install.yml | 4
-rw-r--r--  roles/openshift_service_catalog/tasks/main.yml | 4
-rw-r--r--  roles/openshift_service_catalog/templates/api_server.j2 | 2
-rw-r--r--  roles/openshift_service_catalog/templates/controller_manager.j2 | 2
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 7
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 8
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml | 3
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml | 133
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml | 67
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml | 140
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml | 104
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml | 154
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml | 136
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml | 8
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 6
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml | 12
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 45
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 | 8
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 | 13
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2 | 42
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2 | 49
-rw-r--r--  roles/openshift_storage_nfs/tasks/main.yml | 5
-rw-r--r--  roles/openshift_storage_nfs_lvm/tasks/nfs.yml | 2
-rw-r--r--  roles/openshift_version/tasks/main.yml | 4
-rw-r--r--  roles/openshift_version/tasks/set_version_containerized.yml | 4
-rw-r--r--  roles/os_firewall/tasks/firewalld.yml | 2
-rw-r--r--  roles/os_firewall/tasks/iptables.yml | 2
-rw-r--r--  roles/os_firewall/tasks/main.yml | 4
-rw-r--r--  roles/os_update_latest/tasks/main.yml | 2
-rw-r--r--  roles/rhel_subscribe/tasks/main.yml | 2
-rw-r--r--  roles/template_service_broker/tasks/main.yml | 4
231 files changed, 1729 insertions, 600 deletions
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
index 75be7a784..f9b9b3217 100644
--- a/roles/ansible_service_broker/tasks/install.yml
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -30,9 +30,9 @@
ansible_service_broker_image: "{{ ansible_service_broker_image_prefix }}ansible-service-broker:{{ ansible_service_broker_image_tag }}"
ansible_service_broker_etcd_image: "{{ ansible_service_broker_etcd_image_prefix }}etcd:{{ ansible_service_broker_etcd_image_tag }}"
-- include: validate_facts.yml
+- include_tasks: validate_facts.yml
-- include: generate_certs.yml
+- include_tasks: generate_certs.yml
# Deployment of ansible-service-broker starts here
- name: create openshift-ansible-service-broker project
diff --git a/roles/ansible_service_broker/tasks/main.yml b/roles/ansible_service_broker/tasks/main.yml
index f5e06d163..4a3c15d01 100644
--- a/roles/ansible_service_broker/tasks/main.yml
+++ b/roles/ansible_service_broker/tasks/main.yml
@@ -1,8 +1,8 @@
---
# do any asserts here
-- include: install.yml
+- include_tasks: install.yml
when: ansible_service_broker_install | bool
-- include: remove.yml
+- include_tasks: remove.yml
when: ansible_service_broker_remove | bool
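The conversions above, repeated throughout this diff, replace the deprecated `include:` directive with its explicit successors: `import_tasks:` where the old call used `static: yes`, and `include_tasks:` elsewhere. A minimal sketch of the difference, using hypothetical file names:
```
---
# import_tasks is resolved at parse time and inlined into the play,
# matching the old `include:` + `static: yes` behaviour.
- import_tasks: firewall.yml

# include_tasks is resolved at runtime; if the condition is false,
# the file is never loaded, matching a plain dynamic `include:`.
- include_tasks: install.yml
  when: ansible_service_broker_install | bool
```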
diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml
index 066ee3f3b..f63b3e49b 100644
--- a/roles/cockpit/tasks/main.yml
+++ b/roles/cockpit/tasks/main.yml
@@ -1,7 +1,6 @@
---
- name: setup firewall
- include: firewall.yml
- static: yes
+ import_tasks: firewall.yml
- name: Install cockpit-ws
package: name={{ item }} state=present
@@ -12,6 +11,8 @@
- cockpit-docker
- "{{ cockpit_plugins }}"
when: not openshift.common.is_containerized | bool
+ register: result
+ until: result | success
- name: Enable cockpit-ws
systemd:
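The `register: result` / `until: result | success` pair added here (and to nearly every package task in this diff) retries transient package-manager failures. With no explicit `retries:`/`delay:`, Ansible falls back to its defaults of 3 retries at 5-second intervals. A sketch of the spelled-out form; the counts are illustrative assumptions, not part of this change:
```
- name: Install cockpit-ws
  package:
    name: cockpit-ws
    state: present
  register: result
  until: result | success
  retries: 3   # assumed for illustration; the diff relies on the defaults
  delay: 5
```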
diff --git a/roles/contiv/tasks/download_bins.yml b/roles/contiv/tasks/download_bins.yml
index 319fce46c..741c1d1da 100644
--- a/roles/contiv/tasks/download_bins.yml
+++ b/roles/contiv/tasks/download_bins.yml
@@ -8,6 +8,8 @@
yum:
name: bzip2
state: installed
+ register: result
+ until: result | success
- name: Download Bins | Download Contiv tar file
get_url:
diff --git a/roles/contiv/tasks/main.yml b/roles/contiv/tasks/main.yml
index 40a0f9e61..cb9196a71 100644
--- a/roles/contiv/tasks/main.yml
+++ b/roles/contiv/tasks/main.yml
@@ -5,10 +5,10 @@
recurse: yes
state: directory
-- include: download_bins.yml
+- include_tasks: download_bins.yml
-- include: netmaster.yml
+- include_tasks: netmaster.yml
when: contiv_role == "netmaster"
-- include: netplugin.yml
+- include_tasks: netplugin.yml
when: contiv_role == "netplugin"
diff --git a/roles/contiv/tasks/netmaster.yml b/roles/contiv/tasks/netmaster.yml
index cc52d3a43..6f15af8c2 100644
--- a/roles/contiv/tasks/netmaster.yml
+++ b/roles/contiv/tasks/netmaster.yml
@@ -1,8 +1,8 @@
---
-- include: netmaster_firewalld.yml
+- include_tasks: netmaster_firewalld.yml
when: has_firewalld
-- include: netmaster_iptables.yml
+- include_tasks: netmaster_iptables.yml
when: not has_firewalld and has_iptables
- name: Netmaster | Check if /etc/hosts file exists
@@ -70,8 +70,8 @@
state: started
register: netmaster_started
-- include: aci.yml
+- include_tasks: aci.yml
when: contiv_fabric_mode == "aci"
-- include: default_network.yml
+- include_tasks: default_network.yml
when: contiv_default_network == true
diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml
index e861a2591..0b2f91bab 100644
--- a/roles/contiv/tasks/netplugin.yml
+++ b/roles/contiv/tasks/netplugin.yml
@@ -1,8 +1,8 @@
---
-- include: netplugin_firewalld.yml
+- include_tasks: netplugin_firewalld.yml
when: has_firewalld
-- include: netplugin_iptables.yml
+- include_tasks: netplugin_iptables.yml
when: has_iptables
- name: Netplugin | Ensure localhost entry correct in /etc/hosts
@@ -19,7 +19,7 @@
line: '::1 '
state: absent
-- include: ovs.yml
+- include_tasks: ovs.yml
when: netplugin_driver == "ovs"
- name: Netplugin | Create Netplugin bin symlink
diff --git a/roles/contiv/tasks/ovs.yml b/roles/contiv/tasks/ovs.yml
index 0c1b994c7..5c92e90e9 100644
--- a/roles/contiv/tasks/ovs.yml
+++ b/roles/contiv/tasks/ovs.yml
@@ -1,5 +1,5 @@
---
-- include: packageManagerInstall.yml
+- include_tasks: packageManagerInstall.yml
when: source_type == "packageManager"
tags:
- binary-update
diff --git a/roles/contiv/tasks/packageManagerInstall.yml b/roles/contiv/tasks/packageManagerInstall.yml
index e0d48e643..d5726476c 100644
--- a/roles/contiv/tasks/packageManagerInstall.yml
+++ b/roles/contiv/tasks/packageManagerInstall.yml
@@ -3,7 +3,7 @@
set_fact:
did_install: false
-- include: pkgMgrInstallers/centos-install.yml
+- include_tasks: pkgMgrInstallers/centos-install.yml
when: (ansible_os_family == "RedHat") and
not is_atomic
diff --git a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
index 91e6aadf3..62b4716a3 100644
--- a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
+++ b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
@@ -3,6 +3,8 @@
yum:
pkg=net-tools
state=latest
+ register: result
+ until: result | success
- name: PkgMgr RHEL/CentOS | Get openstack ocata rpm
get_url:
@@ -20,6 +22,8 @@
yum: name=/tmp/rdo-release-ocata-2.noarch.rpm state=present
tags:
- ovs_install
+ register: result
+ until: result | success
- name: PkgMgr RHEL/CentOS | Install ovs
yum:
@@ -31,3 +35,5 @@
no_proxy: "{{ no_proxy|default('') }}"
tags:
- ovs_install
+ register: result
+ until: result | success
diff --git a/roles/contiv_facts/tasks/fedora-install.yml b/roles/contiv_facts/tasks/fedora-install.yml
index db56a18c0..a57f6eb19 100644
--- a/roles/contiv_facts/tasks/fedora-install.yml
+++ b/roles/contiv_facts/tasks/fedora-install.yml
@@ -3,6 +3,8 @@
yum:
name: dnf
state: installed
+ register: result
+ until: result | success
- name: Update repo cache
command: dnf update -y
diff --git a/roles/contiv_facts/tasks/main.yml b/roles/contiv_facts/tasks/main.yml
index 7a4972fca..3267a4ab0 100644
--- a/roles/contiv_facts/tasks/main.yml
+++ b/roles/contiv_facts/tasks/main.yml
@@ -81,8 +81,8 @@
has_iptables: false
# collect information about what packages are installed
-- include: rpm.yml
+- include_tasks: rpm.yml
when: has_rpm
-- include: fedora-install.yml
+- include_tasks: fedora-install.yml
when: not is_atomic and ansible_distribution == "Fedora"
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 69ee62790..b02a74711 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -2,7 +2,7 @@
# These tasks dispatch to the proper set of docker tasks based on the
# inventory:openshift_docker_use_system_container variable
-- include: udev_workaround.yml
+- include_tasks: udev_workaround.yml
when: docker_udev_workaround | default(False) | bool
- set_fact:
@@ -20,7 +20,7 @@
- not l_use_crio_only
- name: Use Package Docker if Requested
- include: package_docker.yml
+ include_tasks: package_docker.yml
when:
- not l_use_system_container
- not l_use_crio_only
@@ -35,13 +35,13 @@
changed_when: false
- name: Use System Container Docker if Requested
- include: systemcontainer_docker.yml
+ include_tasks: systemcontainer_docker.yml
when:
- l_use_system_container
- not l_use_crio_only
- name: Add CRI-O usage Requested
- include: systemcontainer_crio.yml
+ include_tasks: systemcontainer_crio.yml
when:
- l_use_crio
- openshift_docker_is_node_or_master | bool
@@ -60,10 +60,11 @@
state: stopped
name: "{{ openshift.docker.service_name }}"
- - name: "Ensure {{ docker_alt_storage_path }} exists"
- file:
- path: "{{ docker_alt_storage_path }}"
- state: directory
+ - name: copy "{{ docker_default_storage_path }}" to "{{ docker_alt_storage_path }}"
+ command: "cp -r {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
+ register: results
+ failed_when:
+ - results.rc != 0
- name: "Set the selinux context on {{ docker_alt_storage_path }}"
command: "semanage fcontext -a -e {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index 8121163a6..044b04478 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -1,6 +1,6 @@
---
- name: Get current installed Docker version
- command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
+ command: "{{ repoquery_installed }} --qf '%{version}' docker"
when: not openshift.common.is_atomic | bool
register: curr_docker_version
retries: 4
@@ -33,9 +33,12 @@
# Make sure Docker is installed, but does not update a running version.
# Docker upgrades are handled by a separate playbook.
+# Note: The curr_docker_version.stdout check can be removed when https://github.com/ansible/ansible/issues/33187 gets fixed.
- name: Install Docker
package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
- when: not openshift.common.is_atomic | bool
+ when: not openshift.common.is_atomic | bool and not curr_docker_version | skipped and not curr_docker_version.stdout != ''
+ register: result
+ until: result | success
- block:
# Extend the default Docker service unit file when using iptables-services
@@ -157,4 +160,4 @@
- meta: flush_handlers
# This needs to run after docker is restarted to account for proxy settings.
-- include: registry_auth.yml
+- include_tasks: registry_auth.yml
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index 3fe10454d..3439aa353 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -3,16 +3,10 @@
# TODO: Much of this file is shared with container engine tasks
- set_fact:
l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}"
- when: l2_docker_insecure_registries | bool
- set_fact:
l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}"
- when: l2_docker_additional_registries | bool
-- set_fact:
- l_crio_registries: "{{ ['docker.io'] }}"
- when: not (l2_docker_additional_registries | bool)
- set_fact:
l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}"
- when: l2_docker_additional_registries | bool
- set_fact:
l_openshift_image_tag: "{{ openshift_image_tag | string }}"
@@ -35,6 +29,8 @@
name: container-selinux
state: present
when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
- name: Check we are not using node as a Docker container with CRI-O
fail: msg='Cannot use CRI-O with node configured as a Docker container'
@@ -48,6 +44,8 @@
name: atomic
state: present
when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
# At the time of writing the atomic command requires runc for its own use. This
# task is here in the event that the atomic package ever removes the dependency.
@@ -56,6 +54,8 @@
name: runc
state: present
when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
- name: Check that overlay is in the kernel
@@ -162,7 +162,7 @@
state: directory
- name: setup firewall for CRI-O
- include: crio_firewall.yml
+ include_tasks: crio_firewall.yml
static: yes
- name: Configure the CNI network
@@ -182,6 +182,6 @@
# If we are using crio only, docker.service might not be available for
# 'docker login'
-- include: registry_auth.yml
+- include_tasks: registry_auth.yml
vars:
openshift_docker_alternative_creds: "{{ l_use_crio_only }}"
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index 84220fa66..881d83f50 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -34,6 +34,8 @@
name: container-selinux
state: present
when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
# Used to pull and install the system container
- name: Ensure atomic is installed
@@ -41,6 +43,8 @@
name: atomic
state: present
when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
# At the time of writing the atomic command requires runc for its own use. This
# task is here in the event that the atomic package ever removes the dependency.
@@ -49,11 +53,15 @@
name: runc
state: present
when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
# Make sure Docker is installed so we are able to use the client
- name: Install Docker so we can use the client
package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
# Make sure docker is disabled. Errors are ignored.
- name: Disable Docker
@@ -177,6 +185,6 @@
# Since docker is running as a system container, docker login will fail to create
# credentials. Use alternate method if requiring authenticated registries.
-- include: registry_auth.yml
+- include_tasks: registry_auth.yml
vars:
openshift_docker_alternative_creds: True
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 4b734d4ed..a069e4d87 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -2,10 +2,18 @@
r_etcd_common_backup_tag: ''
r_etcd_common_backup_sufix_name: ''
+l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+
# runc, docker, host
-r_etcd_common_etcd_runtime: "docker"
+r_etcd_common_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"
r_etcd_common_embedded_etcd: false
+osm_etcd_image: 'registry.access.redhat.com/rhel7/etcd'
+etcd_image_dict:
+ origin: "registry.fedoraproject.org/f26/etcd"
+ openshift-enterprise: "{{ osm_etcd_image }}"
+etcd_image: "{{ etcd_image_dict[openshift_deployment_type | default('origin')] }}"
+
# etcd run on a host => use etcdctl command directly
# etcd run as a docker container => use docker exec
# etcd run as a runc container => use runc exec
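These defaults now derive the etcd runtime and image rather than hard-coding them: a system-container install selects `runc`, any other containerized install selects `docker`, and bare hosts run etcd directly; `etcd_image` is then looked up by deployment type. A hedged sketch of how the values resolve, assuming the default `origin` deployment type:
```
# Illustration only, assuming openshift_deployment_type defaults to
# 'origin' and no etcd system container is requested:
#   r_etcd_common_etcd_runtime -> 'docker' (containerized) or 'host'
#   etcd_image                 -> registry.fedoraproject.org/f26/etcd
- debug:
    msg: "etcd runtime={{ r_etcd_common_etcd_runtime }} image={{ etcd_image }}"
```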
diff --git a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
index 11bd2310e..603f2531f 100644
--- a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
+++ b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
@@ -2,6 +2,8 @@
- name: Install etcd for etcdctl
package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
- name: Configure etcd profile.d aliases
template:
diff --git a/roles/etcd/tasks/backup.archive.yml b/roles/etcd/tasks/backup.archive.yml
index 6daa6dc51..a29a90ea3 100644
--- a/roles/etcd/tasks/backup.archive.yml
+++ b/roles/etcd/tasks/backup.archive.yml
@@ -1,3 +1,3 @@
---
-- include: backup/vars.yml
-- include: backup/archive.yml
+- include_tasks: backup/vars.yml
+- include_tasks: backup/archive.yml
diff --git a/roles/etcd/tasks/backup.copy.yml b/roles/etcd/tasks/backup.copy.yml
index cc540cbca..6e8502e3b 100644
--- a/roles/etcd/tasks/backup.copy.yml
+++ b/roles/etcd/tasks/backup.copy.yml
@@ -1,3 +1,3 @@
---
-- include: backup/vars.yml
-- include: backup/copy.yml
+- include_tasks: backup/vars.yml
+- include_tasks: backup/copy.yml
diff --git a/roles/etcd/tasks/backup.fetch.yml b/roles/etcd/tasks/backup.fetch.yml
index 26ec15043..d33878804 100644
--- a/roles/etcd/tasks/backup.fetch.yml
+++ b/roles/etcd/tasks/backup.fetch.yml
@@ -1,3 +1,3 @@
---
-- include: backup/vars.yml
-- include: backup/fetch.yml
+- include_tasks: backup/vars.yml
+- include_tasks: backup/fetch.yml
diff --git a/roles/etcd/tasks/backup.force_new_cluster.yml b/roles/etcd/tasks/backup.force_new_cluster.yml
index d2e866416..7dd0899ee 100644
--- a/roles/etcd/tasks/backup.force_new_cluster.yml
+++ b/roles/etcd/tasks/backup.force_new_cluster.yml
@@ -1,5 +1,5 @@
---
-- include: backup/vars.yml
+- include_tasks: backup/vars.yml
- name: Move content of etcd backup under the etcd data directory
command: >
@@ -9,4 +9,4 @@
command: >
chown -R etcd:etcd "{{ etcd_data_dir }}"
-- include: auxiliary/force_new_cluster.yml
+- include_tasks: auxiliary/force_new_cluster.yml
diff --git a/roles/etcd/tasks/backup.unarchive.yml b/roles/etcd/tasks/backup.unarchive.yml
index 77a637360..f92e87c3d 100644
--- a/roles/etcd/tasks/backup.unarchive.yml
+++ b/roles/etcd/tasks/backup.unarchive.yml
@@ -1,3 +1,3 @@
---
-- include: backup/vars.yml
-- include: backup/unarchive.yml
+- include_tasks: backup/vars.yml
+- include_tasks: backup/unarchive.yml
diff --git a/roles/etcd/tasks/backup.yml b/roles/etcd/tasks/backup.yml
index c0538e596..60bb82100 100644
--- a/roles/etcd/tasks/backup.yml
+++ b/roles/etcd/tasks/backup.yml
@@ -1,2 +1,2 @@
---
-- include: backup/backup.yml
+- include_tasks: backup/backup.yml
diff --git a/roles/etcd/tasks/backup/backup.yml b/roles/etcd/tasks/backup/backup.yml
index ca0d29155..9da023dbd 100644
--- a/roles/etcd/tasks/backup/backup.yml
+++ b/roles/etcd/tasks/backup/backup.yml
@@ -1,5 +1,5 @@
---
-- include: vars.yml
+- include_tasks: vars.yml
# TODO: replace shell module with command and update later checks
- name: Check available disk space for etcd backup
@@ -43,6 +43,8 @@
when:
- r_etcd_common_embedded_etcd | bool
- not l_ostree_booted.stat.exists | bool
+ register: result
+ until: result | success
- name: Check selinux label of '{{ etcd_data_dir }}'
command: >
diff --git a/roles/etcd/tasks/backup_ca_certificates.yml b/roles/etcd/tasks/backup_ca_certificates.yml
index a41b032f3..c87359900 100644
--- a/roles/etcd/tasks/backup_ca_certificates.yml
+++ b/roles/etcd/tasks/backup_ca_certificates.yml
@@ -1,2 +1,2 @@
---
-- include: certificates/backup_ca_certificates.yml
+- include_tasks: certificates/backup_ca_certificates.yml
diff --git a/roles/etcd/tasks/backup_generated_certificates.yml b/roles/etcd/tasks/backup_generated_certificates.yml
index 8cf2a10cc..fa73ea590 100644
--- a/roles/etcd/tasks/backup_generated_certificates.yml
+++ b/roles/etcd/tasks/backup_generated_certificates.yml
@@ -1,2 +1,2 @@
---
-- include: certificates/backup_generated_certificates.yml
+- include_tasks: certificates/backup_generated_certificates.yml
diff --git a/roles/etcd/tasks/backup_master_etcd_certificates.yml b/roles/etcd/tasks/backup_master_etcd_certificates.yml
index 129e1831c..5526825fa 100644
--- a/roles/etcd/tasks/backup_master_etcd_certificates.yml
+++ b/roles/etcd/tasks/backup_master_etcd_certificates.yml
@@ -1,2 +1,2 @@
---
-- include: certificates/backup_master_etcd_certificates.yml
+- include_tasks: certificates/backup_master_etcd_certificates.yml
diff --git a/roles/etcd/tasks/backup_server_certificates.yml b/roles/etcd/tasks/backup_server_certificates.yml
index 267ffeb4d..5f3052be1 100644
--- a/roles/etcd/tasks/backup_server_certificates.yml
+++ b/roles/etcd/tasks/backup_server_certificates.yml
@@ -1,2 +1,2 @@
---
-- include: certificates/backup_server_certificates.yml
+- include_tasks: certificates/backup_server_certificates.yml
diff --git a/roles/etcd/tasks/ca.yml b/roles/etcd/tasks/ca.yml
index cca1e9ad7..dd4b59e24 100644
--- a/roles/etcd/tasks/ca.yml
+++ b/roles/etcd/tasks/ca.yml
@@ -1,2 +1,2 @@
---
-- include: certificates/deploy_ca.yml
+- include_tasks: certificates/deploy_ca.yml
diff --git a/roles/etcd/tasks/certificates/deploy_ca.yml b/roles/etcd/tasks/certificates/deploy_ca.yml
index 3d32290a2..bd4dafafd 100644
--- a/roles/etcd/tasks/certificates/deploy_ca.yml
+++ b/roles/etcd/tasks/certificates/deploy_ca.yml
@@ -6,6 +6,8 @@
when: not etcd_is_atomic | bool
delegate_to: "{{ etcd_ca_host }}"
run_once: true
+ register: result
+ until: result | success
- file:
path: "{{ item }}"
diff --git a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
index 00b8f4a0b..f4726940a 100644
--- a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
+++ b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
@@ -4,6 +4,8 @@
name: "etcd{{ '-' + etcd_version if etcd_version is defined else '' }}"
state: present
when: not etcd_is_containerized | bool
+ register: result
+ until: result | success
- name: Check status of etcd certificates
stat:
diff --git a/roles/etcd/tasks/check_cluster_health.yml b/roles/etcd/tasks/check_cluster_health.yml
index 75c110972..3410528eb 100644
--- a/roles/etcd/tasks/check_cluster_health.yml
+++ b/roles/etcd/tasks/check_cluster_health.yml
@@ -1,2 +1,2 @@
---
-- include: migration/check_cluster_health.yml
+- include_tasks: migration/check_cluster_health.yml
diff --git a/roles/etcd/tasks/clean_data.yml b/roles/etcd/tasks/clean_data.yml
index d131ffd21..12538c2d0 100644
--- a/roles/etcd/tasks/clean_data.yml
+++ b/roles/etcd/tasks/clean_data.yml
@@ -1,2 +1,2 @@
---
-- include: auxiliary/clean_data.yml
+- include_tasks: auxiliary/clean_data.yml
diff --git a/roles/etcd/tasks/client_certificates.yml b/roles/etcd/tasks/client_certificates.yml
index 2f4108a0d..f3201816d 100644
--- a/roles/etcd/tasks/client_certificates.yml
+++ b/roles/etcd/tasks/client_certificates.yml
@@ -1,2 +1,2 @@
---
-- include: certificates/fetch_client_certificates_from_ca.yml
+- include_tasks: certificates/fetch_client_certificates_from_ca.yml
diff --git a/roles/etcd/tasks/disable_etcd.yml b/roles/etcd/tasks/disable_etcd.yml
index 9202e6e48..55fb7f6ea 100644
--- a/roles/etcd/tasks/disable_etcd.yml
+++ b/roles/etcd/tasks/disable_etcd.yml
@@ -1,2 +1,2 @@
---
-- include: auxiliary/disable_etcd.yml
+- include_tasks: auxiliary/disable_etcd.yml
diff --git a/roles/etcd/tasks/distribute_ca b/roles/etcd/tasks/distribute_ca
deleted file mode 100644
index 040c5f7af..000000000
--- a/roles/etcd/tasks/distribute_ca
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: certificates/distribute_ca.yml
diff --git a/roles/etcd/tasks/distribute_ca.yml b/roles/etcd/tasks/distribute_ca.yml
new file mode 100644
index 000000000..7d2607844
--- /dev/null
+++ b/roles/etcd/tasks/distribute_ca.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: certificates/distribute_ca.yml
diff --git a/roles/etcd/tasks/drop_etcdctl.yml b/roles/etcd/tasks/drop_etcdctl.yml
index 4c1f609f7..3258ab1a8 100644
--- a/roles/etcd/tasks/drop_etcdctl.yml
+++ b/roles/etcd/tasks/drop_etcdctl.yml
@@ -1,2 +1,2 @@
---
-- include: auxiliary/drop_etcdctl.yml
+- include_tasks: auxiliary/drop_etcdctl.yml
diff --git a/roles/etcd/tasks/fetch_backup.yml b/roles/etcd/tasks/fetch_backup.yml
deleted file mode 100644
index 513eed17a..000000000
--- a/roles/etcd/tasks/fetch_backup.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- include: backup/vars.yml
-
-- include: backup/archive.yml
-
-- include: backup/sync_backup.yml
-
-- include: backup/
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 3e69af314..b2100801f 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -7,20 +7,21 @@
etcd_ip: "{{ etcd_ip }}"
- name: setup firewall
- include: firewall.yml
- static: yes
+ import_tasks: firewall.yml
- name: Install etcd
package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
when: not etcd_is_containerized | bool
+ register: result
+ until: result | success
-- include: drop_etcdctl.yml
+- include_tasks: drop_etcdctl.yml
when:
- openshift_etcd_etcdctl_profile | default(true) | bool
- block:
- name: Pull etcd container
- command: docker pull {{ openshift.etcd.etcd_image }}
+ command: docker pull {{ etcd_image }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
@@ -30,7 +31,7 @@
src: etcd.docker.service
when:
- etcd_is_containerized | bool
- - not openshift.common.is_etcd_system_container | bool
+ - not l_is_etcd_system_container | bool
# Start secondary etcd instance for third party integrations
# TODO: Determine an alternative to using thirdparty variable
@@ -90,7 +91,7 @@
enabled: no
masked: yes
daemon_reload: yes
- when: not openshift.common.is_etcd_system_container | bool
+ when: not l_is_etcd_system_container | bool
register: task_result
failed_when: task_result|failed and 'could not' not in task_result.msg|lower
@@ -98,11 +99,11 @@
template:
dest: "/etc/systemd/system/etcd_container.service"
src: etcd.docker.service
- when: not openshift.common.is_etcd_system_container | bool
+ when: not l_is_etcd_system_container | bool
- name: Install Etcd system container
- include: system_container.yml
- when: openshift.common.is_etcd_system_container | bool
+ include_tasks: system_container.yml
+ when: l_is_etcd_system_container | bool
when: etcd_is_containerized | bool
- name: Validate permissions on the config dir
diff --git a/roles/etcd/tasks/migrate.add_ttls.yml b/roles/etcd/tasks/migrate.add_ttls.yml
index bc27e4ea1..1dd3c9269 100644
--- a/roles/etcd/tasks/migrate.add_ttls.yml
+++ b/roles/etcd/tasks/migrate.add_ttls.yml
@@ -1,2 +1,2 @@
---
-- include: migration/add_ttls.yml
+- include_tasks: migration/add_ttls.yml
diff --git a/roles/etcd/tasks/migrate.configure_master.yml b/roles/etcd/tasks/migrate.configure_master.yml
index 3ada6e362..5be9cebd7 100644
--- a/roles/etcd/tasks/migrate.configure_master.yml
+++ b/roles/etcd/tasks/migrate.configure_master.yml
@@ -1,2 +1,2 @@
---
-- include: migration/configure_master.yml
+- include_tasks: migration/configure_master.yml
diff --git a/roles/etcd/tasks/migrate.pre_check.yml b/roles/etcd/tasks/migrate.pre_check.yml
index 124d21561..4cb67d322 100644
--- a/roles/etcd/tasks/migrate.pre_check.yml
+++ b/roles/etcd/tasks/migrate.pre_check.yml
@@ -1,2 +1,2 @@
---
-- include: migration/check.yml
+- include_tasks: migration/check.yml
diff --git a/roles/etcd/tasks/migrate.yml b/roles/etcd/tasks/migrate.yml
index 5d5385873..1a75f63f1 100644
--- a/roles/etcd/tasks/migrate.yml
+++ b/roles/etcd/tasks/migrate.yml
@@ -1,2 +1,2 @@
---
-- include: migration/migrate.yml
+- include_tasks: migration/migrate.yml
diff --git a/roles/etcd/tasks/migration/add_ttls.yml b/roles/etcd/tasks/migration/add_ttls.yml
index 14625e49e..4bdc6bcc3 100644
--- a/roles/etcd/tasks/migration/add_ttls.yml
+++ b/roles/etcd/tasks/migration/add_ttls.yml
@@ -6,7 +6,7 @@
- set_fact:
accessTokenMaxAgeSeconds: "{{ (g_master_config_output.content|b64decode|from_yaml).oauthConfig.tokenConfig.accessTokenMaxAgeSeconds | default(86400) }}"
- authroizeTokenMaxAgeSeconds: "{{ (g_master_config_output.content|b64decode|from_yaml).oauthConfig.tokenConfig.authroizeTokenMaxAgeSeconds | default(500) }}"
+ authorizeTokenMaxAgeSeconds: "{{ (g_master_config_output.content|b64decode|from_yaml).oauthConfig.tokenConfig.authorizeTokenMaxAgeSeconds | default(500) }}"
controllerLeaseTTL: "{{ (g_master_config_output.content|b64decode|from_yaml).controllerLeaseTTL | default(30) }}"
- name: Re-introduce leases (as a replacement for key TTLs)
@@ -29,6 +29,6 @@
- keys: "/openshift.io/oauth/accesstokens"
ttl: "{{ accessTokenMaxAgeSeconds }}s"
- keys: "/openshift.io/oauth/authorizetokens"
- ttl: "{{ authroizeTokenMaxAgeSeconds }}s"
+ ttl: "{{ authorizeTokenMaxAgeSeconds }}s"
- keys: "/openshift.io/leases/controllers"
ttl: "{{ controllerLeaseTTL }}s"
diff --git a/roles/etcd/tasks/migration/check.yml b/roles/etcd/tasks/migration/check.yml
index 5c45e5ae1..8ef81da28 100644
--- a/roles/etcd/tasks/migration/check.yml
+++ b/roles/etcd/tasks/migration/check.yml
@@ -1,7 +1,7 @@
---
# Check the cluster is healthy
-- include: check_cluster_health.yml
+- include_tasks: check_cluster_health.yml
# Check if there is at least one v2 snapshot
- name: Check if there is at least one v2 snapshot
@@ -39,7 +39,7 @@
# - with_items not supported over block
# Check the cluster status for the first time
-- include: check_cluster_status.yml
+- include_tasks: check_cluster_status.yml
# Check the cluster status for the second time
- block:
@@ -50,7 +50,7 @@
seconds: 5
when: not l_etcd_cluster_status_ok | bool
- - include: check_cluster_status.yml
+ - include_tasks: check_cluster_status.yml
when: not l_etcd_cluster_status_ok | bool
@@ -63,5 +63,5 @@
seconds: 5
when: not l_etcd_cluster_status_ok | bool
- - include: check_cluster_status.yml
+ - include_tasks: check_cluster_status.yml
when: not l_etcd_cluster_status_ok | bool
diff --git a/roles/etcd/tasks/remove_ca_certificates.yml b/roles/etcd/tasks/remove_ca_certificates.yml
index 36df1a1cc..c1ea4e6c9 100644
--- a/roles/etcd/tasks/remove_ca_certificates.yml
+++ b/roles/etcd/tasks/remove_ca_certificates.yml
@@ -1,2 +1,2 @@
---
-- include: certificates/remove_ca_certificates.yml
+- include_tasks: certificates/remove_ca_certificates.yml
diff --git a/roles/etcd/tasks/remove_generated_certificates.yml b/roles/etcd/tasks/remove_generated_certificates.yml
index b10a4b32d..8cdeea187 100644
--- a/roles/etcd/tasks/remove_generated_certificates.yml
+++ b/roles/etcd/tasks/remove_generated_certificates.yml
@@ -1,2 +1,2 @@
---
-- include: certificates/remove_generated_certificates.yml
+- include_tasks: certificates/remove_generated_certificates.yml
diff --git a/roles/etcd/tasks/restart.yml b/roles/etcd/tasks/restart.yml
new file mode 100644
index 000000000..d4a016eec
--- /dev/null
+++ b/roles/etcd/tasks/restart.yml
@@ -0,0 +1,21 @@
+---
+
+- name: restart etcd
+ service:
+ name: "{{ etcd_service }}"
+ state: restarted
+ when:
+ - not g_etcd_certificates_expired | default(false) | bool
+
+- name: stop etcd
+ service:
+ name: "{{ etcd_service }}"
+ state: stopped
+ when:
+ - g_etcd_certificates_expired | default(false) | bool
+- name: start etcd
+ service:
+ name: "{{ etcd_service }}"
+ state: started
+ when:
+ - g_etcd_certificates_expired | default(false) | bool
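The new file distinguishes the normal case (a plain restart) from the expired-certificate case, where the service is stopped and started as two separate steps so the stop cannot be skipped if the start fails. A sketch of how a caller might drive this file; the wiring below is an assumption, not part of this diff:
```
# Hypothetical caller: a certificate-redeploy play flags expiry,
# then invokes the conditional restart logic above.
- set_fact:
    g_etcd_certificates_expired: true   # assumed output of a cert-expiry check
- include_tasks: restart.yml
```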
diff --git a/roles/etcd/tasks/retrieve_ca_certificates.yml b/roles/etcd/tasks/retrieve_ca_certificates.yml
index bd6c4ec85..2184e669c 100644
--- a/roles/etcd/tasks/retrieve_ca_certificates.yml
+++ b/roles/etcd/tasks/retrieve_ca_certificates.yml
@@ -1,2 +1,2 @@
---
-- include: certificates/retrieve_ca_certificates.yml
+- include_tasks: certificates/retrieve_ca_certificates.yml
diff --git a/roles/etcd/tasks/server_certificates.yml b/roles/etcd/tasks/server_certificates.yml
index ae26079f9..75c35d59e 100644
--- a/roles/etcd/tasks/server_certificates.yml
+++ b/roles/etcd/tasks/server_certificates.yml
@@ -1,6 +1,6 @@
---
-- include: ca.yml
+- include_tasks: ca.yml
when:
- etcd_ca_setup | default(True) | bool
-- include: certificates/fetch_server_certificates_from_ca.yml
+- include_tasks: certificates/fetch_server_certificates_from_ca.yml
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index f71d9b551..82ac4fc84 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -5,7 +5,7 @@
tasks_from: proxy
- name: Pull etcd system container
- command: atomic pull --storage=ostree {{ openshift.etcd.etcd_image }}
+ command: atomic pull --storage=ostree {{ etcd_image }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
@@ -57,7 +57,7 @@
- name: Install or Update Etcd system container package
oc_atomic_container:
name: etcd
- image: "{{ openshift.etcd.etcd_image }}"
+ image: "{{ etcd_image }}"
state: latest
values:
- ETCD_DATA_DIR=/var/lib/etcd
diff --git a/roles/etcd/tasks/upgrade/upgrade_image.yml b/roles/etcd/tasks/upgrade/upgrade_image.yml
index 24071f9ad..6e712ba74 100644
--- a/roles/etcd/tasks/upgrade/upgrade_image.yml
+++ b/roles/etcd/tasks/upgrade/upgrade_image.yml
@@ -44,6 +44,8 @@
name: etcd
state: latest
when: not l_ostree_booted.stat.exists | bool
+ register: result
+ until: result | success
- name: Verify cluster is healthy
command: "{{ etcdctlv2 }} cluster-health"
diff --git a/roles/etcd/tasks/upgrade/upgrade_rpm.yml b/roles/etcd/tasks/upgrade/upgrade_rpm.yml
index 505e28afb..e98def46e 100644
--- a/roles/etcd/tasks/upgrade/upgrade_rpm.yml
+++ b/roles/etcd/tasks/upgrade/upgrade_rpm.yml
@@ -18,6 +18,8 @@
package:
name: "{{ l_etcd_target_package }}"
state: latest
+ register: result
+ until: result | success
- lineinfile:
destfile: "{{ etcd_conf_file }}"
diff --git a/roles/etcd/tasks/upgrade_image.yml b/roles/etcd/tasks/upgrade_image.yml
index 9e69027eb..35385cb9a 100644
--- a/roles/etcd/tasks/upgrade_image.yml
+++ b/roles/etcd/tasks/upgrade_image.yml
@@ -1,2 +1,2 @@
---
-- include: upgrade/upgrade_image.yml
+- include_tasks: upgrade/upgrade_image.yml
diff --git a/roles/etcd/tasks/upgrade_rpm.yml b/roles/etcd/tasks/upgrade_rpm.yml
index 29603d2b6..fbd3cd919 100644
--- a/roles/etcd/tasks/upgrade_rpm.yml
+++ b/roles/etcd/tasks/upgrade_rpm.yml
@@ -1,2 +1,2 @@
---
-- include: upgrade/upgrade_rpm.yml
+- include_tasks: upgrade/upgrade_rpm.yml
diff --git a/roles/etcd/tasks/version_detect.yml b/roles/etcd/tasks/version_detect.yml
new file mode 100644
index 000000000..fe1e418d8
--- /dev/null
+++ b/roles/etcd/tasks/version_detect.yml
@@ -0,0 +1,55 @@
+---
+- block:
+ - name: Record RPM based etcd version
+ command: rpm -qa --qf '%{version}' etcd\*
+ args:
+ warn: no
+ register: etcd_rpm_version
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ - debug:
+ msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected"
+ when:
+ - not openshift.common.is_containerized | bool
+
+- block:
+ - name: Record containerized etcd version (docker)
+ command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
+ register: etcd_container_version_docker
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ when:
+ - not l_is_etcd_system_container | bool
+
+ # Given a registered variable is set even if the when condition
+ # is false, we need to set etcd_container_version separately
+ - set_fact:
+ etcd_container_version: "{{ etcd_container_version_docker.stdout }}"
+ when:
+ - not l_is_etcd_system_container | bool
+
+ - name: Record containerized etcd version (runc)
+ command: runc exec etcd rpm -qa --qf '%{version}' etcd\*
+ register: etcd_container_version_runc
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ when:
+ - l_is_etcd_system_container | bool
+
+ # Given a registered variable is set even if the when condition
+ # is false, we need to set etcd_container_version separately
+ - set_fact:
+ etcd_container_version: "{{ etcd_container_version_runc.stdout }}"
+ when:
+ - l_is_etcd_system_container | bool
+
+ - debug:
+ msg: "Etcd containerized version {{ etcd_container_version }} detected"
+ when:
+ - openshift.common.is_containerized | bool
diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service
index adeca7a91..99ae37319 100644
--- a/roles/etcd/templates/etcd.docker.service
+++ b/roles/etcd/templates/etcd.docker.service
@@ -7,7 +7,7 @@ PartOf={{ openshift.docker.service_name }}.service
[Service]
EnvironmentFile={{ etcd_conf_file }}
ExecStartPre=-/usr/bin/docker rm -f {{ etcd_service }}
-ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:z -v {{ etcd_conf_dir }}:{{ etcd_conf_dir }}:ro --env-file={{ etcd_conf_file }} --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
+ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:z -v {{ etcd_conf_dir }}:{{ etcd_conf_dir }}:ro --env-file={{ etcd_conf_file }} --net=host --entrypoint=/usr/bin/etcd {{ etcd_image }}
ExecStop=/usr/bin/docker stop {{ etcd_service }}
SyslogIdentifier=etcd_container
Restart=always
diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml
index 3a8945a82..befe1b2e6 100644
--- a/roles/flannel/tasks/main.yml
+++ b/roles/flannel/tasks/main.yml
@@ -3,6 +3,8 @@
become: yes
package: name=flannel state=present
when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
- name: Set flannel etcd options
become: yes
diff --git a/roles/installer_checkpoint/README.md b/roles/installer_checkpoint/README.md
index f8588c4bf..68c0357b6 100644
--- a/roles/installer_checkpoint/README.md
+++ b/roles/installer_checkpoint/README.md
@@ -64,11 +64,11 @@ phase are stored in the `phase_attributes` variable.
},
'installer_phase_etcd': {
'title': 'etcd Install',
- 'playbook': 'playbooks/byo/openshift-etcd/config.yml'
+ 'playbook': 'playbooks/openshift-etcd/config.yml'
},
'installer_phase_nfs': {
'title': 'NFS Install',
- 'playbook': 'playbooks/byo/openshift-nfs/config.yml'
+ 'playbook': 'playbooks/openshift-nfs/config.yml'
},
#...
}
@@ -160,7 +160,7 @@ Health Check : Complete (0:01:10)
etcd Install : Complete (0:02:58)
Master Install : Complete (0:09:20)
Master Additional Install : In Progress (0:20:04)
- This phase can be restarted by running: playbooks/byo/openshift-master/additional_config.yml
+ This phase can be restarted by running: playbooks/openshift-master/additional_config.yml
```
[set_stats]: http://docs.ansible.com/ansible/latest/set_stats_module.html
diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
index 556e9127f..3cb1fa8d0 100644
--- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
+++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
@@ -46,43 +46,43 @@ class CallbackModule(CallbackBase):
},
'installer_phase_health': {
'title': 'Health Check',
- 'playbook': 'playbooks/byo/openshift-checks/pre-install.yml'
+ 'playbook': 'playbooks/openshift-checks/pre-install.yml'
},
'installer_phase_etcd': {
'title': 'etcd Install',
- 'playbook': 'playbooks/byo/openshift-etcd/config.yml'
+ 'playbook': 'playbooks/openshift-etcd/config.yml'
},
'installer_phase_nfs': {
'title': 'NFS Install',
- 'playbook': 'playbooks/byo/openshift-nfs/config.yml'
+ 'playbook': 'playbooks/openshift-nfs/config.yml'
},
'installer_phase_loadbalancer': {
'title': 'Load balancer Install',
- 'playbook': 'playbooks/byo/openshift-loadbalancer/config.yml'
+ 'playbook': 'playbooks/openshift-loadbalancer/config.yml'
},
'installer_phase_master': {
'title': 'Master Install',
- 'playbook': 'playbooks/byo/openshift-master/config.yml'
+ 'playbook': 'playbooks/openshift-master/config.yml'
},
'installer_phase_master_additional': {
'title': 'Master Additional Install',
- 'playbook': 'playbooks/byo/openshift-master/additional_config.yml'
+ 'playbook': 'playbooks/openshift-master/additional_config.yml'
},
'installer_phase_node': {
'title': 'Node Install',
- 'playbook': 'playbooks/byo/openshift-node/config.yml'
+ 'playbook': 'playbooks/openshift-node/config.yml'
},
'installer_phase_glusterfs': {
'title': 'GlusterFS Install',
- 'playbook': 'playbooks/byo/openshift-glusterfs/config.yml'
+ 'playbook': 'playbooks/openshift-glusterfs/config.yml'
},
'installer_phase_hosted': {
'title': 'Hosted Install',
- 'playbook': 'playbooks/byo/openshift-cluster/openshift-hosted.yml'
+ 'playbook': 'playbooks/openshift-hosted/config.yml'
},
'installer_phase_metrics': {
'title': 'Metrics Install',
- 'playbook': 'playbooks/byo/openshift-cluster/openshift-metrics.yml'
+ 'playbook': 'playbooks/openshift-metrics/config.yml'
},
'installer_phase_logging': {
'title': 'Logging Install',
@@ -90,15 +90,15 @@ class CallbackModule(CallbackBase):
},
'installer_phase_prometheus': {
'title': 'Prometheus Install',
- 'playbook': 'playbooks/byo/openshift-cluster/openshift-prometheus.yml'
+ 'playbook': 'playbooks/openshift-prometheus/config.yml'
},
'installer_phase_servicecatalog': {
'title': 'Service Catalog Install',
- 'playbook': 'playbooks/byo/openshift-cluster/service-catalog.yml'
+ 'playbook': 'playbooks/openshift-service-catalog/config.yml'
},
'installer_phase_management': {
'title': 'Management Install',
- 'playbook': 'playbooks/byo/openshift-management/config.yml'
+ 'playbook': 'playbooks/openshift-management/config.yml'
},
}
diff --git a/roles/kuryr/tasks/master.yaml b/roles/kuryr/tasks/master.yaml
index 55ab16f74..1cc6d2375 100644
--- a/roles/kuryr/tasks/master.yaml
+++ b/roles/kuryr/tasks/master.yaml
@@ -1,6 +1,6 @@
---
-- name: Perform OpenShit ServiceAccount config
- include: serviceaccount.yaml
+- name: Perform OpenShift ServiceAccount config
+ include_tasks: serviceaccount.yaml
- name: Create kuryr manifests tempdir
command: mktemp -d
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index 146f71f68..5969da7ca 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -3154,14 +3154,14 @@ def main():
external_host_insecure=dict(default=False, type='bool'),
external_host_partition_path=dict(default=None, type='str'),
external_host_username=dict(default=None, type='str'),
- external_host_password=dict(default=None, type='str'),
- external_host_private_key=dict(default=None, type='str'),
+ external_host_password=dict(default=None, type='str', no_log=True),
+ external_host_private_key=dict(default=None, type='str', no_log=True),
# Metrics
expose_metrics=dict(default=False, type='bool'),
metrics_image=dict(default=None, type='str'),
# Stats
stats_user=dict(default=None, type='str'),
- stats_password=dict(default=None, type='str'),
+ stats_password=dict(default=None, type='str', no_log=True),
stats_port=dict(default=1936, type='int'),
# extra
cacert_file=dict(default=None, type='str'),
diff --git a/roles/lib_openshift/src/ansible/oc_adm_router.py b/roles/lib_openshift/src/ansible/oc_adm_router.py
index c6563cc2f..52499b273 100644
--- a/roles/lib_openshift/src/ansible/oc_adm_router.py
+++ b/roles/lib_openshift/src/ansible/oc_adm_router.py
@@ -34,14 +34,14 @@ def main():
external_host_insecure=dict(default=False, type='bool'),
external_host_partition_path=dict(default=None, type='str'),
external_host_username=dict(default=None, type='str'),
- external_host_password=dict(default=None, type='str'),
- external_host_private_key=dict(default=None, type='str'),
+ external_host_password=dict(default=None, type='str', no_log=True),
+ external_host_private_key=dict(default=None, type='str', no_log=True),
# Metrics
expose_metrics=dict(default=False, type='bool'),
metrics_image=dict(default=None, type='str'),
# Stats
stats_user=dict(default=None, type='str'),
- stats_password=dict(default=None, type='str'),
+ stats_password=dict(default=None, type='str', no_log=True),
stats_port=dict(default=1936, type='int'),
# extra
cacert_file=dict(default=None, type='str'),
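Marking the password and private-key parameters with `no_log=True` keeps their values out of Ansible's task output and logs when the module reports its invocation. Playbook authors get the same protection at task level; a minimal sketch with illustrative names:
```
- name: Configure router stats credentials
  oc_adm_router:
    stats_user: admin
    stats_password: "{{ router_stats_password }}"   # hypothetical variable
  no_log: true   # suppresses the whole task's output, including its arguments
```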
diff --git a/roles/nickhammond.logrotate/tasks/main.yml b/roles/nickhammond.logrotate/tasks/main.yml
index 657cb10ec..32d3acb86 100644
--- a/roles/nickhammond.logrotate/tasks/main.yml
+++ b/roles/nickhammond.logrotate/tasks/main.yml
@@ -2,6 +2,8 @@
- name: nickhammond.logrotate | Install logrotate
package: name=logrotate state=present
when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
- name: nickhammond.logrotate | Setup logrotate.d scripts
template:
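
This `register`/`until` pair is the retry idiom this changeset adds to nearly every package-install task: if a transient repository or network error fails the first attempt, the task is re-run until the registered result passes the `success` test. When `until` is given without explicit `retries`/`delay`, Ansible falls back to 3 retries with a 5-second delay; a sketch with those defaults spelled out:

```
- name: Install logrotate, retrying transient package-manager failures
  package: name=logrotate state=present
  when: not openshift.common.is_atomic | bool
  register: result
  until: result | success   # re-evaluate the registered result after each attempt
  retries: 3                # Ansible's implicit default when until: is present
  delay: 5                  # seconds between attempts (also the default)
```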
diff --git a/roles/nickhammond.logrotate/templates/logrotate.d.j2 b/roles/nickhammond.logrotate/templates/logrotate.d.j2
index 6453be6b2..1ad1c595c 100644
--- a/roles/nickhammond.logrotate/templates/logrotate.d.j2
+++ b/roles/nickhammond.logrotate/templates/logrotate.d.j2
@@ -7,7 +7,7 @@
{% endfor -%}
{% endif %}
{%- if item.scripts is defined -%}
- {%- for name, script in item.scripts.iteritems() -%}
+ {%- for name, script in item.scripts.items() -%}
{{ name }}
{{ script }}
endscript
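
`dict.iteritems()` exists only on Python 2; Python 3 dropped it, so this template would crash when rendered by a Python 3 controller. `dict.items()` behaves the same on both, which is why the identical one-line substitution recurs below in the curator, elasticsearch, kibana, mux, and htpasswd templates. The fixed loop in isolation:

```
{%- if item.scripts is defined -%}
  {# .items() works under both Python 2 and Python 3 #}
  {%- for name, script in item.scripts.items() -%}
    {{ name }}
    {{ script }}
    endscript
  {%- endfor -%}
{%- endif %}
```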
diff --git a/roles/nuage_ca/tasks/main.yaml b/roles/nuage_ca/tasks/main.yaml
index 8d73e6840..46929fa1f 100644
--- a/roles/nuage_ca/tasks/main.yaml
+++ b/roles/nuage_ca/tasks/main.yaml
@@ -2,6 +2,8 @@
- name: Install openssl
package: name=openssl state=present
when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
- name: Create CA directory
file: path="{{ nuage_ca_dir }}" state=directory
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index f3c487132..c264427de 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -1,7 +1,6 @@
---
- name: setup firewall
- include: firewall.yml
- static: yes
+ import_tasks: firewall.yml
- name: Set the Nuage certificate directory fact for Atomic hosts
set_fact:
@@ -62,7 +61,7 @@
become: yes
file: path={{ nuage_mon_rest_server_logdir }} state=directory
-- include: serviceaccount.yml
+- include_tasks: serviceaccount.yml
- name: Download the certs and keys
become: yes
@@ -82,7 +81,7 @@
- nuage.key
- nuage.kubeconfig
-- include: certificates.yml
+- include_tasks: certificates.yml
- name: Install Nuage VSD user certificate
become: yes
diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml
index 9db9dbb6a..c6b7a9b10 100644
--- a/roles/nuage_node/tasks/main.yaml
+++ b/roles/nuage_node/tasks/main.yaml
@@ -31,7 +31,7 @@
- nuage.key
- nuage.kubeconfig
-- include: certificates.yml
+- include_tasks: certificates.yml
- name: Add additional Docker mounts for Nuage for atomic hosts
become: yes
@@ -44,8 +44,7 @@
- restart node
ignore_errors: true
-- include: iptables.yml
+- include_tasks: iptables.yml
- name: setup firewall
- include: firewall.yml
- static: yes
+ import_tasks: firewall.yml
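
The pattern across these Nuage hunks (and the excluder, cloud-provider, and master roles below) is the migration off the deprecated bare `include`: `import_tasks` is the static replacement for `include` plus `static: yes`, processed at parse time, while `include_tasks` is evaluated dynamically at runtime, so variable file names are allowed and a `when:` gates the include itself rather than being copied onto every contained task. A side-by-side sketch (the condition variable is illustrative):

```
# Static: resolved when the play is parsed; replaces "include + static: yes".
- name: setup firewall
  import_tasks: firewall.yml

# Dynamic: resolved per host at runtime; the when: below is evaluated
# once for the include, not duplicated onto each task in the file.
- name: include certificates tasks
  include_tasks: certificates.yml
  when: nuage_manage_certs | default(true) | bool
```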
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index 31f0f8e7a..05e0a1352 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -13,6 +13,7 @@
state: present
when: not openshift.common.is_containerized | bool
register: install_result
+ until: install_result | success
delegate_to: "{{ openshift_ca_host }}"
run_once: true
diff --git a/roles/openshift_certificate_expiry/README.md b/roles/openshift_certificate_expiry/README.md
index f19a421cb..48338ca1b 100644
--- a/roles/openshift_certificate_expiry/README.md
+++ b/roles/openshift_certificate_expiry/README.md
@@ -54,7 +54,7 @@ included in this role, or you can [read on below for more examples](#more-exampl
to help you craft your own.
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/easy-mode.yaml
```
Using the `easy-mode.yaml` playbook will produce:
@@ -65,7 +65,7 @@ Using the `easy-mode.yaml` playbook will produce:
> **Note:** If you are running from an RPM install use
-> `/usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml`
+> `/usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/easy-mode.yaml`
> instead
## Run from a container
@@ -80,7 +80,7 @@ There are several [examples](../../examples/README.md) in the `examples` directo
## More Example Playbooks
> **Note:** These Playbooks are available to run directly out of the
-> [/playbooks/byo/openshift-checks/certificate_expiry/](../../playbooks/byo/openshift-checks/certificate_expiry/) directory.
+> [/playbooks/openshift-checks/certificate_expiry/](../../playbooks/openshift-checks/certificate_expiry/) directory.
### Default behavior
@@ -99,14 +99,14 @@ This playbook just invokes the certificate expiration check role with default op
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/default.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/default.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/default.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/default.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/default.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/default.yaml)
### Easy mode
@@ -130,14 +130,14 @@ certificates (healthy or not) are included in the results:
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/easy-mode.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/easy-mode.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/easy-mode.yaml)
### Easy mode and upload reports to masters
@@ -193,14 +193,14 @@ options via environment variables:
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml)
### Generate HTML and JSON artifacts in their default paths
@@ -219,14 +219,14 @@ $ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/by
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml)
### Generate HTML and JSON reports in a custom path
@@ -250,14 +250,14 @@ This example customizes the report generation path to point to a specific path (
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml)
### Long warning window
@@ -278,14 +278,14 @@ the module out):
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/longer_warning_period.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/longer_warning_period.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/longer_warning_period.yaml)
### Long warning window and JSON report
@@ -307,14 +307,14 @@ the module out) and save the results as a JSON file:
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml)
diff --git a/roles/openshift_cli/defaults/main.yml b/roles/openshift_cli/defaults/main.yml
index ed97d539c..82da0639e 100644
--- a/roles/openshift_cli/defaults/main.yml
+++ b/roles/openshift_cli/defaults/main.yml
@@ -1 +1,6 @@
---
+system_images_registry_dict:
+ openshift-enterprise: "registry.access.redhat.com"
+ origin: "docker.io"
+
+system_images_registry: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}"
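
With the `openshift.common.system_images_registry` fact removed (see the `openshift_facts.py` hunks below), the registry is now derived from a plain lookup table keyed on the deployment type; the identical dict reappears in `openshift_master/defaults/main.yml`, and `openshift_cli_image` gets the same treatment in the new `openshift_facts` defaults. How the lookup resolves, using only the values above:

```
# openshift_deployment_type: origin               -> "docker.io"
# openshift_deployment_type: openshift-enterprise -> "registry.access.redhat.com"
- debug:
    msg: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}"
```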
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 14d8a3325..7b046b2c4 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -8,17 +8,19 @@
- name: Install clients
package: name={{ openshift.common.service_type }}-clients state=present
when: not openshift.common.is_containerized | bool
+ register: result
+ until: result | success
- block:
- name: Pull CLI Image
command: >
- docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+ docker pull {{ openshift_cli_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
- name: Copy client binaries/symlinks out of CLI image for use on the host
openshift_container_binary_sync:
- image: "{{ openshift.common.cli_image }}"
+ image: "{{ openshift_cli_image }}"
tag: "{{ openshift_image_tag }}"
backend: "docker"
when:
@@ -28,13 +30,13 @@
- block:
- name: Pull CLI Image
command: >
- atomic pull --storage ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+ atomic pull --storage ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift_cli_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
- name: Copy client binaries/symlinks out of CLI image for use on the host
openshift_container_binary_sync:
- image: "{{ '' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.common.cli_image }}"
+ image: "{{ '' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift_cli_image }}"
tag: "{{ openshift_image_tag }}"
backend: "atomic"
when:
@@ -47,3 +49,5 @@
- name: Install bash completion for oc tools
package: name=bash-completion state=present
when: not openshift.common.is_containerized | bool
+ register: result
+ until: result | success
diff --git a/roles/openshift_clock/tasks/main.yaml b/roles/openshift_clock/tasks/main.yaml
index f8b02524a..82c73b583 100644
--- a/roles/openshift_clock/tasks/main.yaml
+++ b/roles/openshift_clock/tasks/main.yaml
@@ -9,6 +9,8 @@
when:
- openshift_clock_enabled | bool
- chrony_installed.rc != 0
+ register: result
+ until: result | success
- name: Start and enable ntpd/chronyd
command: timedatectl set-ntp true
diff --git a/roles/openshift_cloud_provider/tasks/main.yml b/roles/openshift_cloud_provider/tasks/main.yml
index ab3055c8b..dff492a69 100644
--- a/roles/openshift_cloud_provider/tasks/main.yml
+++ b/roles/openshift_cloud_provider/tasks/main.yml
@@ -11,11 +11,11 @@
state: directory
when: has_cloudprovider | bool
-- include: openstack.yml
+- include_tasks: openstack.yml
when: cloudprovider_is_openstack | bool
-- include: aws.yml
+- include_tasks: aws.yml
when: cloudprovider_is_aws | bool
-- include: gce.yml
+- include_tasks: gce.yml
when: cloudprovider_is_gce | bool
diff --git a/roles/openshift_etcd/meta/main.yml b/roles/openshift_etcd/meta/main.yml
index de36b201b..7cc548f69 100644
--- a/roles/openshift_etcd/meta/main.yml
+++ b/roles/openshift_etcd/meta/main.yml
@@ -13,7 +13,6 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_etcd_facts
-- role: openshift_clock
- role: openshift_docker
when: openshift.common.is_containerized | bool
- role: etcd
diff --git a/roles/openshift_etcd_facts/tasks/main.yml b/roles/openshift_etcd_facts/tasks/main.yml
index 22fb39006..ed97d539c 100644
--- a/roles/openshift_etcd_facts/tasks/main.yml
+++ b/roles/openshift_etcd_facts/tasks/main.yml
@@ -1,5 +1 @@
---
-- openshift_facts:
- role: etcd
- local_facts:
- etcd_image: "{{ osm_etcd_image | default(None) }}"
diff --git a/roles/openshift_excluder/tasks/disable.yml b/roles/openshift_excluder/tasks/disable.yml
index 5add25b45..21801b994 100644
--- a/roles/openshift_excluder/tasks/disable.yml
+++ b/roles/openshift_excluder/tasks/disable.yml
@@ -2,11 +2,11 @@
- when: r_openshift_excluder_verify_upgrade
block:
- name: Include verify_upgrade.yml when upgrading
- include: verify_upgrade.yml
+ include_tasks: verify_upgrade.yml
# unexclude the current openshift/origin-excluder if it is installed so it can be updated
- name: Disable excluders before the upgrade to remove older excluding expressions
- include: unexclude.yml
+ include_tasks: unexclude.yml
vars:
# before the docker excluder can be updated, it needs to be disabled
# to remove older excluded packages that are no longer excluded
@@ -15,12 +15,12 @@
# Install any excluder that is enabled
- name: Include install.yml
- include: install.yml
+ include_tasks: install.yml
# And finally adjust an excluder in order to update host components correctly. First
# exclude then unexclude
- name: Include exclude.yml
- include: exclude.yml
+ include_tasks: exclude.yml
vars:
# Enable the docker excluder only if it is overridden
# BZ #1430612: docker excluders should be enabled even during installation and upgrade
@@ -30,7 +30,7 @@
# All excluders that are to be disabled are disabled
- name: Include unexclude.yml
- include: unexclude.yml
+ include_tasks: unexclude.yml
vars:
# If the docker override is not set, default to the generic behaviour
# BZ #1430612: docker excluders should be enabled even during installation and upgrade
diff --git a/roles/openshift_excluder/tasks/enable.yml b/roles/openshift_excluder/tasks/enable.yml
index fce44cfb5..7c3742a06 100644
--- a/roles/openshift_excluder/tasks/enable.yml
+++ b/roles/openshift_excluder/tasks/enable.yml
@@ -1,6 +1,6 @@
---
- name: Install excluders
- include: install.yml
+ include_tasks: install.yml
- name: Enable excluders
- include: exclude.yml
+ include_tasks: exclude.yml
diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml
index 7a5bebf6f..3ac55894f 100644
--- a/roles/openshift_excluder/tasks/install.yml
+++ b/roles/openshift_excluder/tasks/install.yml
@@ -13,6 +13,8 @@
when:
- r_openshift_excluder_enable_docker_excluder | bool
- ansible_pkg_mgr == "yum"
+ register: result
+ until: result | success
# For DNF we do not need the "*" and if we add it, it causes an error because
@@ -26,6 +28,8 @@
when:
- r_openshift_excluder_enable_docker_excluder | bool
- ansible_pkg_mgr == "dnf"
+ register: result
+ until: result | success
- name: Install openshift excluder - yum
package:
@@ -34,6 +38,8 @@
when:
- r_openshift_excluder_enable_openshift_excluder | bool
- ansible_pkg_mgr == "yum"
+ register: result
+ until: result | success
# For DNF we do not need the "*" and if we add it, it causes an error because
# it's not a valid pkg_spec
@@ -46,6 +52,8 @@
when:
- r_openshift_excluder_enable_openshift_excluder | bool
- ansible_pkg_mgr == "dnf"
+ register: result
+ until: result | success
- set_fact:
r_openshift_excluder_install_ran: True
diff --git a/roles/openshift_excluder/tasks/main.yml b/roles/openshift_excluder/tasks/main.yml
index db20b4012..93d6ef149 100644
--- a/roles/openshift_excluder/tasks/main.yml
+++ b/roles/openshift_excluder/tasks/main.yml
@@ -32,7 +32,7 @@
- r_openshift_excluder_upgrade_target is not defined
- name: Include main action task file
- include: "{{ r_openshift_excluder_action }}.yml"
+ include_tasks: "{{ r_openshift_excluder_action }}.yml"
when:
- not ostree_booted.stat.exists | bool
diff --git a/roles/openshift_excluder/tasks/verify_upgrade.yml b/roles/openshift_excluder/tasks/verify_upgrade.yml
index 42026664a..b55a9af23 100644
--- a/roles/openshift_excluder/tasks/verify_upgrade.yml
+++ b/roles/openshift_excluder/tasks/verify_upgrade.yml
@@ -1,12 +1,12 @@
---
- name: Verify Docker Excluder version
- include: verify_excluder.yml
+ include_tasks: verify_excluder.yml
vars:
excluder: "{{ r_openshift_excluder_service_type }}-docker-excluder"
when: r_openshift_excluder_enable_docker_excluder | bool
- name: Verify OpenShift Excluder version
- include: verify_excluder.yml
+ include_tasks: verify_excluder.yml
vars:
excluder: "{{ r_openshift_excluder_service_type }}-excluder"
when: r_openshift_excluder_enable_openshift_excluder | bool
diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml
index 4cb5418c6..b7acb0c5a 100644
--- a/roles/openshift_expand_partition/tasks/main.yml
+++ b/roles/openshift_expand_partition/tasks/main.yml
@@ -2,6 +2,8 @@
- name: Ensure growpart is installed
package: name=cloud-utils-growpart state=present
when: not openshift.common.is_containerized | bool
+ register: result
+ until: result | success
- name: Determine if growpart is installed
command: "rpm -q cloud-utils-growpart"
diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml
new file mode 100644
index 000000000..7064d727a
--- /dev/null
+++ b/roles/openshift_facts/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+openshift_cli_image_dict:
+ origin: 'openshift/origin'
+ openshift-enterprise: 'openshift3/ose'
+
+openshift_cli_image: "{{ osm_image | default(openshift_cli_image_dict[openshift_deployment_type]) }}"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 99ebb7e36..bbcdbadd8 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -446,24 +446,6 @@ def normalize_provider_facts(provider, metadata):
return facts
-def set_node_schedulability(facts):
- """ Set schedulable facts if not already present in facts dict
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with the generated schedulable
- facts if they were not already present
-
- """
- if 'node' in facts:
- if 'schedulable' not in facts['node']:
- if 'master' in facts:
- facts['node']['schedulable'] = False
- else:
- facts['node']['schedulable'] = True
- return facts
-
-
# pylint: disable=too-many-branches
def set_selectors(facts):
""" Set selectors facts if not already present in facts dict
@@ -516,49 +498,6 @@ def set_selectors(facts):
return facts
-def set_dnsmasq_facts_if_unset(facts):
- """ Set dnsmasq facts if not already present in facts
- Args:
- facts (dict) existing facts
- Returns:
- facts (dict) updated facts with values set if not previously set
- """
-
- if 'common' in facts:
- if 'master' in facts and 'dns_port' not in facts['master']:
- facts['master']['dns_port'] = 8053
-
- return facts
-
-
-def set_project_cfg_facts_if_unset(facts):
- """ Set Project Configuration facts if not already present in facts dict
- dict:
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with the generated Project Configuration
- facts if they were not already present
-
- """
-
- config = {
- 'default_node_selector': '',
- 'project_request_message': '',
- 'project_request_template': '',
- 'mcs_allocator_range': 's0:/2',
- 'mcs_labels_per_project': 5,
- 'uid_allocator_range': '1000000000-1999999999/10000'
- }
-
- if 'master' in facts:
- for key, value in config.items():
- if key not in facts['master']:
- facts['master'][key] = value
-
- return facts
-
-
def set_identity_providers_if_unset(facts):
""" Set identity_providers fact if not already present in facts dict
@@ -1563,7 +1502,8 @@ def set_builddefaults_facts(facts):
# Scaffold out the full expected datastructure
facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}}
facts['master']['admission_plugin_config'].update(builddefaults['config'])
- delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])
+ if 'env' in facts['master']['admission_plugin_config']['BuildDefaults']['configuration']:
+ delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])
return facts
@@ -1627,20 +1567,16 @@ def set_container_facts_if_unset(facts):
deployment_type = facts['common']['deployment_type']
if deployment_type == 'openshift-enterprise':
master_image = 'openshift3/ose'
- cli_image = master_image
node_image = 'openshift3/node'
ovs_image = 'openshift3/openvswitch'
- etcd_image = 'registry.access.redhat.com/rhel7/etcd'
pod_image = 'openshift3/ose-pod'
router_image = 'openshift3/ose-haproxy-router'
registry_image = 'openshift3/ose-docker-registry'
deployer_image = 'openshift3/ose-deployer'
else:
master_image = 'openshift/origin'
- cli_image = master_image
node_image = 'openshift/node'
ovs_image = 'openshift/openvswitch'
- etcd_image = 'registry.access.redhat.com/rhel7/etcd'
pod_image = 'openshift/origin-pod'
router_image = 'openshift/origin-haproxy-router'
registry_image = 'openshift/origin-docker-registry'
@@ -1657,8 +1593,6 @@ def set_container_facts_if_unset(facts):
if 'is_containerized' not in facts['common']:
facts['common']['is_containerized'] = facts['common']['is_atomic']
- if 'cli_image' not in facts['common']:
- facts['common']['cli_image'] = cli_image
if 'pod_image' not in facts['common']:
facts['common']['pod_image'] = pod_image
if 'router_image' not in facts['common']:
@@ -1667,8 +1601,6 @@ def set_container_facts_if_unset(facts):
facts['common']['registry_image'] = registry_image
if 'deployer_image' not in facts['common']:
facts['common']['deployer_image'] = deployer_image
- if 'etcd' in facts and 'etcd_image' not in facts['etcd']:
- facts['etcd']['etcd_image'] = etcd_image
if 'master' in facts and 'master_image' not in facts['master']:
facts['master']['master_image'] = master_image
facts['master']['master_system_image'] = master_image
@@ -1840,8 +1772,6 @@ class OpenShiftFacts(object):
facts = migrate_oauth_template_facts(facts)
facts['current_config'] = get_current_config(facts)
facts = set_url_facts_if_unset(facts)
- facts = set_project_cfg_facts_if_unset(facts)
- facts = set_node_schedulability(facts)
facts = set_selectors(facts)
facts = set_identity_providers_if_unset(facts)
facts = set_deployment_facts_if_unset(facts)
@@ -1851,7 +1781,6 @@ class OpenShiftFacts(object):
facts = build_controller_args(facts)
facts = build_api_server_args(facts)
facts = set_version_facts_if_unset(facts)
- facts = set_dnsmasq_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
facts = set_etcd_facts_if_unset(facts)
facts = set_proxy_facts(facts)
@@ -1972,6 +1901,7 @@ class OpenShiftFacts(object):
glusterfs=dict(
endpoints='glusterfs-registry-endpoints',
path='glusterfs-registry-volume',
+ ips=[],
readOnly=False,
swap=False,
swapcopy=True),
@@ -2256,14 +2186,27 @@ class OpenShiftFacts(object):
oo_env_facts = dict()
current_level = oo_env_facts
keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:]
+
if len(keys) > 0 and keys[0] != self.role:
continue
- for key in keys:
- if key == keys[-1]:
- current_level[key] = value
- elif key not in current_level:
- current_level[key] = dict()
- current_level = current_level[key]
+
+ # Build a dictionary from the split fact keys.
+ # After this loop oo_env_facts is the resultant dictionary.
+ # For example:
+ # fact = "openshift_metrics_install_metrics"
+ # value = 'true'
+ # keys = ['metrics', 'install', 'metrics']
+ # result = {'metrics': {'install': {'metrics': 'true'}}}
+ for i, _ in enumerate(keys):
+ # This is the last key. Set the value.
+ if i == (len(keys) - 1):
+ current_level[keys[i]] = value
+ # This is a key other than the last key. Set as
+ # dictionary and continue.
+ else:
+ current_level[keys[i]] = dict()
+ current_level = current_level[keys[i]]
+
facts_to_set = merge_facts(orig=facts_to_set,
new=oo_env_facts,
additive_facts_to_overwrite=[],
diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md
index d6f6e3e09..a1c2c3956 100644
--- a/roles/openshift_hosted/README.md
+++ b/roles/openshift_hosted/README.md
@@ -34,13 +34,27 @@ variables also control configuration behavior:
| Name | Default value | Description |
|----------------------------------------------|---------------|------------------------------------------------------------------------------|
-| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume |
-| openshift_hosted_registry_glusterfs_swapcopy | True | If swapping, also copy the current contents of the registry volume |
+| openshift_hosted_registry_storage_glusterfs_endpoints | glusterfs-registry-endpoints | The name for the Endpoints resource that will point the registry to the GlusterFS nodes |
+| openshift_hosted_registry_storage_glusterfs_path | glusterfs-registry-volume | The name for the GlusterFS volume that will provide registry storage |
+| openshift_hosted_registry_storage_glusterfs_readonly | False | Whether the GlusterFS volume should be read-only |
+| openshift_hosted_registry_storage_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume |
+| openshift_hosted_registry_storage_glusterfs_swapcopy | True | If swapping, copy the contents of the pre-existing registry storage to the new GlusterFS volume |
+| openshift_hosted_registry_storage_glusterfs_ips | `[]` | A list of IP addresses of the nodes of the GlusterFS cluster to use for hosted registry storage |
+
+**NOTE:** Configuring a value for
+`openshift_hosted_registry_storage_glusterfs_ips` with a `glusterfs_registry`
+host group is not allowed. Specifying a `glusterfs_registry` host group
+indicates that a new GlusterFS cluster should be configured, whereas
+specifying `openshift_hosted_registry_storage_glusterfs_ips` indicates that a
+pre-configured GlusterFS cluster should be used for the registry storage.
+
Dependencies
------------
* openshift_hosted_facts
+* openshift_persistent_volumes
Example Playbook
----------------
@@ -56,6 +70,10 @@ Example Playbook
cafile: /path/to/my-router-ca.crt
openshift_hosted_router_registryurl: 'registry.access.redhat.com/openshift3/ose-haproxy-router:v3.0.2.0'
openshift_hosted_router_selector: 'type=infra'
+ openshift_hosted_registry_storage_kind: glusterfs
+ openshift_hosted_registry_storage_glusterfs_path: external_glusterfs_volume_name
+ openshift_hosted_registry_storage_glusterfs_ips: ['192.168.20.239','192.168.20.96','192.168.20.114']
+
```
License
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index 2af42fba4..e70c0c420 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -70,7 +70,6 @@ r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | defau
openshift_hosted_registry_name: docker-registry
openshift_hosted_registry_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
openshift_hosted_registry_cert_expire_days: 730
-
r_openshift_hosted_registry_os_firewall_deny: []
r_openshift_hosted_registry_os_firewall_allow:
- service: Docker Registry Port
diff --git a/roles/openshift_hosted/tasks/registry.yml b/roles/openshift_hosted/tasks/registry.yml
index eaaac9da2..9f2ef4e40 100644
--- a/roles/openshift_hosted/tasks/registry.yml
+++ b/roles/openshift_hosted/tasks/registry.yml
@@ -1,4 +1,10 @@
---
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-hosted-ansible-XXXXXX
+ register: mktempHosted
+ changed_when: False
+ check_mode: no
+
- name: setup firewall
include: firewall.yml
vars:
@@ -36,13 +42,13 @@
l_default_replicas: "{{ l_node_count if openshift.hosted.registry.storage.kind | default(none) is not none else 1 }}"
when: l_node_count | int > 0
-
- name: set openshift_hosted facts
set_fact:
openshift_hosted_registry_replicas: "{{ openshift.hosted.registry.replicas | default(l_default_replicas) }}"
openshift_hosted_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
openshift_hosted_registry_selector: "{{ openshift.hosted.registry.selector }}"
openshift_hosted_registry_images: "{{ openshift.hosted.registry.registryurl | default('openshift3/ose-${component}:${version}')}}"
+ openshift_hosted_registry_storage_glusterfs_ips: "{%- set gluster_ips = [] %}{% if groups.glusterfs_registry is defined %}{% for node in groups.glusterfs_registry %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% elif groups.glusterfs is defined %}{% for node in groups.glusterfs %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% else %}{{ openshift.hosted.registry.storage.glusterfs.ips }}{% endif %}"
- name: Update registry environment variables when pushing via dns
set_fact:
@@ -113,6 +119,11 @@
when:
- openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack', 'glusterfs']
+- include: storage/glusterfs_endpoints.yml
+ when:
+ - openshift_hosted_registry_storage_glusterfs_ips|length > 0
+ - openshift.hosted.registry.storage.kind | default(none) in ['glusterfs']
+
- name: Create OpenShift registry
oc_adm_registry:
name: "{{ openshift_hosted_registry_name }}"
@@ -141,3 +152,10 @@
- include: storage/glusterfs.yml
when:
- openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktempHosted.stdout }}"
+ state: absent
+ changed_when: False
+ check_mode: no
diff --git a/roles/openshift_hosted/tasks/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml
index c2954fde1..7cae67baa 100644
--- a/roles/openshift_hosted/tasks/storage/glusterfs.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml
@@ -12,7 +12,7 @@
namespace: "{{ openshift_hosted_registry_namespace }}"
state: list
kind: pod
- selector: "{% for label, value in registry_dc.results.results[0].spec.selector.iteritems() %}{{ label }}={{ value }}{% if not loop.last %},{% endif %}{% endfor %}"
+ selector: "{% for label, value in registry_dc.results.results[0].spec.selector.items() %}{{ label }}={{ value }}{% if not loop.last %},{% endif %}{% endfor %}"
register: registry_pods
until:
- "registry_pods.results.results[0]['items'] | count > 0"
@@ -35,7 +35,7 @@
mount:
state: mounted
fstype: glusterfs
- src: "{% if 'glusterfs_registry' in groups %}{% set node = groups.glusterfs_registry[0] %}{% else %}{% set node = groups.glusterfs[0] %}{% endif %}{% if 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift.hosted.registry.storage.glusterfs.path }}"
+ src: "{% if 'glusterfs_registry' in groups %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift.hosted.registry.storage.glusterfs.path }}"
name: "{{ mktemp.stdout }}"
- name: Set registry volume permissions
@@ -79,14 +79,7 @@
- REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true'
when: openshift.hosted.registry.storage.glusterfs.swap
-- name: Unmount registry volume
+- name: Unmount registry volume and clean up mount point/fstab
mount:
- state: unmounted
- name: "{{ mktemp.stdout }}"
-
-- name: Delete temp mount directory
- file:
- dest: "{{ mktemp.stdout }}"
state: absent
- changed_when: False
- check_mode: no
+ name: "{{ mktemp.stdout }}"
diff --git a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
new file mode 100644
index 000000000..0f4381748
--- /dev/null
+++ b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
@@ -0,0 +1,16 @@
+---
+- name: Generate GlusterFS registry endpoints
+ template:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-endpoints.yml.j2"
+ dest: "{{ mktempHosted.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Generate GlusterFS registry service
+ template:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml.j2"
+ dest: "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
+
+- name: Create GlusterFS registry service and endpoint
+ command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift.hosted.registry.namespace | default('default') }}"
+ with_items:
+ - "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
+ - "{{ mktempHosted.stdout }}/glusterfs-registry-endpoints.yml"
diff --git a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..607d25533
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }}
+subsets:
+- addresses:
+{% for ip in openshift_hosted_registry_storage_glusterfs_ips %}
+ - ip: {{ ip }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..452c7c3e1
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }}
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..607d25533
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }}
+subsets:
+- addresses:
+{% for ip in openshift_hosted_registry_storage_glusterfs_ips %}
+ - ip: {{ ip }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..452c7c3e1
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }}
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml
index 69b061fc5..79c5793d9 100644
--- a/roles/openshift_loadbalancer/tasks/main.yml
+++ b/roles/openshift_loadbalancer/tasks/main.yml
@@ -1,11 +1,12 @@
---
- name: setup firewall
- include: firewall.yml
- static: yes
+ import_tasks: firewall.yml
- name: Install haproxy
package: name=haproxy state=present
when: not openshift.common.is_containerized | bool
+ register: result
+ until: result | success
- name: Pull haproxy image
command: >
diff --git a/roles/openshift_logging/tasks/generate_jks.yaml b/roles/openshift_logging/tasks/generate_jks.yaml
index 6e3204589..d6ac88dcc 100644
--- a/roles/openshift_logging/tasks/generate_jks.yaml
+++ b/roles/openshift_logging/tasks/generate_jks.yaml
@@ -24,21 +24,25 @@
local_action: file path="{{local_tmp.stdout}}/elasticsearch.jks" state=touch mode="u=rw,g=r,o=r"
when: elasticsearch_jks.stat.exists
changed_when: False
+ become: no
- name: Create placeholder for previously created JKS certs to prevent recreating...
local_action: file path="{{local_tmp.stdout}}/logging-es.jks" state=touch mode="u=rw,g=r,o=r"
when: logging_es_jks.stat.exists
changed_when: False
+ become: no
- name: Create placeholder for previously created JKS certs to prevent recreating...
local_action: file path="{{local_tmp.stdout}}/system.admin.jks" state=touch mode="u=rw,g=r,o=r"
when: system_admin_jks.stat.exists
changed_when: False
+ become: no
- name: Create placeholder for previously created JKS certs to prevent recreating...
local_action: file path="{{local_tmp.stdout}}/truststore.jks" state=touch mode="u=rw,g=r,o=r"
when: truststore_jks.stat.exists
changed_when: False
+ become: no
- name: pulling down signing items from host
fetch:
@@ -57,10 +61,12 @@
vars:
- top_dir: "{{local_tmp.stdout}}"
when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
+ become: no
- name: Run JKS generation script
local_action: script generate-jks.sh {{local_tmp.stdout}} {{openshift_logging_namespace}}
check_mode: no
+ become: no
when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
- name: Pushing locally generated JKS certs to remote host...
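
The added `become: no` lines stop these `local_action` tasks from inheriting privilege escalation from the play: they run on the control host, where sudo may be unavailable or would leave root-owned files in the temp directory. A minimal sketch (the path is illustrative):

```
- name: Touch a placeholder file on the control host, without sudo
  local_action: file path=/tmp/placeholder.jks state=touch mode="u=rw,g=r,o=r"
  become: no          # do not escalate on localhost even if the play sets become: yes
  changed_when: False
```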
diff --git a/roles/openshift_logging_curator/templates/curator.j2 b/roles/openshift_logging_curator/templates/curator.j2
index 462128366..8acff8141 100644
--- a/roles/openshift_logging_curator/templates/curator.j2
+++ b/roles/openshift_logging_curator/templates/curator.j2
@@ -30,7 +30,7 @@ spec:
serviceAccountName: aggregated-logging-curator
{% if curator_node_selector is iterable and curator_node_selector | length > 0 %}
nodeSelector:
-{% for key, value in curator_node_selector.iteritems() %}
+{% for key, value in curator_node_selector.items() %}
{{key}}: "{{value}}"
{% endfor %}
{% endif %}
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 2bd02af60..770892d52 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -270,7 +270,7 @@
port: 443
targetPort: 4443
selector:
- component: "{{ es_component }}-prometheus"
+ component: "{{ es_component }}"
provider: openshift
- oc_edit:
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index 0c7d8b46e..0bfa9e85b 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -34,7 +34,7 @@ spec:
{% endfor %}
{% if es_node_selector is iterable and es_node_selector | length > 0 %}
nodeSelector:
-{% for key, value in es_node_selector.iteritems() %}
+{% for key, value in es_node_selector.items() %}
{{key}}: "{{value}}"
{% endfor %}
{% endif %}
diff --git a/roles/openshift_logging_elasticsearch/templates/pvc.j2 b/roles/openshift_logging_elasticsearch/templates/pvc.j2
index 063f9c5ae..3c6896df4 100644
--- a/roles/openshift_logging_elasticsearch/templates/pvc.j2
+++ b/roles/openshift_logging_elasticsearch/templates/pvc.j2
@@ -6,7 +6,7 @@ metadata:
logging-infra: support
{% if annotations is defined %}
annotations:
-{% for key,value in annotations.iteritems() %}
+{% for key,value in annotations.items() %}
{{key}}: {{value}}
{% endfor %}
{% endif %}
@@ -14,7 +14,7 @@ spec:
{% if pv_selector is defined and pv_selector is mapping %}
selector:
matchLabels:
-{% for key,value in pv_selector.iteritems() %}
+{% for key,value in pv_selector.items() %}
{{key}}: {{value}}
{% endfor %}
{% endif %}
diff --git a/roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2 b/roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2
index cf8a9e65f..d2e8b8bcb 100644
--- a/roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2
+++ b/roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2
@@ -4,7 +4,7 @@ metadata:
name: "{{obj_name}}"
{% if labels is defined%}
labels:
-{% for key, value in labels.iteritems() %}
+{% for key, value in labels.items() %}
{{key}}: {{value}}
{% endfor %}
{% endif %}
diff --git a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
index 5a4f7f762..3bd29163b 100644
--- a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
+++ b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
@@ -42,7 +42,7 @@ objects:
component: eventrouter
logging-infra: eventrouter
provider: openshift
- replicas: ${REPLICAS}
+ replicas: "${{ '{{' }}REPLICAS{{ '}}' }}"
template:
metadata:
labels:
@@ -55,7 +55,7 @@ objects:
serviceAccountName: aggregated-logging-eventrouter
{% if node_selector is iterable and node_selector | length > 0 %}
nodeSelector:
-{% for key, value in node_selector.iteritems() %}
+{% for key, value in node_selector.items() %}
{{ key }}: "{{ value }}"
{% endfor %}
{% endif %}
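
The `replicas` fix threads one set of braces through two template engines. Jinja2 renders the role template first, and `{{ '{{' }}` / `{{ '}}' }}` are how it emits literal brace pairs; the rendered OpenShift template then carries `"${{REPLICAS}}"`, which is the non-string parameter syntax that substitutes REPLICAS as a bare number (the old `${REPLICAS}` form produced a quoted string, which the API rejects for an integer field). The three stages, roughly:

```
{# 1. In the .j2 source:                                  #}
{#      replicas: "${{ '{{' }}REPLICAS{{ '}}' }}"         #}
{# 2. After Jinja2 rendering, the OpenShift template has: #}
{#      replicas: "${{REPLICAS}}"                         #}
{# 3. After `oc process ... -p REPLICAS=2`:               #}
{#      replicas: 2                                       #}
```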
diff --git a/roles/openshift_logging_fluentd/files/secure-forward.conf b/roles/openshift_logging_fluentd/files/secure-forward.conf
index f4483df79..87410c1c5 100644
--- a/roles/openshift_logging_fluentd/files/secure-forward.conf
+++ b/roles/openshift_logging_fluentd/files/secure-forward.conf
@@ -1,3 +1,4 @@
+# <store>
# @type secure_forward
# self_hostname ${HOSTNAME}
@@ -22,3 +23,4 @@
# specify hostlabel for FQDN verification if ipaddress is used for host
# hostlabel server.fqdn.example.com
# </server>
+# </store>
diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2
index 4ff86729a..57d216373 100644
--- a/roles/openshift_logging_kibana/templates/kibana.j2
+++ b/roles/openshift_logging_kibana/templates/kibana.j2
@@ -29,7 +29,7 @@ spec:
serviceAccountName: aggregated-logging-kibana
{% if kibana_node_selector is iterable and kibana_node_selector | length > 0 %}
nodeSelector:
-{% for key, value in kibana_node_selector.iteritems() %}
+{% for key, value in kibana_node_selector.items() %}
{{ key }}: "{{ value }}"
{% endfor %}
{% endif %}
diff --git a/roles/openshift_logging_kibana/templates/route_reencrypt.j2 b/roles/openshift_logging_kibana/templates/route_reencrypt.j2
index cf8a9e65f..d2e8b8bcb 100644
--- a/roles/openshift_logging_kibana/templates/route_reencrypt.j2
+++ b/roles/openshift_logging_kibana/templates/route_reencrypt.j2
@@ -4,7 +4,7 @@ metadata:
name: "{{obj_name}}"
{% if labels is defined%}
labels:
-{% for key, value in labels.iteritems() %}
+{% for key, value in labels.items() %}
{{key}}: {{value}}
{% endfor %}
{% endif %}
diff --git a/roles/openshift_logging_mux/files/secure-forward.conf b/roles/openshift_logging_mux/files/secure-forward.conf
index f4483df79..87410c1c5 100644
--- a/roles/openshift_logging_mux/files/secure-forward.conf
+++ b/roles/openshift_logging_mux/files/secure-forward.conf
@@ -1,3 +1,4 @@
+# <store>
# @type secure_forward
# self_hostname ${HOSTNAME}
@@ -22,3 +23,4 @@
# specify hostlabel for FQDN verification if ipaddress is used for host
# hostlabel server.fqdn.example.com
# </server>
+# </store>
diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
index cfb13d59b..2337c33d5 100644
--- a/roles/openshift_logging_mux/templates/mux.j2
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -29,7 +29,7 @@ spec:
serviceAccountName: aggregated-logging-mux
{% if mux_node_selector is iterable and mux_node_selector | length > 0 %}
nodeSelector:
-{% for key, value in mux_node_selector.iteritems() %}
+{% for key, value in mux_node_selector.items() %}
{{key}}: "{{value}}"
{% endfor %}
{% endif %}
@@ -59,7 +59,7 @@ spec:
{% endif %}
{% endif %}
ports:
- - containerPort: "{{ openshift_logging_mux_port }}"
+ - containerPort: {{ openshift_logging_mux_port }}
name: mux-forward
volumeMounts:
- name: config
diff --git a/roles/openshift_manage_node/defaults/main.yml b/roles/openshift_manage_node/defaults/main.yml
new file mode 100644
index 000000000..f0e728a3f
--- /dev/null
+++ b/roles/openshift_manage_node/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# openshift_manage_node_is_master is set at the play level.
+openshift_manage_node_is_master: False
+
+# Default is to be schedulable except for master nodes.
+l_openshift_manage_schedulable: "{{ openshift_schedulable | default(not openshift_manage_node_is_master) }}"
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index fbbac1176..247757ca9 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -37,7 +37,7 @@
- name: Set node schedulability
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
- schedulable: "{{ 'true' if openshift.node.schedulable | bool else 'false' }}"
+ schedulable: "{{ 'true' if l_openshift_manage_schedulable | bool else 'false' }}"
retries: 10
delay: 5
register: node_schedulable
diff --git a/roles/openshift_management/tasks/add_container_provider.yml b/roles/openshift_management/tasks/add_container_provider.yml
index 50a5252cc..24b2ce6ac 100644
--- a/roles/openshift_management/tasks/add_container_provider.yml
+++ b/roles/openshift_management/tasks/add_container_provider.yml
@@ -1,8 +1,4 @@
---
-- name: Ensure lib_openshift modules are available
- include_role:
- role: lib_openshift
-
- name: Ensure OpenShift facts module is available
include_role:
role: openshift_facts
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 3fb94fff8..8e4a46ebb 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -7,6 +7,22 @@ openshift_master_debug_level: "{{ debug_level | default(2) }}"
r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+system_images_registry_dict:
+ openshift-enterprise: "registry.access.redhat.com"
+ origin: "docker.io"
+
+system_images_registry: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}"
+
+l_is_master_system_container: "{{ (openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+
+openshift_master_dns_port: 8053
+osm_default_node_selector: ''
+osm_project_request_template: ''
+osm_mcs_allocator_range: 's0:/2'
+osm_mcs_labels_per_project: 5
+osm_uid_allocator_range: '1000000000-1999999999/10000'
+osm_project_request_message: ''
+
openshift_node_ips: []
r_openshift_master_clean_install: false
r_openshift_master_etcd3_storage: false
@@ -18,9 +34,9 @@ default_r_openshift_master_os_firewall_allow:
- service: api controllers https
port: "{{ openshift.master.controllers_port }}/tcp"
- service: skydns tcp
- port: "{{ openshift.master.dns_port }}/tcp"
+ port: "{{ openshift_master_dns_port }}/tcp"
- service: skydns udp
- port: "{{ openshift.master.dns_port }}/udp"
+ port: "{{ openshift_master_dns_port }}/udp"
- service: etcd embedded
port: 4001/tcp
cond: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
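
These defaults absorb the facts deleted from `openshift_facts.py` in this same changeset: `openshift.master.dns_port` becomes `openshift_master_dns_port`, and the project-configuration facts become the `osm_*` family, so deployers can override them directly from the inventory instead of through `openshift_facts` local facts. For example (values illustrative):

```
# inventory / group_vars sketch -- override the new role defaults directly
openshift_master_dns_port: 8053
osm_default_node_selector: 'region=primary'
osm_mcs_labels_per_project: 5
```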
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index b6d3539b1..e52cd6231 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -31,8 +31,7 @@
- openshift.common.is_containerized | bool
- name: Open up firewall ports
- include: firewall.yml
- static: yes
+ import_tasks: firewall.yml
- name: Install Master package
package:
@@ -40,6 +39,8 @@
state: present
when:
- not openshift.common.is_containerized | bool
+ register: result
+ until: result | success
- name: Create r_openshift_master_data_dir
file:
@@ -89,6 +90,8 @@
- item.kind == 'HTPasswdPasswordIdentityProvider'
- not openshift.common.is_atomic | bool
with_items: "{{ openshift.master.identity_providers }}"
+ register: result
+ until: result | success
- name: Ensure htpasswd directory exists
file:
@@ -172,16 +175,16 @@
no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}"
- name: Update journald config
- include: journald.yml
+ include_tasks: journald.yml
- name: Install the systemd units
- include: systemd_units.yml
+ include_tasks: systemd_units.yml
- name: Install Master system container
- include: system_container.yml
+ include_tasks: system_container.yml
when:
- openshift.common.is_containerized | bool
- - openshift.common.is_master_system_container | bool
+ - l_is_master_system_container | bool
- name: Create session secrets file
template:
@@ -212,10 +215,10 @@
- restart master api
- restart master controllers
-- include: bootstrap_settings.yml
+- include_tasks: bootstrap_settings.yml
when: openshift_master_bootstrap_enabled | default(False)
-- include: set_loopback_context.yml
+- include_tasks: set_loopback_context.yml
- name: Start and enable master api on first master
systemd:
@@ -273,7 +276,7 @@
# A separate wait is required here for native HA since notifies will
# be resolved after all tasks in the role.
-- include: check_master_api_is_ready.yml
+- include_tasks: check_master_api_is_ready.yml
when:
- openshift.master.cluster_method == 'native'
- master_api_service_status_changed | bool
@@ -307,6 +310,7 @@
- openshift.master.cluster_method == 'pacemaker'
- not openshift.common.is_containerized | bool
register: l_install_result
+ until: l_install_result | success
- name: Start and enable cluster service
systemd:
@@ -323,5 +327,5 @@
- l_install_result | changed
- name: node bootstrap settings
- include: bootstrap.yml
+ include_tasks: bootstrap.yml
when: openshift_master_bootstrap_enabled | default(False)
diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml
index 843352532..23386f11b 100644
--- a/roles/openshift_master/tasks/system_container.yml
+++ b/roles/openshift_master/tasks/system_container.yml
@@ -6,7 +6,7 @@
- name: Pre-pull master system container image
command: >
- atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}
+ atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}
register: l_pull_result
changed_when: "'Pulling layer' in l_pull_result.stdout"
@@ -18,7 +18,7 @@
- name: Install or Update HA api master system container
oc_atomic_container:
name: "{{ openshift.common.service_type }}-master-api"
- image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
+ image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
state: latest
values:
- COMMAND=api
@@ -26,7 +26,7 @@
- name: Install or Update HA controller master system container
oc_atomic_container:
name: "{{ openshift.common.service_type }}-master-controllers"
- image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
+ image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
state: latest
values:
- COMMAND=controllers
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index b0fa72f19..9d11ed574 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -9,7 +9,7 @@
when:
- openshift.common.is_containerized | bool
-- include: registry_auth.yml
+- include_tasks: registry_auth.yml
- name: Disable the legacy master service if it exists
systemd:
@@ -26,7 +26,7 @@
ignore_errors: true
when:
- openshift.master.cluster_method == "native"
- - not openshift.common.is_master_system_container | bool
+ - not l_is_master_system_container | bool
# This is the image used for both HA and non-HA clusters:
- name: Pre-pull master image
@@ -36,7 +36,7 @@
changed_when: "'Downloaded newer image' in l_pull_result.stdout"
when:
- openshift.common.is_containerized | bool
- - not openshift.common.is_master_system_container | bool
+ - not l_is_master_system_container | bool
- name: Create the ha systemd unit files
template:
@@ -44,7 +44,7 @@
dest: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master-{{ item }}.service"
when:
- openshift.master.cluster_method == "native"
- - not openshift.common.is_master_system_container | bool
+ - not l_is_master_system_container | bool
with_items:
- api
- controllers
@@ -64,7 +64,7 @@
- controllers
when:
- openshift.master.cluster_method == "native"
- - not openshift.common.is_master_system_container | bool
+ - not l_is_master_system_container | bool
- name: Preserve Master API Proxy Config options
command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api
diff --git a/roles/openshift_master/tasks/upgrade.yml b/roles/openshift_master/tasks/upgrade.yml
index 92371921d..f84cf2f6e 100644
--- a/roles/openshift_master/tasks/upgrade.yml
+++ b/roles/openshift_master/tasks/upgrade.yml
@@ -1,16 +1,16 @@
---
-- include: upgrade/rpm_upgrade.yml
+- include_tasks: upgrade/rpm_upgrade.yml
when: not openshift.common.is_containerized | bool
-- include: upgrade/upgrade_scheduler.yml
+- include_tasks: upgrade/upgrade_scheduler.yml
# master_config_hook is passed in from upgrade play.
-- include: "upgrade/{{ master_config_hook }}"
+- include_tasks: "upgrade/{{ master_config_hook }}"
when: master_config_hook is defined
-- include: journald.yml
+- include_tasks: journald.yml
-- include: systemd_units.yml
+- include_tasks: systemd_units.yml
- name: Check for ca-bundle.crt
stat:
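These hunks belong to a tree-wide migration from the deprecated bare `include` to `include_tasks`, the Ansible 2.4 spelling for dynamic task-file inclusion. A minimal sketch of the two forms, reusing the task files named in the hunk above:
```
# Deprecated since Ansible 2.4:
- include: journald.yml

# Dynamic include: the file is resolved at runtime, so a variable
# path and per-host conditionals work on the include itself:
- include_tasks: "upgrade/{{ master_config_hook }}"
  when: master_config_hook is defined
```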
diff --git a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
index f914a9978..caab3045a 100644
--- a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
@@ -18,3 +18,5 @@
- "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
- "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ register: result
+ until: result | success
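Registering the package task and looping with `until` wraps the RPM transaction in Ansible's retry mechanism, so a transient repository failure no longer aborts the upgrade outright. A minimal sketch of the idiom with the retry knobs spelled out (the hunks in this diff rely on the defaults of three retries, five seconds apart):
```
- name: Install a package with retries (sketch; hypothetical package name)
  package:
    name: somepkg
    state: present
  register: result
  until: result | success   # legacy filter syntax, as used throughout this tree
  retries: 3                # Ansible's default when omitted
  delay: 5                  # seconds between attempts
```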
diff --git a/roles/openshift_master/templates/htpasswd.j2 b/roles/openshift_master/templates/htpasswd.j2
index ba2c02e20..7e2e05076 100644
--- a/roles/openshift_master/templates/htpasswd.j2
+++ b/roles/openshift_master/templates/htpasswd.j2
@@ -1,5 +1,5 @@
{% if 'htpasswd_users' in openshift.master %}
-{% for user,pass in openshift.master.htpasswd_users.iteritems() %}
+{% for user,pass in openshift.master.htpasswd_users.items() %}
{{ user ~ ':' ~ pass }}
{% endfor %}
{% endif %}
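`dict.iteritems()` no longer exists under Python 3, so every Jinja2 template in the tree moves to `dict.items()`, which behaves the same on Python 2 and 3. A minimal sketch of the rendered output, assuming a hypothetical two-user dict:
```
{# assume htpasswd_users = {'alice': 'HASH1', 'bob': 'HASH2'} #}
{% for user, pass in htpasswd_users.items() %}
{{ user ~ ':' ~ pass }}
{% endfor %}
{# renders:
alice:HASH1
bob:HASH2
#}
```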
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 629fe3286..a0f00e545 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -65,7 +65,7 @@ disabledFeatures: {{ openshift.master.disabled_features | to_json }}
{% endif %}
{% if openshift.master.embedded_dns | bool %}
dnsConfig:
- bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}
+ bindAddress: {{ openshift.master.bind_addr }}:{{ openshift_master_dns_port }}
bindNetwork: tcp4
{% endif %}
etcdClientInfo:
@@ -196,13 +196,13 @@ policyConfig:
openshiftInfrastructureNamespace: openshift-infra
openshiftSharedResourcesNamespace: openshift
projectConfig:
- defaultNodeSelector: "{{ openshift.master.default_node_selector }}"
- projectRequestMessage: "{{ openshift.master.project_request_message }}"
- projectRequestTemplate: "{{ openshift.master.project_request_template }}"
+ defaultNodeSelector: "{{ osm_default_node_selector }}"
+ projectRequestMessage: "{{ osm_project_request_message }}"
+ projectRequestTemplate: "{{ osm_project_request_template }}"
securityAllocator:
- mcsAllocatorRange: "{{ openshift.master.mcs_allocator_range }}"
- mcsLabelsPerProject: {{ openshift.master.mcs_labels_per_project }}
- uidAllocatorRange: "{{ openshift.master.uid_allocator_range }}"
+ mcsAllocatorRange: "{{ osm_mcs_allocator_range }}"
+ mcsLabelsPerProject: {{ osm_mcs_labels_per_project }}
+ uidAllocatorRange: "{{ osm_uid_allocator_range }}"
routingConfig:
subdomain: "{{ openshift_master_default_subdomain | default("") }}"
serviceAccountConfig:
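The template now reads `openshift_master_dns_port` and the `osm_*` inventory variables directly instead of routing them through `openshift.master.*` facts; the matching fact definitions are deleted from `openshift_master_facts` further down. A hypothetical sketch of the variables an operator would now set, with illustrative values:
```
# Hypothetical group_vars entries (values for illustration only):
openshift_master_dns_port: 8053
osm_default_node_selector: "region=primary"
osm_project_request_message: "To request a project, contact your administrator."
osm_mcs_allocator_range: "s0:/2"
osm_mcs_labels_per_project: 5
osm_uid_allocator_range: "1000000000-1999999999/10000"
```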
diff --git a/roles/openshift_master_cluster/tasks/main.yml b/roles/openshift_master_cluster/tasks/main.yml
index 40705d357..41bfc72cb 100644
--- a/roles/openshift_master_cluster/tasks/main.yml
+++ b/roles/openshift_master_cluster/tasks/main.yml
@@ -10,5 +10,5 @@
failed_when: false
when: openshift.master.cluster_method == "pacemaker"
-- include: configure.yml
+- include_tasks: configure.yml
when: "pcs_status | failed and 'Error: cluster is not currently running on this node' in pcs_status.stderr"
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index cf0be3bef..20cc5358e 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -55,8 +55,6 @@
embedded_etcd: "{{ openshift_master_embedded_etcd | default(None) }}"
embedded_kube: "{{ openshift_master_embedded_kube | default(None) }}"
embedded_dns: "{{ openshift_master_embedded_dns | default(None) }}"
- # defaults to 8053 when using dnsmasq in 1.2/3.2
- dns_port: "{{ openshift_master_dns_port | default(None) }}"
bind_addr: "{{ openshift_master_bind_addr | default(None) }}"
pod_eviction_timeout: "{{ openshift_master_pod_eviction_timeout | default(None) }}"
session_max_seconds: "{{ openshift_master_session_max_seconds | default(None) }}"
@@ -77,12 +75,6 @@
sdn_cluster_network_cidr: "{{ osm_cluster_network_cidr | default(None) }}"
sdn_host_subnet_length: "{{ osm_host_subnet_length | default(None) }}"
custom_cors_origins: "{{ osm_custom_cors_origins | default(None) }}"
- default_node_selector: "{{ osm_default_node_selector | default(None) }}"
- project_request_message: "{{ osm_project_request_message | default(None) }}"
- project_request_template: "{{ osm_project_request_template | default(None) }}"
- mcs_allocator_range: "{{ osm_mcs_allocator_range | default(None) }}"
- mcs_labels_per_project: "{{ osm_mcs_labels_per_project | default(None) }}"
- uid_allocator_range: "{{ osm_uid_allocator_range | default(None) }}"
registry_selector: "{{ openshift_registry_selector | default(None) }}"
api_server_args: "{{ osm_api_server_args | default(None) }}"
controller_args: "{{ osm_controller_args | default(None) }}"
diff --git a/roles/openshift_metrics/tasks/generate_certificates.yaml b/roles/openshift_metrics/tasks/generate_certificates.yaml
index 3dc15d58b..bb842d710 100644
--- a/roles/openshift_metrics/tasks/generate_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_certificates.yaml
@@ -8,4 +8,4 @@
--serial='{{ mktemp.stdout }}/ca.serial.txt'
--name="metrics-signer@{{lookup('pipe','date +%s')}}"
-- include: generate_hawkular_certificates.yaml
+- include_tasks: generate_hawkular_certificates.yaml
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
index 31129a6ac..0fd19c9f8 100644
--- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
@@ -1,13 +1,13 @@
---
- name: generate hawkular-metrics certificates
- include: setup_certificate.yaml
+ include_tasks: setup_certificate.yaml
vars:
component: hawkular-metrics
hostnames: "hawkular-metrics,hawkular-metrics.{{ openshift_metrics_project }}.svc.cluster.local,{{ openshift_metrics_hawkular_hostname }}"
changed_when: no
- name: generate hawkular-cassandra certificates
- include: setup_certificate.yaml
+ include_tasks: setup_certificate.yaml
vars:
component: hawkular-cassandra
hostnames: hawkular-cassandra
diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml
index 0eb852d91..a33b28ba7 100644
--- a/roles/openshift_metrics/tasks/install_heapster.yaml
+++ b/roles/openshift_metrics/tasks/install_heapster.yaml
@@ -66,4 +66,4 @@
namespace: "{{ openshift_metrics_project }}"
changed_when: no
-- include: generate_heapster_secrets.yaml
+- include_tasks: generate_heapster_secrets.yaml
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index fdf4ae57f..49d1d8cf1 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -1,8 +1,8 @@
---
-- include: pre_install.yaml
+- include_tasks: pre_install.yaml
- name: Install Metrics
- include: "{{ role_path }}/tasks/install_{{ include_file }}.yaml"
+ include_tasks: "install_{{ include_file }}.yaml"
with_items:
- support
- heapster
@@ -13,11 +13,11 @@
when: not openshift_metrics_heapster_standalone | bool
- name: Install Heapster Standalone
- include: install_heapster.yaml
+ include_tasks: install_heapster.yaml
when: openshift_metrics_heapster_standalone | bool
- name: Install Hawkular OpenShift Agent (HOSA)
- include: install_hosa.yaml
+ include_tasks: install_hosa.yaml
when: openshift_metrics_install_hawkular_agent | default(false) | bool
- find:
@@ -34,7 +34,7 @@
changed_when: no
- name: Create objects
- include: oc_apply.yaml
+ include_tasks: oc_apply.yaml
vars:
kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
namespace: "{{ openshift_metrics_project }}"
@@ -58,7 +58,7 @@
changed_when: no
- name: Create Hawkular Agent objects
- include: oc_apply.yaml
+ include_tasks: oc_apply.yaml
vars:
kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
namespace: "{{ openshift_metrics_hawkular_agent_namespace }}"
@@ -67,7 +67,7 @@
with_items: "{{ hawkular_agent_object_defs.results }}"
when: openshift_metrics_install_hawkular_agent | bool
-- include: update_master_config.yaml
+- include_tasks: update_master_config.yaml
- command: >
{{openshift.common.client_binary}}
@@ -80,11 +80,11 @@
changed_when: no
- name: Scaling down cluster to recognize changes
- include: stop_metrics.yaml
+ include_tasks: stop_metrics.yaml
when: existing_metrics_rc.stdout_lines | length > 0
- name: Scaling up cluster
- include: start_metrics.yaml
+ include_tasks: start_metrics.yaml
tags: openshift_metrics_start_cluster
when:
- openshift_metrics_start_cluster | default(true) | bool
diff --git a/roles/openshift_metrics/tasks/install_support.yaml b/roles/openshift_metrics/tasks/install_support.yaml
index 584e3be05..c3727d530 100644
--- a/roles/openshift_metrics/tasks/install_support.yaml
+++ b/roles/openshift_metrics/tasks/install_support.yaml
@@ -19,7 +19,7 @@
- fail: msg="'keytool' is unavailable. Please install java-1.8.0-openjdk-headless on the control node"
when: keytool_check.rc == 1
-- include: generate_certificates.yaml
-- include: generate_serviceaccounts.yaml
-- include: generate_services.yaml
-- include: generate_rolebindings.yaml
+- include_tasks: generate_certificates.yaml
+- include_tasks: generate_serviceaccounts.yaml
+- include_tasks: generate_services.yaml
+- include_tasks: generate_rolebindings.yaml
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index 10509fc1e..9dfe360bb 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -43,15 +43,15 @@
check_mode: no
tags: metrics_init
-- include: install_metrics.yaml
+- include_tasks: install_metrics.yaml
when:
- openshift_metrics_install_metrics | bool
-- include: uninstall_metrics.yaml
+- include_tasks: uninstall_metrics.yaml
when:
- not openshift_metrics_install_metrics | bool
-- include: uninstall_hosa.yaml
+- include_tasks: uninstall_hosa.yaml
when: not openshift_metrics_install_hawkular_agent | bool
- name: Delete temp directory
diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
index 403b1252c..1265c7bfd 100644
--- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml
+++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
@@ -1,6 +1,6 @@
---
- name: stop metrics
- include: stop_metrics.yaml
+ include_tasks: stop_metrics.yaml
- name: remove metrics components
command: >
diff --git a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
index 6a3811598..11476bf75 100644
--- a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
@@ -24,7 +24,7 @@ spec:
- {{openshift_metrics_cassandra_storage_group}}
{% if node_selector is iterable and node_selector | length > 0 %}
nodeSelector:
-{% for key, value in node_selector.iteritems() %}
+{% for key, value in node_selector.items() %}
{{key}}: "{{value}}"
{% endfor %}
{% endif %}
diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
index 0662bea53..e976bc222 100644
--- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
@@ -19,7 +19,7 @@ spec:
serviceAccount: hawkular
{% if node_selector is iterable and node_selector | length > 0 %}
nodeSelector:
-{% for key, value in node_selector.iteritems() %}
+{% for key, value in node_selector.items() %}
{{key}}: "{{value}}"
{% endfor %}
{% endif %}
diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2
index 40d09e9fa..04e2b2937 100644
--- a/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2
+++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2
@@ -19,7 +19,7 @@ spec:
serviceAccount: hawkular-openshift-agent
{% if node_selector is iterable and node_selector | length > 0 %}
nodeSelector:
-{% for key, value in node_selector.iteritems() %}
+{% for key, value in node_selector.items() %}
{{key}}: "{{value}}"
{% endfor %}
{% endif %}
diff --git a/roles/openshift_metrics/templates/heapster.j2 b/roles/openshift_metrics/templates/heapster.j2
index e732c1eee..0d4dd0e2b 100644
--- a/roles/openshift_metrics/templates/heapster.j2
+++ b/roles/openshift_metrics/templates/heapster.j2
@@ -20,7 +20,7 @@ spec:
serviceAccountName: heapster
{% if node_selector is iterable and node_selector | length > 0 %}
nodeSelector:
-{% for key, value in node_selector.iteritems() %}
+{% for key, value in node_selector.items() %}
{{key}}: "{{value}}"
{% endfor %}
{% endif %}
diff --git a/roles/openshift_metrics/templates/pvc.j2 b/roles/openshift_metrics/templates/pvc.j2
index b4e6a1503..9a4b428ec 100644
--- a/roles/openshift_metrics/templates/pvc.j2
+++ b/roles/openshift_metrics/templates/pvc.j2
@@ -7,13 +7,13 @@ metadata:
metrics-infra: support
{% elif labels %}
labels:
-{% for key, value in labels.iteritems() %}
+{% for key, value in labels.items() %}
{{ key }}: {{ value }}
{% endfor %}
{% endif %}
{% if annotations is defined and annotations %}
annotations:
-{% for key,value in annotations.iteritems() %}
+{% for key,value in annotations.items() %}
{{key}}: {{value}}
{% endfor %}
{% endif %}
@@ -21,7 +21,7 @@ spec:
{% if pv_selector is defined and pv_selector is mapping %}
selector:
matchLabels:
-{% for key,value in pv_selector.iteritems() %}
+{% for key,value in pv_selector.items() %}
{{key}}: {{value}}
{% endfor %}
{% endif %}
diff --git a/roles/openshift_metrics/templates/rolebinding.j2 b/roles/openshift_metrics/templates/rolebinding.j2
index 5230f0780..a9a24c157 100644
--- a/roles/openshift_metrics/templates/rolebinding.j2
+++ b/roles/openshift_metrics/templates/rolebinding.j2
@@ -4,7 +4,7 @@ metadata:
name: {{obj_name}}
{% if labels is defined %}
labels:
-{% for k, v in labels.iteritems() %}
+{% for k, v in labels.items() %}
{{ k }}: {{ v }}
{% endfor %}
{% endif %}
diff --git a/roles/openshift_metrics/templates/route.j2 b/roles/openshift_metrics/templates/route.j2
index 253d6ecf5..9d628b666 100644
--- a/roles/openshift_metrics/templates/route.j2
+++ b/roles/openshift_metrics/templates/route.j2
@@ -7,7 +7,7 @@ metadata:
{% endif %}
{% if labels is defined and labels %}
labels:
-{% for k, v in labels.iteritems() %}
+{% for k, v in labels.items() %}
{{ k }}: {{ v }}
{% endfor %}
{% endif %}
diff --git a/roles/openshift_metrics/templates/secret.j2 b/roles/openshift_metrics/templates/secret.j2
index 5b9dba122..b788be04e 100644
--- a/roles/openshift_metrics/templates/secret.j2
+++ b/roles/openshift_metrics/templates/secret.j2
@@ -4,15 +4,15 @@ metadata:
name: "{{ name }}"
{% if annotations is defined%}
annotations:
-{% for key, value in annotations.iteritems() %}
+{% for key, value in annotations.items() %}
{{key}}: {{value}}
{% endfor %}
{% endif %}
labels:
-{% for k, v in labels.iteritems() %}
+{% for k, v in labels.items() %}
{{ k }}: {{ v }}
{% endfor %}
data:
-{% for k, v in data.iteritems() %}
+{% for k, v in data.items() %}
{{ k }}: {{ v }}
{% endfor %}
diff --git a/roles/openshift_metrics/templates/service.j2 b/roles/openshift_metrics/templates/service.j2
index ce0bc2eec..4d23982f1 100644
--- a/roles/openshift_metrics/templates/service.j2
+++ b/roles/openshift_metrics/templates/service.j2
@@ -4,13 +4,13 @@ metadata:
name: "{{obj_name}}"
{% if annotations is defined%}
annotations:
-{% for key, value in annotations.iteritems() %}
+{% for key, value in annotations.items() %}
{{key}}: {{value}}
{% endfor %}
{% endif %}
{% if labels is defined%}
labels:
-{% for key, value in labels.iteritems() %}
+{% for key, value in labels.items() %}
{{key}}: {{value}}
{% endfor %}
{% endif %}
@@ -22,7 +22,7 @@ spec:
ports:
{% for port in ports %}
-
-{% for key, value in port.iteritems() %}
+{% for key, value in port.items() %}
{{key}}: {{value}}
{% endfor %}
{% if port.targetPort is undefined %}
@@ -33,6 +33,6 @@ spec:
targetPort: {{service_targetPort}}
{% endif %}
selector:
- {% for key, value in selector.iteritems() %}
+ {% for key, value in selector.items() %}
{{key}}: {{value}}
{% endfor %}
diff --git a/roles/openshift_metrics/templates/serviceaccount.j2 b/roles/openshift_metrics/templates/serviceaccount.j2
index b22acc594..ea19f17d7 100644
--- a/roles/openshift_metrics/templates/serviceaccount.j2
+++ b/roles/openshift_metrics/templates/serviceaccount.j2
@@ -4,7 +4,7 @@ metadata:
name: {{obj_name}}
{% if labels is defined%}
labels:
-{% for key, value in labels.iteritems() %}
+{% for key, value in labels.items() %}
{{key}}: {{value}}
{% endfor %}
{% endif %}
diff --git a/roles/openshift_nfs/tasks/setup.yml b/roles/openshift_nfs/tasks/setup.yml
index 3070de495..edb854467 100644
--- a/roles/openshift_nfs/tasks/setup.yml
+++ b/roles/openshift_nfs/tasks/setup.yml
@@ -5,6 +5,8 @@
- name: Install nfs-utils
package: name=nfs-utils state=present
+ register: result
+ until: result | success
- name: Configure NFS
lineinfile:
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 89d154ad7..5a0c09f5c 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -1,12 +1,28 @@
---
openshift_node_debug_level: "{{ debug_level | default(2) }}"
+openshift_node_dnsmasq_install_network_manager_hook: true
+
+# lo must always be present in this list or dnsmasq will conflict with
+# the node's dns service.
+openshift_node_dnsmasq_except_interfaces:
+- lo
+
r_openshift_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+
openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"
openshift_service_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'atomic-openshift' }}"
+system_images_registry_dict:
+ openshift-enterprise: "registry.access.redhat.com"
+ origin: "docker.io"
+
+system_images_registry: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}"
+l_is_openvswitch_system_container: "{{ (openshift_use_openvswitch_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+
openshift_image_tag: ''
default_r_openshift_node_image_prep_packages:
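The registry for system-container images is now computed in role defaults from the deployment type, replacing the old `openshift.common.system_images_registry` fact. A minimal sketch of how the lookup resolves:
```
- name: Show the registry chosen for the deployment type (illustration only)
  debug:
    msg: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}"
  vars:
    system_images_registry_dict:
      openshift-enterprise: registry.access.redhat.com
      origin: docker.io
    openshift_deployment_type: origin
# Prints: docker.io
```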
diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node/files/networkmanager/99-origin-dns.sh
index f4e48b5b7..f4e48b5b7 100755
--- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
+++ b/roles/openshift_node/files/networkmanager/99-origin-dns.sh
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index b102c1b18..229c6bbed 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -1,4 +1,15 @@
---
+- name: restart NetworkManager
+ systemd:
+ name: NetworkManager
+ state: restarted
+ enabled: True
+
+- name: restart dnsmasq
+ systemd:
+ name: dnsmasq
+ state: restarted
+
- name: restart openvswitch
systemd:
name: openvswitch
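The `restart NetworkManager` and `restart dnsmasq` handlers move into `openshift_node` as part of absorbing the `openshift_node_dnsmasq` role (whose files are deleted at the end of this diff). Handlers only run when a notifying task reports a change; a minimal sketch using the template shipped by this role:
```
- name: Install the origin dnsmasq configuration (sketch)
  template:
    src: origin-dns.conf.j2
    dest: /etc/dnsmasq.d/origin-dns.conf
  notify: restart dnsmasq   # fires once at the end of the play, only on change
```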
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index c32aa1600..927d107c6 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -17,11 +17,8 @@ dependencies:
- role: lib_openshift
- role: lib_os_firewall
when: not (openshift_node_upgrade_in_progress | default(False))
-- role: openshift_clock
- when: not (openshift_node_upgrade_in_progress | default(False))
- role: openshift_docker
- role: openshift_cloud_provider
when: not (openshift_node_upgrade_in_progress | default(False))
-- role: openshift_node_dnsmasq
- role: lib_utils
when: openshift_node_upgrade_in_progress | default(False)
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
index b8be50f6c..a042bc01b 100644
--- a/roles/openshift_node/tasks/bootstrap.yml
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -4,6 +4,8 @@
name: "{{ item }}"
state: present
with_items: "{{ r_openshift_node_image_prep_packages }}"
+ register: result
+ until: result | success
- name: create the directory for node
file:
@@ -32,8 +34,7 @@
regexp: "^CONFIG_FILE=.*"
- name: include aws sysconfig credentials
- include: aws.yml
- static: yes
+ import_tasks: aws.yml
when: not (openshift_node_use_instance_profiles | default(False))
#- name: update the ExecStart to have bootstrap
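`include` with `static: yes` becomes `import_tasks`, the Ansible 2.4 spelling for static (parse-time) inclusion; any `when:` is then copied onto every task inside the imported file rather than evaluated on the include itself. A sketch of the equivalence, using the role's `aws.yml`:
```
# Old spelling (deprecated):
- include: aws.yml
  static: yes

# New spelling -- resolved at parse time; the condition below is
# applied to each task inside aws.yml:
- import_tasks: aws.yml
  when: not (openshift_node_use_instance_profiles | default(False))
```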
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index 2fea33454..741a2234f 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -1,6 +1,6 @@
---
- name: Install the systemd units
- include: systemd_units.yml
+ include_tasks: systemd_units.yml
- name: Start and enable openvswitch service
systemd:
@@ -47,8 +47,7 @@
- restart node
- name: include aws provider credentials
- include: aws.yml
- static: yes
+ import_tasks: aws.yml
when: not (openshift_node_use_instance_profiles | default(False))
# Necessary because when you're on a node that's also a master the master will be
diff --git a/roles/openshift_node_dnsmasq/tasks/main.yml b/roles/openshift_node/tasks/dnsmasq.yml
index 9bbaafc29..f210a3a21 100644
--- a/roles/openshift_node_dnsmasq/tasks/main.yml
+++ b/roles/openshift_node/tasks/dnsmasq.yml
@@ -13,6 +13,8 @@
- name: Install dnsmasq
package: name=dnsmasq state=installed
when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
- name: ensure origin/node directory exists
file:
@@ -59,9 +61,9 @@
state: started
# Dynamic NetworkManager based dispatcher
-- include: ./network-manager.yml
+- include_tasks: dnsmasq/network-manager.yml
when: network_manager_active | bool
# Relies on ansible in order to configure static config
-- include: ./no-network-manager.yml
+- include_tasks: dnsmasq/no-network-manager.yml
when: not network_manager_active | bool
diff --git a/roles/openshift_node_dnsmasq/tasks/network-manager.yml b/roles/openshift_node/tasks/dnsmasq/network-manager.yml
index e5a92a630..e5a92a630 100644
--- a/roles/openshift_node_dnsmasq/tasks/network-manager.yml
+++ b/roles/openshift_node/tasks/dnsmasq/network-manager.yml
diff --git a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml b/roles/openshift_node/tasks/dnsmasq/no-network-manager.yml
index 8a7da66c2..541c8115a 100644
--- a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
+++ b/roles/openshift_node/tasks/dnsmasq/no-network-manager.yml
@@ -7,5 +7,7 @@
name: NetworkManager
state: present
notify: restart NetworkManager
+ register: result
+ until: result | success
-- include: ./network-manager.yml
+- include_tasks: network-manager.yml
diff --git a/roles/openshift_node/tasks/docker/upgrade.yml b/roles/openshift_node/tasks/docker/upgrade.yml
index ebe87d6fd..d743d2188 100644
--- a/roles/openshift_node/tasks/docker/upgrade.yml
+++ b/roles/openshift_node/tasks/docker/upgrade.yml
@@ -36,5 +36,7 @@
- name: Upgrade Docker
package: name=docker{{ '-' + docker_version }} state=present
+ register: result
+ until: result | success
# starting docker happens back in ../main.yml where it calls ../restart.yml
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 6b7e40491..1ed4a05c1 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -5,6 +5,8 @@
package:
name: "{{ openshift.common.service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
+ register: result
+ until: result | success
- name: Install sdn-ovs package
package:
@@ -12,15 +14,19 @@
state: present
when:
- openshift_node_use_openshift_sdn | bool
+ register: result
+ until: result | success
- name: Install conntrack-tools package
package:
name: "conntrack-tools"
state: present
+ register: result
+ until: result | success
- when:
- openshift.common.is_containerized | bool
- - not openshift.common.is_node_system_container | bool
+ - not l_is_node_system_container | bool
block:
- name: Pre-pull node image when containerized
command: >
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index eae9ca7bc..d46b1f9c3 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -6,9 +6,10 @@
- deployment_type == 'openshift-enterprise'
- not openshift_use_crio | default(false)
+- include: dnsmasq.yml
+
- name: setup firewall
- include: firewall.yml
- static: yes
+ import_tasks: firewall.yml
#### Disable SWAP #####
# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
@@ -41,7 +42,7 @@
#### End Disable Swap Block ####
- name: include node installer
- include: install.yml
+ include_tasks: install.yml
- name: Restart cri-o
systemd:
@@ -66,34 +67,34 @@
sysctl_file: "/etc/sysctl.d/99-openshift.conf"
reload: yes
-- include: registry_auth.yml
+- include_tasks: registry_auth.yml
- name: include standard node config
- include: config.yml
+ include_tasks: config.yml
#### Storage class plugins here ####
- name: NFS storage plugin configuration
- include: storage_plugins/nfs.yml
+ include_tasks: storage_plugins/nfs.yml
tags:
- nfs
- name: GlusterFS storage plugin configuration
- include: storage_plugins/glusterfs.yml
+ include_tasks: storage_plugins/glusterfs.yml
when: "'glusterfs' in openshift.node.storage_plugin_deps"
- name: Ceph storage plugin configuration
- include: storage_plugins/ceph.yml
+ include_tasks: storage_plugins/ceph.yml
when: "'ceph' in openshift.node.storage_plugin_deps"
- name: iSCSI storage plugin configuration
- include: storage_plugins/iscsi.yml
+ include_tasks: storage_plugins/iscsi.yml
when: "'iscsi' in openshift.node.storage_plugin_deps"
##### END Storage #####
-- include: config/workaround-bz1331590-ovs-oom-fix.yml
+- include_tasks: config/workaround-bz1331590-ovs-oom-fix.yml
when: openshift_node_use_openshift_sdn | default(true) | bool
- name: include bootstrap node config
- include: bootstrap.yml
+ include_tasks: bootstrap.yml
when: openshift_node_bootstrap
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index 164a79b39..73dc9e130 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -6,14 +6,14 @@
- name: Pre-pull node system container image
command: >
- atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}
+ atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
- name: Install or Update node system container
oc_atomic_container:
name: "{{ openshift.common.service_type }}-node"
- image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}"
+ image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}"
values:
- "DNS_DOMAIN={{ openshift.common.dns_domain }}"
- "DOCKER_SERVICE={{ openshift.docker.service_name }}.service"
diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml
index 0f73ce454..8c3548475 100644
--- a/roles/openshift_node/tasks/openvswitch_system_container.yml
+++ b/roles/openshift_node/tasks/openvswitch_system_container.yml
@@ -17,14 +17,14 @@
- name: Pre-pull OpenVSwitch system container image
command: >
- atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}
+ atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
- name: Install or Update OpenVSwitch system container
oc_atomic_container:
name: openvswitch
- image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}"
+ image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}"
state: latest
values:
- "DOCKER_SERVICE={{ l_service_name }}"
diff --git a/roles/openshift_node/tasks/storage_plugins/ceph.yml b/roles/openshift_node/tasks/storage_plugins/ceph.yml
index 037efe81a..72a3b837f 100644
--- a/roles/openshift_node/tasks/storage_plugins/ceph.yml
+++ b/roles/openshift_node/tasks/storage_plugins/ceph.yml
@@ -2,3 +2,5 @@
- name: Install Ceph storage plugin dependencies
package: name=ceph-common state=present
when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
index 1b8a7ad50..08ea71a0c 100644
--- a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
@@ -2,6 +2,8 @@
- name: Install GlusterFS storage plugin dependencies
package: name=glusterfs-fuse state=present
when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
- name: Check for existence of fusefs sebooleans
command: getsebool {{ item }}
diff --git a/roles/openshift_node/tasks/storage_plugins/iscsi.yml b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
index 1c5478c55..ece68dc71 100644
--- a/roles/openshift_node/tasks/storage_plugins/iscsi.yml
+++ b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
@@ -2,3 +2,5 @@
- name: Install iSCSI storage plugin dependencies
package: name=iscsi-initiator-utils state=present
when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml
index 7e1035893..5eacf42e8 100644
--- a/roles/openshift_node/tasks/storage_plugins/nfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml
@@ -2,6 +2,8 @@
- name: Install NFS storage plugin dependencies
package: name=nfs-utils state=present
when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
- name: Check for existence of nfs sebooleans
command: getsebool {{ item }}
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 9c182ade6..397e1ba18 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -3,7 +3,7 @@
template:
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}"
- when: not openshift.common.is_node_system_container | bool
+ when: not l_is_node_system_container | bool
notify:
- reload systemd units
- restart node
@@ -11,21 +11,21 @@
- when: openshift.common.is_containerized | bool
block:
- name: include node deps docker service file
- include: config/install-node-deps-docker-service-file.yml
+ include_tasks: config/install-node-deps-docker-service-file.yml
- name: include ovs service environment file
- include: config/install-ovs-service-env-file.yml
+ include_tasks: config/install-ovs-service-env-file.yml
- name: Install Node system container
- include: node_system_container.yml
+ include_tasks: node_system_container.yml
when:
- - openshift.common.is_node_system_container | bool
+ - l_is_node_system_container | bool
- name: Install OpenvSwitch system containers
- include: openvswitch_system_container.yml
+ include_tasks: openvswitch_system_container.yml
when:
- openshift_node_use_openshift_sdn | bool
- - openshift.common.is_openvswitch_system_container | bool
+ - l_is_openvswitch_system_container | bool
- block:
- name: Pre-pull openvswitch image
@@ -34,11 +34,11 @@
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
- - include: config/install-ovs-docker-service-file.yml
+ - include_tasks: config/install-ovs-docker-service-file.yml
when:
- openshift.common.is_containerized | bool
- openshift_node_use_openshift_sdn | bool
- - not openshift.common.is_openvswitch_system_container | bool
+ - not l_is_openvswitch_system_container | bool
-- include: config/configure-node-settings.yml
-- include: config/configure-proxy-settings.yml
+- include_tasks: config/configure-node-settings.yml
+- include_tasks: config/configure-proxy-settings.yml
diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml
index 2bca1e974..561b56918 100644
--- a/roles/openshift_node/tasks/upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade.yml
@@ -10,7 +10,7 @@
# tasks file for openshift_node_upgrade
-- include: registry_auth.yml
+- include_tasks: registry_auth.yml
- name: Stop node and openvswitch services
service:
@@ -48,7 +48,7 @@
- openshift.common.is_containerized | bool
- openshift_use_openshift_sdn | bool
-- include: docker/upgrade.yml
+- include_tasks: docker/upgrade.yml
vars:
# We will restart Docker ourselves after everything is ready:
skip_docker_restart: True
@@ -56,10 +56,10 @@
- l_docker_upgrade is defined
- l_docker_upgrade | bool
-- include: "{{ node_config_hook }}"
+- include_tasks: "{{ node_config_hook }}"
when: node_config_hook is defined
-- include: upgrade/rpm_upgrade.yml
+- include_tasks: upgrade/rpm_upgrade.yml
vars:
component: "node"
openshift_version: "{{ openshift_pkg_version | default('') }}"
@@ -70,7 +70,7 @@
path: "/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf"
state: absent
-- include: upgrade/containerized_node_upgrade.yml
+- include_tasks: upgrade/containerized_node_upgrade.yml
when: openshift.common.is_containerized | bool
- name: Ensure containerized services stopped before Docker restart
@@ -107,6 +107,8 @@
name: openvswitch
state: latest
when: not openshift.common.is_containerized | bool
+ register: result
+ until: result | success
- name: Update oreg value
yedit:
@@ -165,7 +167,7 @@
value: "/etc/origin/node/resolv.conf"
# Restart all services
-- include: upgrade/restart.yml
+- include_tasks: upgrade/restart.yml
- name: Wait for node to be ready
oc_obj:
@@ -179,5 +181,4 @@
retries: 24
delay: 5
-- include_role:
- name: openshift_node_dnsmasq
+- include_tasks: dnsmasq.yml
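Because the dnsmasq tasks now live inside `openshift_node`, the upgrade path swaps the cross-role `include_role` for a local `include_tasks`. For context, a sketch of the before and after:
```
# Before: pull in a separate role
- include_role:
    name: openshift_node_dnsmasq

# After: the task file is local to this role
- include_tasks: dnsmasq.yml
```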
diff --git a/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml b/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml
index 96b94d8b6..245de60a7 100644
--- a/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml
@@ -6,7 +6,7 @@
skip_node_svc_handlers: True
- name: Update systemd units
- include: ../systemd_units.yml
+ include_tasks: ../systemd_units.yml
# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of
# play when the node has already been marked schedulable again. (this would look strange
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
index a998acf21..fcbe1a598 100644
--- a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
@@ -8,10 +8,14 @@
# We verified latest rpm available is suitable, so just yum update.
- name: Upgrade packages
package: "name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
+ register: result
+ until: result | success
- name: Ensure python-yaml present for config upgrade
package: name=PyYAML state=present
when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
- name: Install Node service file
template:
diff --git a/roles/openshift_node_dnsmasq/templates/node-dnsmasq.conf.j2 b/roles/openshift_node/templates/node-dnsmasq.conf.j2
index 3caa3bd4a..3caa3bd4a 100644
--- a/roles/openshift_node_dnsmasq/templates/node-dnsmasq.conf.j2
+++ b/roles/openshift_node/templates/node-dnsmasq.conf.j2
diff --git a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 b/roles/openshift_node/templates/origin-dns.conf.j2
index 6543c7c3e..6543c7c3e 100644
--- a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
+++ b/roles/openshift_node/templates/origin-dns.conf.j2
diff --git a/roles/openshift_node_dnsmasq/README.md b/roles/openshift_node_dnsmasq/README.md
deleted file mode 100644
index 4596190d7..000000000
--- a/roles/openshift_node_dnsmasq/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-OpenShift Node DNS resolver
-===========================
-
-Configure dnsmasq to act as a DNS resolver for an OpenShift node.
-
-Requirements
-------------
-
-Role Variables
---------------
-
-From this role:
-
-| Name | Default value | Description |
-|-----------------------------------------------------|---------------|-----------------------------------------------------------------------------------|
-| openshift_node_dnsmasq_install_network_manager_hook | true | Install NetworkManager hook updating /etc/resolv.conf with local dnsmasq instance |
-
-Dependencies
-------------
-
-* openshift_common
-* openshift_node_facts
-
-License
--------
-
-Apache License Version 2.0
diff --git a/roles/openshift_node_dnsmasq/defaults/main.yml b/roles/openshift_node_dnsmasq/defaults/main.yml
deleted file mode 100644
index ebcff46b5..000000000
--- a/roles/openshift_node_dnsmasq/defaults/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-openshift_node_dnsmasq_install_network_manager_hook: true
-
-# lo must always be present in this list or dnsmasq will conflict with
-# the node's dns service.
-openshift_node_dnsmasq_except_interfaces:
-- lo
diff --git a/roles/openshift_node_dnsmasq/handlers/main.yml b/roles/openshift_node_dnsmasq/handlers/main.yml
deleted file mode 100644
index 9f98126a0..000000000
--- a/roles/openshift_node_dnsmasq/handlers/main.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: restart NetworkManager
- systemd:
- name: NetworkManager
- state: restarted
- enabled: True
-
-- name: restart dnsmasq
- systemd:
- name: dnsmasq
- state: restarted
diff --git a/roles/openshift_node_dnsmasq/meta/main.yml b/roles/openshift_node_dnsmasq/meta/main.yml
deleted file mode 100644
index d80ed1b72..000000000
--- a/roles/openshift_node_dnsmasq/meta/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-galaxy_info:
- author: Scott Dodson
- description: OpenShift Node DNSMasq support
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 2.2
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
-dependencies:
-- role: openshift_node_facts
diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml
index b45130400..d33d09980 100644
--- a/roles/openshift_node_facts/tasks/main.yml
+++ b/roles/openshift_node_facts/tasks/main.yml
@@ -15,7 +15,6 @@
kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
labels: "{{ openshift_node_labels | default(None) }}"
registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}"
- schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
set_node_ip: "{{ openshift_set_node_ip | default(None) }}"
diff --git a/roles/openshift_node_group/defaults/main.yml b/roles/openshift_node_group/defaults/main.yml
index d398a7fdc..7c81409a5 100644
--- a/roles/openshift_node_group/defaults/main.yml
+++ b/roles/openshift_node_group/defaults/main.yml
@@ -23,4 +23,4 @@ openshift_node_group_network_plugin_default: "{{ os_sdn_network_plugin_name | de
openshift_node_group_network_plugin: "{{ openshift_node_group_network_plugin_default }}"
openshift_node_group_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
openshift_node_group_node_data_dir: "{{ openshift_node_group_node_data_dir_default }}"
-openshift_node_group_network_mtu: "{{ openshift_node_sdn_mtu | default(8951) }}"
+openshift_node_group_network_mtu: "{{ openshift_node_sdn_mtu | default(8951) | int }}"
diff --git a/roles/openshift_node_group/tasks/main.yml b/roles/openshift_node_group/tasks/main.yml
index c7c15683d..43ecf1b8b 100644
--- a/roles/openshift_node_group/tasks/main.yml
+++ b/roles/openshift_node_group/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Build node config maps
- include: create_config.yml
+ include_tasks: create_config.yml
vars:
openshift_node_group_name: "{{ node_group.name }}"
openshift_node_group_edits: "{{ node_group.edits | default([]) }}"
diff --git a/roles/openshift_node_group/templates/node-config.yaml.j2 b/roles/openshift_node_group/templates/node-config.yaml.j2
index 5e22dc6d2..3fd16247c 100644
--- a/roles/openshift_node_group/templates/node-config.yaml.j2
+++ b/roles/openshift_node_group/templates/node-config.yaml.j2
@@ -33,7 +33,7 @@ masterClientConnectionOverrides:
qps: 20
masterKubeConfig: node.kubeconfig
networkConfig:
- mtu: "{{ openshift_node_group_network_mtu }}"
+ mtu: {{ openshift_node_group_network_mtu }}
networkPluginName: {{ openshift_node_group_network_plugin }}
nodeIP: ""
podManifestConfig: null
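Dropping the quotes, together with the `| int` cast added to the role default above, makes `mtu` render as a YAML integer rather than a string, which is what the node config loader expects. A sketch with the default value, assuming `openshift_node_sdn_mtu` is unset:
```
# With openshift_node_group_network_mtu = 8951 (role default, cast by "| int"):
#   old template rendered:  mtu: "8951"   (a YAML string)
#   new template renders:   mtu: 8951     (a YAML integer)
networkConfig:
  mtu: 8951
```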
diff --git a/roles/openshift_openstack/tasks/node-packages.yml b/roles/openshift_openstack/tasks/node-packages.yml
index 7864f5269..e41104af1 100644
--- a/roles/openshift_openstack/tasks/node-packages.yml
+++ b/roles/openshift_openstack/tasks/node-packages.yml
@@ -6,6 +6,8 @@
name: "{{ item }}"
state: latest
with_items: "{{ openshift_openstack_required_packages }}"
+ register: result
+ until: result | success
- name: Install debug packages (optional)
yum:
@@ -13,3 +15,5 @@
state: latest
with_items: "{{ openshift_openstack_debug_packages }}"
when: openshift_openstack_install_debug_packages|bool
+ register: result
+ until: result | success
diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2
index bfa65b460..0e7538629 100644
--- a/roles/openshift_openstack/templates/heat_stack.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2
@@ -724,7 +724,7 @@ resources:
type: node
subtype: app
node_labels:
-{% for k, v in openshift_openstack_cluster_node_labels.app.iteritems() %}
+{% for k, v in openshift_openstack_cluster_node_labels.app.items() %}
{{ k|e }}: {{ v|e }}
{% endfor %}
image: {{ openshift_openstack_node_image }}
@@ -788,7 +788,7 @@ resources:
type: node
subtype: infra
node_labels:
-{% for k, v in openshift_openstack_cluster_node_labels.infra.iteritems() %}
+{% for k, v in openshift_openstack_cluster_node_labels.infra.items() %}
{{ k|e }}: {{ v|e }}
{% endfor %}
image: {{ openshift_openstack_infra_image }}
diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
index 9c5103597..ee9dac7cb 100644
--- a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
+++ b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
@@ -9,7 +9,7 @@ items:
name: "{{ volume.name }}"
{% if volume.labels is defined and volume.labels is mapping %}
labels:
-{% for key,value in volume.labels.iteritems() %}
+{% for key,value in volume.labels.items() %}
{{ key }}: {{ value }}
{% endfor %}
{% endif %}
diff --git a/roles/openshift_prometheus/README.md b/roles/openshift_prometheus/README.md
index 92f74928c..f1eca1da6 100644
--- a/roles/openshift_prometheus/README.md
+++ b/roles/openshift_prometheus/README.md
@@ -23,6 +23,17 @@ For default values, see [`defaults/main.yaml`](defaults/main.yaml).
- `openshift_prometheus_<COMPONENT>_image_version`: specify image version for the component
+- `openshift_prometheus_args`: Modify or add arguments for the Prometheus application
+
+- `openshift_prometheus_hostname`: specify the hostname for the route to prometheus `prometheus-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}}`
+
+- `openshift_prometheus_alerts_hostname`: specify the hostname for the route to prometheus-alerts `alerts-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}}`
+
+e.g.:
+```
+openshift_prometheus_args=['--storage.tsdb.retention=6h', '--storage.tsdb.min-block-duration=5s', '--storage.tsdb.max-block-duration=6m']
+```
+
## PVC related variables
Each Prometheus component (prometheus, alertmanager, alertbuffer) can request a PV claim by setting the corresponding role variable:
```
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
index 4e2cea0b9..df331a4bb 100644
--- a/roles/openshift_prometheus/defaults/main.yaml
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -4,11 +4,18 @@ openshift_prometheus_state: present
openshift_prometheus_namespace: openshift-metrics
+# default hosts for routes
+openshift_prometheus_hostname: prometheus-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}}
+openshift_prometheus_alerts_hostname: alerts-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}}
+
openshift_prometheus_node_selector: {"region":"infra"}
# additional prometheus rules file
openshift_prometheus_additional_rules_file: null
+# Prometheus application arguments
+openshift_prometheus_args: ['--storage.tsdb.retention=6h', '--storage.tsdb.min-block-duration=2m']
+
# storage
# One of ['emptydir', 'pvc']
openshift_prometheus_storage_type: "emptydir"
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index 21da4bc9d..ad15dc65f 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -3,7 +3,7 @@
# namespace
- name: Add prometheus project
oc_project:
- state: "{{ state }}"
+ state: present
name: "{{ openshift_prometheus_namespace }}"
node_selector: "{{ openshift_prometheus_node_selector | oo_selector_to_string_list() }}"
description: Prometheus
@@ -11,7 +11,7 @@
# secrets
- name: Set alert and prometheus secrets
oc_secret:
- state: "{{ state }}"
+ state: present
name: "{{ item }}-proxy"
namespace: "{{ openshift_prometheus_namespace }}"
contents:
@@ -24,7 +24,7 @@
# serviceaccount
- name: create prometheus serviceaccount
oc_serviceaccount:
- state: "{{ state }}"
+ state: present
name: prometheus
namespace: "{{ openshift_prometheus_namespace }}"
# TODO add annotations when supported
@@ -48,7 +48,7 @@
# create clusterrolebinding for prometheus serviceaccount
- name: Set cluster-reader permissions for prometheus
oc_adm_policy_user:
- state: "{{ state }}"
+ state: present
namespace: "{{ openshift_prometheus_namespace }}"
resource_kind: cluster-role
resource_name: cluster-reader
@@ -58,7 +58,7 @@
# TODO join into 1 task with loop
- name: Create prometheus service
oc_service:
- state: "{{ state }}"
+ state: present
name: "{{ item.name }}"
namespace: "{{ openshift_prometheus_namespace }}"
selector:
@@ -76,7 +76,7 @@
- name: Create alerts service
oc_service:
- state: "{{ state }}"
+ state: present
name: "{{ item.name }}"
namespace: "{{ openshift_prometheus_namespace }}"
selector:
@@ -111,14 +111,17 @@
# create prometheus and alerts routes
- name: create prometheus and alerts routes
oc_route:
- state: "{{ state }}"
+ state: present
name: "{{ item.name }}"
+ host: "{{ item.host }}"
namespace: "{{ openshift_prometheus_namespace }}"
service_name: "{{ item.name }}"
tls_termination: reencrypt
with_items:
- name: prometheus
+ host: "{{ openshift_prometheus_hostname }}"
- name: alerts
+ host: "{{ openshift_prometheus_alerts_hostname }}"
# Storage
- name: create prometheus pvc
@@ -185,7 +188,7 @@
# In prometheus configmap create "additional.rules" section if file exists
- name: Set prometheus configmap
oc_configmap:
- state: "{{ state }}"
+ state: present
name: "prometheus"
namespace: "{{ openshift_prometheus_namespace }}"
from_file:
@@ -196,7 +199,7 @@
- name: Set prometheus configmap
oc_configmap:
- state: "{{ state }}"
+ state: present
name: "prometheus"
namespace: "{{ openshift_prometheus_namespace }}"
from_file:
@@ -212,7 +215,7 @@
- name: Set alertmanager configmap
oc_configmap:
- state: "{{ state }}"
+ state: present
name: "prometheus-alerts"
namespace: "{{ openshift_prometheus_namespace }}"
from_file:
@@ -229,7 +232,7 @@
- name: Set prometheus stateful set
oc_obj:
- state: "{{ state }}"
+ state: present
name: "prometheus"
namespace: "{{ openshift_prometheus_namespace }}"
kind: statefulset
diff --git a/roles/openshift_prometheus/tasks/main.yaml b/roles/openshift_prometheus/tasks/main.yaml
index 5cc9a67eb..38798e1f5 100644
--- a/roles/openshift_prometheus/tasks/main.yaml
+++ b/roles/openshift_prometheus/tasks/main.yaml
@@ -20,9 +20,11 @@
mode: 0755
changed_when: False
-- include: install_prometheus.yaml
- vars:
- state: "{{ openshift_prometheus_state }}"
+- include_tasks: install_prometheus.yaml
+ when: openshift_prometheus_state == 'present'
+
+- include_tasks: uninstall_prometheus.yaml
+ when: openshift_prometheus_state == 'absent'
- name: Delete temp directory
file:
diff --git a/roles/openshift_prometheus/tasks/uninstall_prometheus.yaml b/roles/openshift_prometheus/tasks/uninstall_prometheus.yaml
new file mode 100644
index 000000000..d746402db
--- /dev/null
+++ b/roles/openshift_prometheus/tasks/uninstall_prometheus.yaml
@@ -0,0 +1,7 @@
+---
+
+# Remove the namespace - this deletes every object inside it
+- name: Remove prometheus project
+ oc_project:
+ state: absent
+ name: "{{ openshift_prometheus_namespace }}"
diff --git a/roles/openshift_prometheus/templates/prometheus.j2 b/roles/openshift_prometheus/templates/prometheus.j2
index 456db3a57..d780550b8 100644
--- a/roles/openshift_prometheus/templates/prometheus.j2
+++ b/roles/openshift_prometheus/templates/prometheus.j2
@@ -22,7 +22,7 @@ spec:
serviceAccountName: prometheus
{% if openshift_prometheus_node_selector is iterable and openshift_prometheus_node_selector | length > 0 %}
nodeSelector:
-{% for key, value in openshift_prometheus_node_selector.iteritems() %}
+{% for key, value in openshift_prometheus_node_selector.items() %}
{{ key }}: "{{ value }}"
{% endfor %}
{% endif %}
@@ -75,8 +75,9 @@ spec:
- name: prometheus
args:
- - --storage.tsdb.retention=6h
- - --storage.tsdb.min-block-duration=2m
+{% for arg in openshift_prometheus_args %}
+ - {{ arg }}
+{% endfor %}
- --config.file=/etc/prometheus/prometheus.yml
- --web.listen-address=localhost:9090
image: "{{ l_openshift_prometheus_image_prefix }}prometheus:{{ l_openshift_prometheus_image_version }}"
diff --git a/roles/openshift_prometheus/vars/default_images.yml b/roles/openshift_prometheus/vars/default_images.yml
index ad52a3125..31f6c1bb1 100644
--- a/roles/openshift_prometheus/vars/default_images.yml
+++ b/roles/openshift_prometheus/vars/default_images.yml
@@ -6,7 +6,7 @@ l_openshift_prometheus_alertmanager_image_prefix: "{{ openshift_prometheus_alter
l_openshift_prometheus_alertbuffer_image_prefix: "{{ openshift_prometheus_alertbuffer_image_prefix | default(l_openshift_prometheus_image_prefix) }}"
# image version defaults
-l_openshift_prometheus_image_version: "{{ openshift_prometheus_image_version | default('v2.0.0-dev.3') }}"
+l_openshift_prometheus_image_version: "{{ openshift_prometheus_image_version | default('v2.0.0') }}"
l_openshift_prometheus_proxy_image_version: "{{ openshift_prometheus_proxy_image_version | default('v1.0.0') }}"
l_openshift_prometheus_alertmanager_image_version: "{{ openshift_prometheus_alertmanager_image_version | default('v0.9.1') }}"
l_openshift_prometheus_alertbuffer_image_version: "{{ openshift_prometheus_alertbuffer_image_version | default('v0.0.2') }}"
diff --git a/roles/openshift_provisioners/tasks/install_provisioners.yaml b/roles/openshift_provisioners/tasks/install_provisioners.yaml
index 324fdcc82..2d1217c74 100644
--- a/roles/openshift_provisioners/tasks/install_provisioners.yaml
+++ b/roles/openshift_provisioners/tasks/install_provisioners.yaml
@@ -16,10 +16,10 @@
when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_aws_secret_access_key is not defined
- name: Install support
- include: install_support.yaml
+ include_tasks: install_support.yaml
- name: Install EFS
- include: install_efs.yaml
+ include_tasks: install_efs.yaml
when: openshift_provisioners_efs | bool
- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
@@ -32,7 +32,7 @@
changed_when: no
- name: Create objects
- include: oc_apply.yaml
+ include_tasks: oc_apply.yaml
vars:
- kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
- namespace: "{{ openshift_provisioners_project }}"
@@ -51,5 +51,5 @@
when: ansible_check_mode
- name: Scaling up cluster
- include: start_cluster.yaml
+ include_tasks: start_cluster.yaml
when: start_cluster | default(true) | bool
diff --git a/roles/openshift_provisioners/tasks/install_support.yaml b/roles/openshift_provisioners/tasks/install_support.yaml
index d6db81ab9..93c4c394d 100644
--- a/roles/openshift_provisioners/tasks/install_support.yaml
+++ b/roles/openshift_provisioners/tasks/install_support.yaml
@@ -10,8 +10,8 @@
changed_when: False
check_mode: no
-- include: generate_secrets.yaml
+- include_tasks: generate_secrets.yaml
-- include: generate_clusterrolebindings.yaml
+- include_tasks: generate_clusterrolebindings.yaml
-- include: generate_serviceaccounts.yaml
+- include_tasks: generate_serviceaccounts.yaml
diff --git a/roles/openshift_provisioners/tasks/main.yaml b/roles/openshift_provisioners/tasks/main.yaml
index a50c78c97..4ba26b2b8 100644
--- a/roles/openshift_provisioners/tasks/main.yaml
+++ b/roles/openshift_provisioners/tasks/main.yaml
@@ -12,10 +12,10 @@
check_mode: no
tags: provisioners_init
-- include: "{{ role_path }}/tasks/install_provisioners.yaml"
+- include_tasks: install_provisioners.yaml
when: openshift_provisioners_install_provisioners | default(false) | bool
-- include: "{{ role_path }}/tasks/uninstall_provisioners.yaml"
+- include_tasks: uninstall_provisioners.yaml
when: not openshift_provisioners_install_provisioners | default(false) | bool
- name: Delete temp directory
diff --git a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml
index 0be4bc7d2..602dee773 100644
--- a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml
+++ b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml
@@ -1,6 +1,6 @@
---
- name: stop provisioners
- include: stop_cluster.yaml
+ include_tasks: stop_cluster.yaml
# delete the deployment objects that we had created
- name: delete provisioner api objects
diff --git a/roles/openshift_provisioners/templates/clusterrolebinding.j2 b/roles/openshift_provisioners/templates/clusterrolebinding.j2
index 994afa32d..1f26c93a4 100644
--- a/roles/openshift_provisioners/templates/clusterrolebinding.j2
+++ b/roles/openshift_provisioners/templates/clusterrolebinding.j2
@@ -4,7 +4,7 @@ metadata:
name: {{obj_name}}
{% if labels is defined%}
labels:
-{% for key, value in labels.iteritems() %}
+{% for key, value in labels.items() %}
{{key}}: {{value}}
{% endfor %}
{% endif %}
diff --git a/roles/openshift_provisioners/templates/efs.j2 b/roles/openshift_provisioners/templates/efs.j2
index 81b9ccca5..37fd02977 100644
--- a/roles/openshift_provisioners/templates/efs.j2
+++ b/roles/openshift_provisioners/templates/efs.j2
@@ -22,7 +22,7 @@ spec:
serviceAccountName: "{{deploy_serviceAccount}}"
{% if node_selector is iterable and node_selector | length > 0 %}
nodeSelector:
-{% for key, value in node_selector.iteritems() %}
+{% for key, value in node_selector.items() %}
{{key}}: "{{value}}"
{% endfor %}
{% endif %}
diff --git a/roles/openshift_provisioners/templates/pv.j2 b/roles/openshift_provisioners/templates/pv.j2
index f81b1617a..b648cd15e 100644
--- a/roles/openshift_provisioners/templates/pv.j2
+++ b/roles/openshift_provisioners/templates/pv.j2
@@ -4,13 +4,13 @@ metadata:
name: {{obj_name}}
{% if annotations is defined %}
annotations:
-{% for key,value in annotations.iteritems() %}
+{% for key,value in annotations.items() %}
{{key}}: {{value}}
{% endfor %}
{% endif %}
{% if labels is defined%}
labels:
-{% for key, value in labels.iteritems() %}
+{% for key, value in labels.items() %}
{{key}}: {{value}}
{% endfor %}
{% endif %}
diff --git a/roles/openshift_provisioners/templates/pvc.j2 b/roles/openshift_provisioners/templates/pvc.j2
index 0dd8772eb..0a88b7c88 100644
--- a/roles/openshift_provisioners/templates/pvc.j2
+++ b/roles/openshift_provisioners/templates/pvc.j2
@@ -4,7 +4,7 @@ metadata:
name: {{obj_name}}
{% if annotations is defined %}
annotations:
-{% for key,value in annotations.iteritems() %}
+{% for key,value in annotations.items() %}
{{key}}: {{value}}
{% endfor %}
{% endif %}
@@ -12,7 +12,7 @@ spec:
{% if pv_selector is defined and pv_selector is mapping %}
selector:
matchLabels:
-{% for key,value in pv_selector.iteritems() %}
+{% for key,value in pv_selector.items() %}
{{key}}: {{value}}
{% endfor %}
{% endif %}
diff --git a/roles/openshift_provisioners/templates/secret.j2 b/roles/openshift_provisioners/templates/secret.j2
index 78824095b..2fbb28829 100644
--- a/roles/openshift_provisioners/templates/secret.j2
+++ b/roles/openshift_provisioners/templates/secret.j2
@@ -4,7 +4,7 @@ metadata:
name: {{obj_name}}
{% if labels is defined%}
labels:
-{% for key, value in labels.iteritems() %}
+{% for key, value in labels.items() %}
{{key}}: {{value}}
{% endfor %}
{% endif %}
diff --git a/roles/openshift_provisioners/templates/serviceaccount.j2 b/roles/openshift_provisioners/templates/serviceaccount.j2
index b22acc594..ea19f17d7 100644
--- a/roles/openshift_provisioners/templates/serviceaccount.j2
+++ b/roles/openshift_provisioners/templates/serviceaccount.j2
@@ -4,7 +4,7 @@ metadata:
name: {{obj_name}}
{% if labels is defined%}
labels:
-{% for key, value in labels.iteritems() %}
+{% for key, value in labels.items() %}
{{key}}: {{value}}
{% endfor %}
{% endif %}
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 95ba9fe4c..5e7bde1e1 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -9,6 +9,8 @@
# TODO: This needs to be removed and placed into a role
- name: Ensure libselinux-python is installed
package: name=libselinux-python state=present
+ register: result
+ until: result | success
- name: Remove openshift_additional.repo file
file:
@@ -35,7 +37,7 @@
- when: r_openshift_repos_has_run is not defined
block:
- - include: centos_repos.yml
+ - include_tasks: centos_repos.yml
when:
- ansible_os_family == "RedHat"
- ansible_distribution != "Fedora"
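The register/until pairs added to package tasks in this patch let transient repository or network failures succeed on retry. Since retries and delay are omitted, Ansible's defaults of 3 retries at 5-second intervals apply; the fully explicit form of the same pattern would be:

    - name: Ensure libselinux-python is installed
      package: name=libselinux-python state=present
      register: result
      until: result | success
      retries: 3
      delay: 5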
diff --git a/roles/openshift_sanitize_inventory/tasks/deprecations.yml b/roles/openshift_sanitize_inventory/tasks/deprecations.yml
index 94d3acffc..795b8ee60 100644
--- a/roles/openshift_sanitize_inventory/tasks/deprecations.yml
+++ b/roles/openshift_sanitize_inventory/tasks/deprecations.yml
@@ -16,6 +16,6 @@
# for with_fileglob Ansible resolves the path relative to the roles/<rolename>/files directory
- name: Assign deprecated variables to correct counterparts
- include: "{{ item }}"
+ include_tasks: "{{ item }}"
with_fileglob:
- "../tasks/__deprecations_*.yml"
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index 70b236033..77428272c 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -1,7 +1,7 @@
---
# We should print out deprecations prior to any failures so that if a play does fail for other reasons
# the user would also be aware of any deprecated variables they should note to adjust
-- include: deprecations.yml
+- include_tasks: deprecations.yml
- name: Abort when conflicting deployment type variables are set
when:
@@ -53,7 +53,7 @@
openshift_release is "{{ openshift_release }}" which is not a valid version string.
Please set it to a version string like "3.4".
-- include: unsupported.yml
+- include_tasks: unsupported.yml
when:
- not openshift_enable_unsupported_configurations | default(false) | bool
diff --git a/roles/openshift_sanitize_inventory/tasks/unsupported.yml b/roles/openshift_sanitize_inventory/tasks/unsupported.yml
index b70ab90a1..1c4984467 100644
--- a/roles/openshift_sanitize_inventory/tasks/unsupported.yml
+++ b/roles/openshift_sanitize_inventory/tasks/unsupported.yml
@@ -40,3 +40,27 @@
openshift_master_dynamic_provisioning_enabled to True and set an
openshift_cloudprovider_kind. You can disable this check with
'dynamic_volumes_check=False'.
+
+# If the registry backend is GlusterFS and the configuration conflicts.
+- name: Ensure the hosted registry's GlusterFS storage is configured correctly
+ when:
+ - openshift_hosted_registry_storage_kind | default(none) in ['glusterfs']
+ - openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips != ''
+ - "'glusterfs_registry' in groups | default([])"
+ fail:
+ msg: |-
+      Configuring a value for openshift_hosted_registry_storage_glusterfs_ips while also specifying a glusterfs_registry host group is not allowed.
+      Specifying a glusterfs_registry host group indicates that a new GlusterFS cluster should be configured, whereas
+      specifying openshift_hosted_registry_storage_glusterfs_ips indicates that a pre-configured GlusterFS cluster should be used for the registry storage.
+
+# If the registry backend is GlusterFS and no GlusterFS source is specified.
+- name: Ensure the hosted registry's GlusterFS storage is configured correctly
+ when:
+ - openshift_hosted_registry_storage_kind | default(none) in ['glusterfs']
+ - not openshift_hosted_registry_storage_glusterfs_ips is defined
+ - not 'glusterfs_registry' in groups | default([])
+ - not 'glusterfs' in groups | default([])
+ fail:
+ msg: |-
+      Configuring openshift_hosted_registry_storage_kind=glusterfs without any GlusterFS option is not allowed.
+      Specify either the openshift_hosted_registry_storage_glusterfs_ips variable or one of the glusterfs or glusterfs_registry host groups.
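For reference, a hypothetical inventory fragment that satisfies both checks by pointing the registry at a pre-configured external GlusterFS cluster (all addresses illustrative):

    [OSEv3:vars]
    openshift_hosted_registry_storage_kind=glusterfs
    openshift_hosted_registry_storage_glusterfs_ips=['192.168.10.11','192.168.10.12','192.168.10.13']

With these variables set and no glusterfs_registry host group defined, neither fail task fires.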
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index 3507330e3..41a6691c9 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -40,7 +40,7 @@
command: >
{{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog
-- include: generate_certs.yml
+- include_tasks: generate_certs.yml
- copy:
src: kubeservicecatalog_roles_bindings.yml
@@ -252,7 +252,7 @@
session_affinity: None
service_type: ClusterIP
-- include: start_api_server.yml
+- include_tasks: start_api_server.yml
- name: Delete temp directory
file:
diff --git a/roles/openshift_service_catalog/tasks/main.yml b/roles/openshift_service_catalog/tasks/main.yml
index dc0d6a370..ffdbe2b11 100644
--- a/roles/openshift_service_catalog/tasks/main.yml
+++ b/roles/openshift_service_catalog/tasks/main.yml
@@ -1,8 +1,8 @@
---
# do any asserts here
-- include: install.yml
+- include_tasks: install.yml
when: not openshift_service_catalog_remove | default(false) | bool
-- include: remove.yml
+- include_tasks: remove.yml
when: openshift_service_catalog_remove | default(false) | bool
diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2
index 0e5bb7230..4f51b8c3c 100644
--- a/roles/openshift_service_catalog/templates/api_server.j2
+++ b/roles/openshift_service_catalog/templates/api_server.j2
@@ -19,7 +19,7 @@ spec:
spec:
serviceAccountName: service-catalog-apiserver
nodeSelector:
-{% for key, value in node_selector.iteritems() %}
+{% for key, value in node_selector.items() %}
{{key}}: "{{value}}"
{% endfor %}
containers:
diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2
index e5e5f6b50..137222f04 100644
--- a/roles/openshift_service_catalog/templates/controller_manager.j2
+++ b/roles/openshift_service_catalog/templates/controller_manager.j2
@@ -19,7 +19,7 @@ spec:
spec:
serviceAccountName: service-catalog-controller
nodeSelector:
-{% for key, value in node_selector.iteritems() %}
+{% for key, value in node_selector.items() %}
{{key}}: "{{value}}"
{% endfor %}
containers:
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index 03c157313..be749a2e1 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -87,7 +87,9 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service
| openshift_storage_glusterfs_block_image | 'gluster/glusterblock-provisioner'| Container image to use for glusterblock-provisioner pod, enterprise default is 'rhgs3/rhgs-gluster-block-prov-rhel7'
| openshift_storage_glusterfs_block_version | 'latest' | Container image version to use for glusterblock-provisioner pod
-| openshift_storage_glusterfs_block_max_host_vol | 15 | Max number of GlusterFS volumes to host glusterblock volumes
+| openshift_storage_glusterfs_block_host_vol_create| True | Automatically create GlusterFS volumes to host glusterblock volumes. **NOTE:** If this is False, block-hosting volumes will need to be manually created before glusterblock volumes can be provisioned
+| openshift_storage_glusterfs_block_host_vol_size | 100 | Size, in GB, of GlusterFS volumes that will be automatically created to host glusterblock volumes if not enough space is available for a glusterblock volume creation request. **NOTE:** This value is effectively an upper limit on the size of glusterblock volumes unless you manually create larger GlusterFS block-hosting volumes
+| openshift_storage_glusterfs_block_host_vol_max | 15 | Max number of GlusterFS volumes to host glusterblock volumes
| openshift_storage_glusterfs_s3_deploy | True | Deploy gluster-s3 service
| openshift_storage_glusterfs_s3_image | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7'
| openshift_storage_glusterfs_s3_version | 'latest' | Container image version to use for gluster-s3 pod
@@ -131,8 +133,7 @@ are an exception:
| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above
| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above
-Additionally, this role's behavior responds to the following registry-specific
-variables:
+Additionally, this role's behavior responds to several registry-specific variables in the [openshift_hosted role](../openshift_hosted/README.md):
| Name | Default value | Description |
|-------------------------------------------------------|------------------------------|-----------------------------------------|
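A hypothetical inventory snippet exercising the three new variables (the values shown happen to be the defaults, included only for illustration):

    [OSEv3:vars]
    openshift_storage_glusterfs_block_host_vol_create=True
    openshift_storage_glusterfs_block_host_vol_size=100
    openshift_storage_glusterfs_block_host_vol_max=15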
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index c3db36d37..814d6ff28 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -10,7 +10,9 @@ openshift_storage_glusterfs_version: 'latest'
openshift_storage_glusterfs_block_deploy: True
openshift_storage_glusterfs_block_image: "{{ 'rhgs3/rhgs-gluster-block-prov-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/glusterblock-provisioner' | quote }}"
openshift_storage_glusterfs_block_version: 'latest'
-openshift_storage_glusterfs_block_max_host_vol: 15
+openshift_storage_glusterfs_block_host_vol_create: True
+openshift_storage_glusterfs_block_host_vol_size: 100
+openshift_storage_glusterfs_block_host_vol_max: 15
openshift_storage_glusterfs_s3_deploy: True
openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}"
openshift_storage_glusterfs_s3_version: 'latest'
@@ -54,7 +56,9 @@ openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_ve
openshift_storage_glusterfs_registry_block_deploy: "{{ openshift_storage_glusterfs_block_deploy }}"
openshift_storage_glusterfs_registry_block_image: "{{ openshift_storage_glusterfs_block_image }}"
openshift_storage_glusterfs_registry_block_version: "{{ openshift_storage_glusterfs_block_version }}"
-openshift_storage_glusterfs_registry_block_max_host_vol: "{{ openshift_storage_glusterfs_block_max_host_vol }}"
+openshift_storage_glusterfs_registry_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}"
+openshift_storage_glusterfs_registry_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}"
+openshift_storage_glusterfs_registry_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}"
openshift_storage_glusterfs_registry_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy }}"
openshift_storage_glusterfs_registry_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
openshift_storage_glusterfs_registry_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml b/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml
index 2cc69644c..63dd5cce6 100644
--- a/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml
@@ -2,7 +2,7 @@
kind: Template
apiVersion: v1
metadata:
- name: glusterblock
+ name: glusterblock-provisioner
labels:
glusterfs: block-template
glusterblock: template
@@ -83,7 +83,6 @@ objects:
containers:
- name: glusterblock-provisioner
image: ${IMAGE_NAME}:${IMAGE_VERSION}
- image: gluster/glusterblock-provisioner:latest
imagePullPolicy: IfNotPresent
env:
- name: PROVISIONER_NAME
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml
new file mode 100644
index 000000000..34af652c2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml
@@ -0,0 +1,133 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: heketi-template
+ deploy-heketi: support
+ annotations:
+ description: Bootstrap Heketi installation
+ tags: glusterfs,heketi,installation
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-service
+ deploy-heketi: support
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: deploy-heketi-${CLUSTER_NAME}
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-route
+ deploy-heketi: support
+ spec:
+ to:
+ kind: Service
+ name: deploy-heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
+ deploy-heketi: support
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ deploy-heketi: support
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: ${HEKETI_FSTAB}
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_FSTAB
+ displayName: heketi fstab path
+  description: Set the fstab path; this file is populated with the bricks that heketi creates
+ value: /var/lib/heketi/fstab
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml
new file mode 100644
index 000000000..064b51473
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml
@@ -0,0 +1,67 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: gluster-s3-pvcs
+ labels:
+ glusterfs: s3-pvcs-template
+ gluster-s3: pvcs-template
+ annotations:
+ description: Gluster S3 service template
+ tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+ name: "${PVC}"
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pvc
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: "${PVC_SIZE}"
+- kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+ name: "${META_PVC}"
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-meta-pvc
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: "${META_PVC_SIZE}"
+parameters:
+- name: S3_ACCOUNT
+ displayName: S3 Account Name
+ description: S3 storage account which will provide storage on GlusterFS volumes
+ required: true
+- name: PVC
+ displayName: Primary GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage
+ required: true
+- name: PVC_SIZE
+ displayName: Primary GlusterFS-backed PVC capacity
+ description: Capacity for GlusterFS-backed PVC for object storage
+ value: 2Gi
+- name: META_PVC
+ displayName: Metadata GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage metadata
+ required: true
+- name: META_PVC_SIZE
+ displayName: Metadata GlusterFS-backed PVC capacity
+ description: Capacity for GlusterFS-backed PVC for object storage metadata
+ value: 1Gi
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml
new file mode 100644
index 000000000..896a1b226
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml
@@ -0,0 +1,140 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: gluster-s3
+ labels:
+ glusterfs: s3-template
+ gluster-s3: template
+ annotations:
+ description: Gluster S3 service template
+ tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: s3-pod
+ type: ClusterIP
+ sessionAffinity: None
+ status:
+ loadBalancer: {}
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ spec:
+ to:
+ kind: Service
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ annotations:
+ openshift.io/scc: privileged
+ description: Defines how to deploy gluster s3 object storage
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ template:
+ metadata:
+ name: gluster-${CLUSTER_NAME}-${S3_ACCOUNT}-s3
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ spec:
+ containers:
+ - name: gluster-s3
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: gluster
+ containerPort: 8080
+ protocol: TCP
+ env:
+ - name: S3_ACCOUNT
+ value: "${S3_ACCOUNT}"
+ - name: S3_USER
+ value: "${S3_USER}"
+ - name: S3_PASSWORD
+ value: "${S3_PASSWORD}"
+ resources: {}
+ volumeMounts:
+ - name: gluster-vol1
+ mountPath: "/mnt/gluster-object/${S3_ACCOUNT}"
+ - name: gluster-vol2
+ mountPath: "/mnt/gluster-object/gsmetadata"
+ - name: glusterfs-cgroup
+ readOnly: true
+ mountPath: "/sys/fs/cgroup"
+ terminationMessagePath: "/dev/termination-log"
+ securityContext:
+ privileged: true
+ volumes:
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: gluster-vol1
+ persistentVolumeClaim:
+ claimName: ${PVC}
+ - name: gluster-vol2
+ persistentVolumeClaim:
+ claimName: ${META_PVC}
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ serviceAccountName: default
+ serviceAccount: default
+ securityContext: {}
+parameters:
+- name: IMAGE_NAME
+  displayName: gluster-s3 container image name
+ required: True
+- name: IMAGE_VERSION
+  displayName: gluster-s3 container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
+- name: S3_ACCOUNT
+ displayName: S3 Account Name
+ description: S3 storage account which will provide storage on GlusterFS volumes
+ required: true
+- name: S3_USER
+ displayName: S3 User
+ description: S3 user who can access the S3 storage account
+ required: true
+- name: S3_PASSWORD
+ displayName: S3 User Password
+ description: Password for the S3 user
+ required: true
+- name: PVC
+ displayName: Primary GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage
+ value: gluster-s3-claim
+- name: META_PVC
+ displayName: Metadata GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage metadata
+ value: gluster-s3-meta-claim
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml b/roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml
new file mode 100644
index 000000000..63dd5cce6
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml
@@ -0,0 +1,104 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterblock-provisioner
+ labels:
+ glusterfs: block-template
+ glusterblock: template
+ annotations:
+ description: glusterblock provisioner template
+ tags: glusterfs
+objects:
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: glusterblock-provisioner-runner
+ labels:
+ glusterfs: block-provisioner-runner-clusterrole
+ glusterblock: provisioner-runner-clusterrole
+ rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["list", "watch", "create", "update", "patch"]
+ - apiGroups: [""]
+ resources: ["services"]
+ verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["routes"]
+ verbs: ["get", "list"]
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-sa
+ glusterblock: ${CLUSTER_NAME}-provisioner-sa
+- apiVersion: v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ roleRef:
+ name: glusterblock-provisioner-runner
+ subjects:
+ - kind: ServiceAccount
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ namespace: ${NAMESPACE}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner-dc
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-dc
+ glusterblock: ${CLUSTER_NAME}-provisioner-dc
+ annotations:
+ description: Defines how to deploy the glusterblock provisioner pod.
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: glusterblock-provisioner
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+ spec:
+ serviceAccountName: glusterblock-${CLUSTER_NAME}-provisioner
+ containers:
+ - name: glusterblock-provisioner
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: PROVISIONER_NAME
+ value: gluster.org/glusterblock
+parameters:
+- name: IMAGE_NAME
+ displayName: glusterblock provisioner container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: glusterblock provisioner container image version
+ required: True
+- name: NAMESPACE
+ displayName: glusterblock provisioner namespace
+ description: The namespace in which these resources are being created
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml
new file mode 100644
index 000000000..09850a2c2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml
@@ -0,0 +1,154 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterfs
+ labels:
+ glusterfs: template
+ annotations:
+ description: GlusterFS DaemonSet template
+ tags: glusterfs
+objects:
+- kind: DaemonSet
+ apiVersion: extensions/v1beta1
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-daemonset
+ annotations:
+ description: GlusterFS DaemonSet
+ tags: glusterfs
+ spec:
+ selector:
+ matchLabels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ template:
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ glusterfs-node: pod
+ spec:
+ nodeSelector: "${{NODE_LABELS}}"
+ hostNetwork: true
+ containers:
+ - name: glusterfs
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: GB_GLFS_LRU_COUNT
+ value: "${GB_GLFS_LRU_COUNT}"
+ - name: TCMU_LOGDIR
+ value: "${TCMU_LOGDIR}"
+ resources:
+ requests:
+ memory: 100Mi
+ cpu: 100m
+ volumeMounts:
+ - name: glusterfs-heketi
+ mountPath: "/var/lib/heketi"
+ - name: glusterfs-run
+ mountPath: "/run"
+ - name: glusterfs-lvm
+ mountPath: "/run/lvm"
+ - name: glusterfs-etc
+ mountPath: "/etc/glusterfs"
+ - name: glusterfs-logs
+ mountPath: "/var/log/glusterfs"
+ - name: glusterfs-config
+ mountPath: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ mountPath: "/dev"
+ - name: glusterfs-misc
+ mountPath: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ mountPath: "/sys/fs/cgroup"
+ readOnly: true
+ - name: glusterfs-ssl
+ mountPath: "/etc/ssl"
+ readOnly: true
+ securityContext:
+ capabilities: {}
+ privileged: true
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ terminationMessagePath: "/dev/termination-log"
+ volumes:
+ - name: glusterfs-heketi
+ hostPath:
+ path: "/var/lib/heketi"
+ - name: glusterfs-run
+ emptyDir: {}
+ - name: glusterfs-lvm
+ hostPath:
+ path: "/run/lvm"
+ - name: glusterfs-etc
+ hostPath:
+ path: "/etc/glusterfs"
+ - name: glusterfs-logs
+ hostPath:
+ path: "/var/log/glusterfs"
+ - name: glusterfs-config
+ hostPath:
+ path: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ hostPath:
+ path: "/dev"
+ - name: glusterfs-misc
+ hostPath:
+ path: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: glusterfs-ssl
+ hostPath:
+ path: "/etc/ssl"
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+parameters:
+- name: NODE_LABELS
+ displayName: Daemonset Node Labels
+  description: Labels which define the daemonset node selector. Must contain at least one label of the format 'glusterfs=<CLUSTER_NAME>-host'
+ value: '{ "glusterfs": "storage-host" }'
+- name: IMAGE_NAME
+ displayName: GlusterFS container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: GlusterFS container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
+- name: GB_GLFS_LRU_COUNT
+ displayName: Maximum number of block hosting volumes
+  description: Sets the maximum number of block hosting volumes.
+ value: "15"
+ required: true
+- name: TCMU_LOGDIR
+ displayName: Tcmu runner log directory
+  description: Sets the tcmu-runner log directory
+ value: "/var/log/glusterfs/gluster-block"
+ required: true
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml
new file mode 100644
index 000000000..28cdb2982
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml
@@ -0,0 +1,136 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-template
+ annotations:
+ description: Heketi service deployment template
+ tags: glusterfs,heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-service
+ heketi: ${CLUSTER_NAME}-service
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-route
+ heketi: ${CLUSTER_NAME}-route
+ spec:
+ to:
+ kind: Service
+ name: heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-dc
+ heketi: ${CLUSTER_NAME}-dc
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ heketi: ${CLUSTER_NAME}-pod
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: ${HEKETI_FSTAB}
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ glusterfs:
+ endpoints: heketi-db-${CLUSTER_NAME}-endpoints
+ path: heketidbstorage
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_FSTAB
+ displayName: heketi fstab path
+  description: Set the fstab path; this file is populated with the bricks that heketi creates
+ value: /var/lib/heketi/fstab
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
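Outside of this role, a template like the one above can be instantiated by hand with oc process. A sketch, assuming the template object already exists in a glusterfs namespace and using illustrative parameter values:

    oc process heketi -n glusterfs \
      -p IMAGE_NAME=heketi/heketi -p IMAGE_VERSION=latest \
      -p CLUSTER_NAME=storage | oc create -n glusterfs -f -

The role itself drives the same flow through the oc_process module, as the glusterblock task changes below show.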
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
index bba1de654..d6be8c726 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
@@ -29,21 +29,21 @@
src: "{{ openshift.common.examples_content_version }}/{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
with_items:
- - "glusterblock-template.yml"
+ - "glusterblock-provisioner.yml"
- name: Create glusterblock provisioner template
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: template
- name: "glusterblock"
+ name: "glusterblock-provisioner"
state: present
files:
- - "{{ mktemp.stdout }}/glusterblock-template.yml"
+ - "{{ mktemp.stdout }}/glusterblock-provisioner.yml"
- name: Deploy glusterblock provisioner
oc_process:
namespace: "{{ glusterfs_namespace }}"
- template_name: "glusterblock"
+ template_name: "glusterblock-provisioner"
create: True
params:
IMAGE_NAME: "{{ glusterfs_block_image }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index 2a678af57..4b33e91b4 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -4,6 +4,8 @@
when:
- not openshift.common.is_atomic | bool
- not glusterfs_heketi_is_native | bool
+ register: result
+ until: result | success
- name: Verify heketi-cli is installed
shell: "command -v {{ glusterfs_heketi_cli }} >/dev/null 2>&1 || { echo >&2 'ERROR: Make sure heketi-cli is available, then re-run the installer'; exit 1; }"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index e2d740f35..71c1311cd 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -12,7 +12,9 @@
glusterfs_block_deploy: "{{ openshift_storage_glusterfs_block_deploy | bool }}"
glusterfs_block_image: "{{ openshift_storage_glusterfs_block_image }}"
glusterfs_block_version: "{{ openshift_storage_glusterfs_block_version }}"
- glusterfs_block_max_host_vol: "{{ openshift_storage_glusterfs_block_max_host_vol }}"
+ glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}"
+ glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}"
+ glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}"
glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy | bool }}"
glusterfs_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
glusterfs_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
@@ -42,6 +44,6 @@
glusterfs_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_heketi_ssh_sudo | bool }}"
glusterfs_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_heketi_ssh_keyfile }}"
glusterfs_heketi_fstab: "{{ openshift_storage_glusterfs_heketi_fstab }}"
- glusterfs_nodes: "{{ groups.glusterfs }}"
+ glusterfs_nodes: "{{ groups.glusterfs | default([]) }}"
- include: glusterfs_common.yml
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index f98d4c62f..30e83e79b 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -77,6 +77,14 @@
files:
- "{{ mktemp.stdout }}/glusterfs-template.yml"
+- name: Check GlusterFS DaemonSet status
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: daemonset
+ name: glusterfs-{{ glusterfs_name }}
+ state: list
+ register: glusterfs_ds
+
- name: Deploy GlusterFS pods
oc_process:
namespace: "{{ glusterfs_namespace }}"
@@ -87,7 +95,9 @@
IMAGE_VERSION: "{{ glusterfs_version }}"
NODE_LABELS: "{{ glusterfs_nodeselector }}"
CLUSTER_NAME: "{{ glusterfs_name }}"
- GB_GLFS_LRU_COUNT: "{{ glusterfs_block_max_host_vol }}"
+ GB_GLFS_LRU_COUNT: "{{ glusterfs_block_host_vol_max }}"
+ when: (glusterfs_ds.results.results[0].status is not defined) or
+ (glusterfs_ds.results.results[0].status.numberReady | default(0) < glusterfs_ds.results.results[0].status.desiredNumberScheduled | default(glusterfs_nodes | count))
- name: Wait for GlusterFS pods
oc_obj:
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index baac52179..d3cba61cf 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -12,7 +12,9 @@
glusterfs_block_deploy: "{{ openshift_storage_glusterfs_registry_block_deploy | bool }}"
glusterfs_block_image: "{{ openshift_storage_glusterfs_registry_block_image }}"
glusterfs_block_version: "{{ openshift_storage_glusterfs_registry_block_version }}"
- glusterfs_block_max_host_vol: "{{ openshift_storage_glusterfs_registry_block_max_host_vol }}"
+ glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_registry_block_host_vol_create }}"
+ glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_registry_block_host_vol_size }}"
+ glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_registry_block_host_vol_max }}"
glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_registry_s3_deploy | bool }}"
glusterfs_s3_image: "{{ openshift_storage_glusterfs_registry_s3_image }}"
glusterfs_s3_version: "{{ openshift_storage_glusterfs_registry_s3_version }}"
@@ -42,52 +44,13 @@
glusterfs_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_registry_heketi_ssh_sudo | bool }}"
glusterfs_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_registry_heketi_ssh_keyfile }}"
glusterfs_heketi_fstab: "{{ openshift_storage_glusterfs_registry_heketi_fstab }}"
- glusterfs_nodes: "{{ groups.glusterfs_registry | default(groups.glusterfs) }}"
+    glusterfs_nodes: "{% if groups.glusterfs_registry is defined %}{% set nodes = groups.glusterfs_registry %}{% elif groups.glusterfs is defined %}{% set nodes = groups.glusterfs %}{% else %}{% set nodes = [] %}{% endif %}{{ nodes }}"
- include: glusterfs_common.yml
when:
- glusterfs_nodes | default([]) | count > 0
- "'glusterfs' not in groups or glusterfs_nodes != groups.glusterfs"
-- name: Delete pre-existing GlusterFS registry resources
- oc_obj:
- namespace: "{{ glusterfs_namespace }}"
- kind: "{{ item.kind }}"
- name: "{{ item.name }}"
- state: absent
- with_items:
- - kind: "svc"
- name: "glusterfs-{{ glusterfs_name | default }}-endpoints"
- failed_when: False
-
-- name: Generate GlusterFS registry endpoints
- template:
- src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-endpoints.yml.j2"
- dest: "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
-
-- name: Copy GlusterFS registry service
- template:
- src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml.j2"
- dest: "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
-
-- name: Create GlusterFS registry endpoints
- oc_obj:
- namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
- state: present
- kind: endpoints
- name: "glusterfs-{{ glusterfs_name }}-endpoints"
- files:
- - "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
-
-- name: Create GlusterFS registry service
- oc_obj:
- namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
- state: present
- kind: service
- name: "glusterfs-{{ glusterfs_name }}-endpoints"
- files:
- - "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
-
- name: Check if GlusterFS registry volume exists
command: "{{ glusterfs_heketi_client }} volume list"
register: registry_volume
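The glusterfs_nodes expression above falls back from groups.glusterfs_registry to groups.glusterfs to an empty list. An equivalent, shorter spelling (a refactoring sketch, not what this patch does) would be:

    glusterfs_nodes: "{{ groups.glusterfs_registry | default(groups.glusterfs | default([])) }}"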
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
index 579b11bb7..565e9be98 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
@@ -31,6 +31,12 @@
"port" : "{{ glusterfs_heketi_ssh_port }}",
"user" : "{{ glusterfs_heketi_ssh_user }}",
"sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
- }
+ },
+
+ "_auto_create_block_hosting_volume": "Creates Block Hosting volumes automatically if not found or exsisting volume exhausted",
+ "auto_create_block_hosting_volume": {{ glusterfs_block_host_vol_create | lower }},
+
+ "_block_hosting_volume_size": "New block hosting volume will be created in size mentioned, This is considered only if auto-create is enabled.",
+ "block_hosting_volume_size": {{ glusterfs_block_host_vol_size }}
}
}
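With the role defaults above (block_host_vol_create True, block_host_vol_size 100), the tail of the rendered heketi.json comes out roughly as:

    "auto_create_block_hosting_volume": true,
    "block_hosting_volume_size": 100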
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..11c9195bb
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..3f869d2b7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..095fb780f
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}
+provisioner: kubernetes.io/glusterfs
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+{% if glusterfs_heketi_admin_key is defined %}
+ secretNamespace: "{{ glusterfs_namespace }}"
+ secretName: "heketi-{{ glusterfs_name }}-admin-secret"
+{%- endif -%}
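Rendered for a cluster named storage, with heketi running natively (so resturl comes from the route) and an admin key defined, the template above produces something like the following; the route hostname is illustrative:

    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: glusterfs-storage
    provisioner: kubernetes.io/glusterfs
    parameters:
      resturl: "http://heketi-storage.apps.example.com"
      restuser: "admin"
      secretNamespace: "glusterfs"
      secretName: "heketi-storage-admin-secret"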
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2
new file mode 100644
index 000000000..565e9be98
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2
@@ -0,0 +1,42 @@
+{
+ "_port_comment": "Heketi Server Port Number",
+ "port" : "8080",
+
+ "_use_auth": "Enable JWT authorization. Please enable for deployment",
+ "use_auth" : false,
+
+ "_jwt" : "Private keys for access",
+ "jwt" : {
+ "_admin" : "Admin has access to all APIs",
+ "admin" : {
+ "key" : "My Secret"
+ },
+ "_user" : "User only has access to /volumes endpoint",
+ "user" : {
+ "key" : "My Secret"
+ }
+ },
+
+ "_glusterfs_comment": "GlusterFS Configuration",
+ "glusterfs" : {
+
+ "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
+ "executor" : "{{ glusterfs_heketi_executor }}",
+
+ "_db_comment": "Database file name",
+ "db" : "/var/lib/heketi/heketi.db",
+
+ "sshexec" : {
+ "keyfile" : "/etc/heketi/private_key",
+ "port" : "{{ glusterfs_heketi_ssh_port }}",
+ "user" : "{{ glusterfs_heketi_ssh_user }}",
+ "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
+ },
+
+ "_auto_create_block_hosting_volume": "Creates Block Hosting volumes automatically if not found or exsisting volume exhausted",
+ "auto_create_block_hosting_volume": {{ glusterfs_block_host_vol_create | lower }},
+
+ "_block_hosting_volume_size": "New block hosting volume will be created in size mentioned, This is considered only if auto-create is enabled.",
+ "block_hosting_volume_size": {{ glusterfs_block_host_vol_size }}
+ }
+}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2
new file mode 100644
index 000000000..d6c28f6dd
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2
@@ -0,0 +1,49 @@
+{
+ "clusters": [
+{%- set clusters = {} -%}
+{%- for node in glusterfs_nodes -%}
+  {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+ {%- if cluster in clusters -%}
+ {%- set _dummy = clusters[cluster].append(node) -%}
+ {%- else -%}
+ {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+ {
+ "nodes": [
+{%- for node in clusters[cluster] -%}
+ {
+ "node": {
+ "hostnames": {
+ "manage": [
+{%- if 'glusterfs_hostname' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_hostname }}"
+{%- elif 'openshift' in hostvars[node] -%}
+ "{{ hostvars[node].openshift.node.nodename }}"
+{%- else -%}
+ "{{ node }}"
+{%- endif -%}
+ ],
+ "storage": [
+{%- if 'glusterfs_ip' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_ip }}"
+{%- else -%}
+ "{{ hostvars[node].openshift.common.ip }}"
+{%- endif -%}
+ ]
+ },
+ "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+ },
+ "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+ "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+}
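For a single hypothetical node with glusterfs_ip 192.168.10.11, hostname node1.example.com, one device, and the default zone, the template above renders (whitespace aside) to roughly:

    {
      "clusters": [
        {
          "nodes": [
            {
              "node": {
                "hostnames": {
                  "manage": [ "node1.example.com" ],
                  "storage": [ "192.168.10.11" ]
                },
                "zone": 1
              },
              "devices": [ "/dev/sdb" ]
            }
          ]
        }
      ]
    }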
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index c4e023c1e..c25cad74c 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -1,10 +1,11 @@
---
- name: setup firewall
- include: firewall.yml
- static: yes
+ import_tasks: firewall.yml
- name: Install nfs-utils
package: name=nfs-utils state=present
+ register: result
+ until: result | success
- name: Configure NFS
lineinfile:
diff --git a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
index 03f4fcec0..bee786a90 100644
--- a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
+++ b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
@@ -2,6 +2,8 @@
- name: Install NFS server
package: name=nfs-utils state=present
when: not openshift.common.is_containerized | bool
+ register: result
+ until: result | success
- name: Start rpcbind
systemd:
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index 1c8b9046c..4f9158ade 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -93,11 +93,11 @@
- inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config']
block:
- name: Set openshift_version for rpm installation
- include: set_version_rpm.yml
+ include_tasks: set_version_rpm.yml
when: not is_containerized | bool
- name: Set openshift_version for containerized installation
- include: set_version_containerized.yml
+ include_tasks: set_version_containerized.yml
when: is_containerized | bool
- block:
diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml
index b727eb74d..574e89899 100644
--- a/roles/openshift_version/tasks/set_version_containerized.yml
+++ b/roles/openshift_version/tasks/set_version_containerized.yml
@@ -20,7 +20,7 @@
- name: Lookup latest containerized version if no version specified
command: >
- docker run --rm {{ openshift.common.cli_image }}:latest version
+ docker run --rm {{ openshift_cli_image }}:latest version
register: cli_image_version
when:
- openshift_version is not defined
@@ -43,7 +43,7 @@
# and use that value instead.
- name: Set precise containerized version to configure if openshift_release specified
command: >
- docker run --rm {{ openshift.common.cli_image }}:v{{ openshift_version }} version
+ docker run --rm {{ openshift_cli_image }}:v{{ openshift_version }} version
register: cli_image_version
when:
- openshift_version is defined
diff --git a/roles/os_firewall/tasks/firewalld.yml b/roles/os_firewall/tasks/firewalld.yml
index 54430f402..1e27ebaf9 100644
--- a/roles/os_firewall/tasks/firewalld.yml
+++ b/roles/os_firewall/tasks/firewalld.yml
@@ -8,6 +8,8 @@
package:
name: firewalld
state: present
+ register: result
+ until: result | success
- name: Ensure iptables services are not enabled
systemd:
diff --git a/roles/os_firewall/tasks/iptables.yml b/roles/os_firewall/tasks/iptables.yml
index 2d74f2e48..a7c13e487 100644
--- a/roles/os_firewall/tasks/iptables.yml
+++ b/roles/os_firewall/tasks/iptables.yml
@@ -22,6 +22,8 @@
- iptables
- iptables-services
when: not r_os_firewall_is_atomic | bool
+ register: result
+ until: result | success
- name: Start and enable iptables service
systemd:
diff --git a/roles/os_firewall/tasks/main.yml b/roles/os_firewall/tasks/main.yml
index c477d386c..99084cd3f 100644
--- a/roles/os_firewall/tasks/main.yml
+++ b/roles/os_firewall/tasks/main.yml
@@ -8,12 +8,12 @@
set_fact:
r_os_firewall_is_atomic: "{{ r_os_firewall_ostree_booted.stat.exists }}"
-- include: firewalld.yml
+- include_tasks: firewalld.yml
when:
- os_firewall_enabled | bool
- os_firewall_use_firewalld | bool
-- include: iptables.yml
+- include_tasks: iptables.yml
when:
- os_firewall_enabled | bool
- not os_firewall_use_firewalld | bool
diff --git a/roles/os_update_latest/tasks/main.yml b/roles/os_update_latest/tasks/main.yml
index 6b5fd0106..60d665587 100644
--- a/roles/os_update_latest/tasks/main.yml
+++ b/roles/os_update_latest/tasks/main.yml
@@ -1,3 +1,5 @@
---
- name: Update all packages
package: name=* state=latest
+ register: result
+ until: result | success
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index b06f51908..9ca49b569 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -36,6 +36,8 @@
yum:
name: subscription-manager
state: present
+ register: result
+ until: result | success
- name: RedHat subscriptions
redhat_subscription:
diff --git a/roles/template_service_broker/tasks/main.yml b/roles/template_service_broker/tasks/main.yml
index 6a4d89a46..71c8ca470 100644
--- a/roles/template_service_broker/tasks/main.yml
+++ b/roles/template_service_broker/tasks/main.yml
@@ -1,8 +1,8 @@
---
# do any asserts here
-- include: install.yml
+- include_tasks: install.yml
when: template_service_broker_install | bool
-- include: remove.yml
+- include_tasks: remove.yml
when: template_service_broker_remove | bool