Diffstat (limited to 'roles')
-rw-r--r--  roles/docker/defaults/main.yml  5
-rw-r--r--  roles/docker/tasks/package_docker.yml  14
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml  21
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml  20
-rw-r--r--  roles/docker/templates/crio.conf.j2  5
-rw-r--r--  roles/etcd_common/defaults/main.yml  2
-rw-r--r--  roles/etcd_common/tasks/backup.yml  5
-rw-r--r--  roles/etcd_common/tasks/main.yml  2
-rw-r--r--  roles/etcd_common/tasks/noop.yml  4
-rw-r--r--  roles/flannel_register/defaults/main.yaml  5
-rw-r--r--  roles/flannel_register/templates/flannel-config.json  1
-rw-r--r--  roles/lib_utils/library/repoquery.py  18
-rw-r--r--  roles/lib_utils/library/yedit.py  1
-rw-r--r--  roles/lib_utils/src/ansible/repoquery.py  17
-rw-r--r--  roles/lib_utils/src/lib/import.py  1
-rw-r--r--  roles/openshift_aws/README.md  84
-rw-r--r--  roles/openshift_aws/defaults/main.yml  209
-rw-r--r--  roles/openshift_aws/filter_plugins/filters.py  28
-rw-r--r--  roles/openshift_aws/meta/main.yml  3
-rw-r--r--  roles/openshift_aws/tasks/ami_copy.yml  34
-rw-r--r--  roles/openshift_aws/tasks/build_ami.yml  48
-rw-r--r--  roles/openshift_aws/tasks/build_node_group.yml  34
-rw-r--r--  roles/openshift_aws/tasks/elb.yml  68
-rw-r--r--  roles/openshift_aws/tasks/iam_cert.yml  29
-rw-r--r--  roles/openshift_aws/tasks/launch_config.yml  45
-rw-r--r--  roles/openshift_aws/tasks/provision.yml  54
-rw-r--r--  roles/openshift_aws/tasks/provision_nodes.yml  66
-rw-r--r--  roles/openshift_aws/tasks/s3.yml  7
-rw-r--r--  roles/openshift_aws/tasks/scale_group.yml  32
-rw-r--r--  roles/openshift_aws/tasks/seal_ami.yml  49
-rw-r--r--  roles/openshift_aws/tasks/security_group.yml  45
-rw-r--r--  roles/openshift_aws/tasks/ssh_keys.yml (renamed from roles/openshift_aws_ssh_keys/tasks/main.yml)  4
-rw-r--r--  roles/openshift_aws/tasks/vpc.yml (renamed from roles/openshift_aws_vpc/tasks/main.yml)  21
-rw-r--r--  roles/openshift_aws_ami_copy/README.md  50
-rw-r--r--  roles/openshift_aws_ami_copy/tasks/main.yml  26
-rw-r--r--  roles/openshift_aws_elb/README.md  75
-rw-r--r--  roles/openshift_aws_elb/defaults/main.yml  33
-rw-r--r--  roles/openshift_aws_elb/meta/main.yml  12
-rw-r--r--  roles/openshift_aws_elb/tasks/main.yml  57
-rw-r--r--  roles/openshift_aws_iam_kms/README.md  43
-rw-r--r--  roles/openshift_aws_iam_kms/defaults/main.yml  1
-rw-r--r--  roles/openshift_aws_iam_kms/meta/main.yml  13
-rw-r--r--  roles/openshift_aws_iam_kms/tasks/main.yml  18
-rw-r--r--  roles/openshift_aws_launch_config/README.md  72
-rw-r--r--  roles/openshift_aws_launch_config/defaults/main.yml  1
-rw-r--r--  roles/openshift_aws_launch_config/meta/main.yml  12
-rw-r--r--  roles/openshift_aws_launch_config/tasks/main.yml  50
-rw-r--r--  roles/openshift_aws_launch_config/templates/cloud-init.j2  9
-rw-r--r--  roles/openshift_aws_node_group/README.md  77
-rw-r--r--  roles/openshift_aws_node_group/defaults/main.yml  58
-rw-r--r--  roles/openshift_aws_node_group/tasks/main.yml  32
-rw-r--r--  roles/openshift_aws_s3/README.md  43
-rw-r--r--  roles/openshift_aws_s3/tasks/main.yml  6
-rw-r--r--  roles/openshift_aws_sg/README.md  59
-rw-r--r--  roles/openshift_aws_sg/defaults/main.yml  48
-rw-r--r--  roles/openshift_aws_sg/tasks/main.yml  53
-rw-r--r--  roles/openshift_aws_ssh_keys/README.md  49
-rw-r--r--  roles/openshift_aws_vpc/README.md  62
-rw-r--r--  roles/openshift_aws_vpc/defaults/main.yml  1
-rw-r--r--  roles/openshift_cfme/defaults/main.yml  3
-rw-r--r--  roles/openshift_cfme/meta/main.yml  1
-rw-r--r--  roles/openshift_cfme/tasks/nfs.yml  7
-rw-r--r--  roles/openshift_cli/meta/main.yml  1
-rw-r--r--  roles/openshift_common/README.md  45
-rw-r--r--  roles/openshift_common/defaults/main.yml  3
-rw-r--r--  roles/openshift_common/meta/main.yml  15
-rw-r--r--  roles/openshift_common/tasks/main.yml  78
-rw-r--r--  roles/openshift_examples/meta/main.yml  3
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py  126
-rw-r--r--  roles/openshift_gcp/tasks/main.yaml  43
-rw-r--r--  roles/openshift_gcp/templates/dns.j2.sh  13
-rw-r--r--  roles/openshift_gcp/templates/provision.j2.sh  318
-rw-r--r--  roles/openshift_gcp/templates/remove.j2.sh  156
-rw-r--r--  roles/openshift_gcp_image_prep/files/partition.conf  3
-rw-r--r--  roles/openshift_gcp_image_prep/tasks/main.yaml  18
-rw-r--r--  roles/openshift_health_checker/action_plugins/openshift_health_check.py  160
-rw-r--r--  roles/openshift_health_checker/callback_plugins/zz_failure_summary.py  16
-rw-r--r--  roles/openshift_health_checker/library/aos_version.py  31
-rw-r--r--  roles/openshift_health_checker/openshift_checks/__init__.py  134
-rw-r--r--  roles/openshift_health_checker/openshift_checks/disk_availability.py  9
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py  87
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py  8
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/logging.py  4
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py  2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/mixins.py  3
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_availability.py  2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_update.py  2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py  3
-rw-r--r--  roles/openshift_health_checker/test/action_plugin_test.py  134
-rw-r--r--  roles/openshift_health_checker/test/disk_availability_test.py  13
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py  187
-rw-r--r--  roles/openshift_health_checker/test/elasticsearch_test.py  18
-rw-r--r--  roles/openshift_health_checker/test/logging_index_time_test.py  8
-rw-r--r--  roles/openshift_health_checker/test/openshift_check_test.py  43
-rw-r--r--  roles/openshift_health_checker/test/ovs_version_test.py  2
-rw-r--r--  roles/openshift_health_checker/test/package_availability_test.py  6
-rw-r--r--  roles/openshift_health_checker/test/package_update_test.py  6
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py  10
-rw-r--r--  roles/openshift_health_checker/test/zz_failure_summary_test.py  15
-rw-r--r--  roles/openshift_hosted/README.md  1
-rw-r--r--  roles/openshift_hosted/defaults/main.yml  10
-rw-r--r--  roles/openshift_hosted/tasks/registry/registry.yml  10
-rw-r--r--  roles/openshift_hosted/tasks/registry/secure.yml  2
-rw-r--r--  roles/openshift_hosted/tasks/router/router.yml  20
-rw-r--r--  roles/openshift_hosted_logging/meta/main.yaml  1
-rw-r--r--  roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml  2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml  2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml  2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml  2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml  2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml  2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml  2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml  2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml  2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml  2
-rw-r--r--  roles/openshift_hosted_templates/meta/main.yml  3
-rw-r--r--  roles/openshift_loadbalancer/defaults/main.yml  8
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml  2
-rw-r--r--  roles/openshift_logging_curator/templates/curator.j2  2
-rw-r--r--  roles/openshift_logging_curator/vars/main.yml  4
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2  3
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2  15
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/main.yml  4
-rw-r--r--  roles/openshift_logging_fluentd/templates/fluentd.j2  2
-rw-r--r--  roles/openshift_logging_fluentd/vars/main.yml  4
-rw-r--r--  roles/openshift_logging_kibana/templates/kibana.j2  4
-rw-r--r--  roles/openshift_logging_kibana/vars/main.yml  4
-rw-r--r--  roles/openshift_logging_mux/templates/mux.j2  2
-rw-r--r--  roles/openshift_logging_mux/vars/main.yml  4
-rw-r--r--  roles/openshift_manageiq/vars/main.yml  3
-rw-r--r--  roles/openshift_master/README.md  9
-rw-r--r--  roles/openshift_master/defaults/main.yml  21
-rw-r--r--  roles/openshift_master/meta/main.yml  16
-rw-r--r--  roles/openshift_master/tasks/main.yml  46
-rw-r--r--  roles/openshift_master/tasks/registry_auth.yml  27
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml  9
-rw-r--r--  roles/openshift_master/tasks/update_etcd_client_urls.yml  8
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2  14
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2  14
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2  6
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2  2
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2  2
-rw-r--r--  roles/openshift_master/vars/main.yml  19
-rw-r--r--  roles/openshift_master_facts/filter_plugins/openshift_master.py  5
-rw-r--r--  roles/openshift_metrics/tasks/pre_install.yaml  2
-rw-r--r--  roles/openshift_node/README.md  10
-rw-r--r--  roles/openshift_node/defaults/main.yml  32
-rw-r--r--  roles/openshift_node/handlers/main.yml  5
-rw-r--r--  roles/openshift_node/meta/main.yml  2
-rw-r--r--  roles/openshift_node/tasks/bootstrap.yml  33
-rw-r--r--  roles/openshift_node/tasks/config.yml  18
-rw-r--r--  roles/openshift_node/tasks/install.yml  6
-rw-r--r--  roles/openshift_node/tasks/main.yml  39
-rw-r--r--  roles/openshift_node/tasks/registry_auth.yml  25
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml  4
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2  12
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service  19
-rw-r--r--  roles/openshift_node_dnsmasq/meta/main.yml  1
-rw-r--r--  roles/openshift_node_upgrade/README.md  4
-rw-r--r--  roles/openshift_node_upgrade/defaults/main.yml  5
-rw-r--r--  roles/openshift_node_upgrade/handlers/main.yml  2
-rw-r--r--  roles/openshift_node_upgrade/meta/main.yml  1
-rw-r--r--  roles/openshift_node_upgrade/tasks/main.yml  4
-rw-r--r--  roles/openshift_node_upgrade/tasks/systemd_units.yml  6
-rw-r--r--  roles/openshift_node_upgrade/templates/openshift.docker.node.service  4
-rw-r--r--  roles/openshift_persistent_volumes/README.md  7
-rw-r--r--  roles/openshift_persistent_volumes/meta/main.yml  1
-rw-r--r--  roles/openshift_prometheus/README.md  95
-rw-r--r--  roles/openshift_prometheus/defaults/main.yaml  74
-rw-r--r--  roles/openshift_prometheus/files/openshift_prometheus.exports  3
-rw-r--r--  roles/openshift_prometheus/meta/main.yaml  19
-rw-r--r--  roles/openshift_prometheus/tasks/create_pvs.yaml  36
-rw-r--r--  roles/openshift_prometheus/tasks/install_prometheus.yaml  244
-rw-r--r--  roles/openshift_prometheus/tasks/main.yaml  26
-rw-r--r--  roles/openshift_prometheus/tasks/nfs.yaml  44
-rw-r--r--  roles/openshift_prometheus/templates/alertmanager.yml.j2  20
-rw-r--r--  roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2  15
-rw-r--r--  roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2  15
-rw-r--r--  roles/openshift_prometheus/templates/prom-pv-server.yml.j2  15
-rw-r--r--  roles/openshift_prometheus/templates/prometheus.rules.j2  4
-rw-r--r--  roles/openshift_prometheus/templates/prometheus.yml.j2  174
-rw-r--r--  roles/openshift_prometheus/templates/prometheus_deployment.j2  240
-rw-r--r--  roles/openshift_prometheus/tests/inventory  2
-rw-r--r--  roles/openshift_prometheus/tests/test.yaml  5
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/main.yml  21
-rw-r--r--  roles/openshift_service_catalog/defaults/main.yml  4
-rw-r--r--  roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js  1
-rw-r--r--  roles/openshift_service_catalog/tasks/install.yml  2
-rw-r--r--  roles/openshift_storage_glusterfs/README.md  5
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml  6
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml  143
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml  136
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml  134
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml  1
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml  1
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml  1
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2  12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2  10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2  13
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2  12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2  10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2  36
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2  49
-rw-r--r--  roles/openshift_version/defaults/main.yml  1
-rw-r--r--  roles/openshift_version/tasks/main.yml  10
-rw-r--r--  roles/os_firewall/tasks/iptables.yml  2
-rw-r--r--  roles/rhel_subscribe/tasks/main.yml  6
207 files changed, 4169 insertions, 1723 deletions
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index ed97d539c..7e206ded1 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -1 +1,6 @@
---
+docker_cli_auth_config_path: '/root/.docker'
+
+oreg_url: ''
+oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
+oreg_auth_credentials_replace: False
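Editorial note: the added `oreg_host` default keeps only the first path segment of `oreg_url`, and only when that segment looks like a hostname (contains a dot). A minimal Python sketch of the Jinja expression, using hypothetical registry values (illustrative only, not part of the patch):

```python
def oreg_host(oreg_url):
    first = oreg_url.split('/')[0]
    # Treat the first segment as a registry host only if it contains a dot,
    # mirroring how unqualified image references are resolved.
    return first if '.' in first else ''

assert oreg_host('registry.example.com/openshift3/ose') == 'registry.example.com'
assert oreg_host('openshift3/ose') == ''
```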
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index bc52ab60c..16aea5067 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -3,6 +3,8 @@
command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
when: not openshift.common.is_atomic | bool
register: curr_docker_version
+ retries: 4
+ until: curr_docker_version | succeeded
changed_when: false
- name: Error out if Docker pre-installed but too old
@@ -117,6 +119,18 @@
notify:
- restart docker
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{ docker_cli_auth_config_path }}/config.json"
+ when: oreg_auth_user is defined
+ register: docker_cli_auth_credentials_stat
+
+- name: Create credentials for docker cli registry auth
+ command: "docker --config={{ docker_cli_auth_config_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ when:
+ - oreg_auth_user is defined
+ - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+
- name: Start the Docker service
systemd:
name: docker
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index 787f51f94..0bab0899c 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -3,6 +3,15 @@
- set_fact:
l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(openshift.docker.insecure_registries)) }}"
when: openshift.docker.insecure_registries
+- set_fact:
+ l_crio_registries: "{{ openshift.docker.additional_registries + ['docker.io'] }}"
+ when: openshift.docker.additional_registries
+- set_fact:
+ l_crio_registries: "{{ ['docker.io'] }}"
+ when: not openshift.docker.additional_registries
+- set_fact:
+ l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}"
+ when: openshift.docker.additional_registries
- name: Ensure container-selinux is installed
package:
@@ -86,12 +95,18 @@
- name: Set to default prepend
set_fact:
l_crio_image_prepend: "docker.io/gscrivano"
- l_crio_image_name: "crio-o-fedora"
+ l_crio_image_name: "cri-o-fedora"
- - name: Use Centos based image when distribution is Red Hat or CentOS
+ - name: Use Centos based image when distribution is CentOS
set_fact:
l_crio_image_name: "cri-o-centos"
- when: ansible_distribution in ['RedHat', 'CentOS']
+ when: ansible_distribution == "CentOS"
+
+ - name: Use RHEL based image when distribution is Red Hat
+ set_fact:
+ l_crio_image_prepend: "registry.access.redhat.com"
+ l_crio_image_name: "cri-o"
+ when: ansible_distribution == "RedHat"
# For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504
- name: Use a testing registry if requested
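Editorial note: the three `set_fact` tasks added above reduce to a simple rule — the CRI-O registry search list is the configured additional registries followed by `docker.io`, falling back to `docker.io` alone. A Python sketch of that rule (illustrative, not part of the patch):

```python
def crio_registries(additional_registries):
    # docker.io is always appended as the final fallback registry
    if additional_registries:
        return list(additional_registries) + ['docker.io']
    return ['docker.io']

assert crio_registries(['registry.example.com']) == ['registry.example.com', 'docker.io']
assert crio_registries([]) == ['docker.io']
```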
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index 57a84bc2c..146e5f430 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -100,18 +100,22 @@
l_docker_image_prepend: "registry.fedoraproject.org/f25"
when: ansible_distribution == 'Fedora'
- # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504
- - name: Use a testing registry if requested
- set_fact:
- l_docker_image_prepend: "{{ openshift_docker_systemcontainer_image_registry_override }}"
- when:
- - openshift_docker_systemcontainer_image_registry_override is defined
- - openshift_docker_systemcontainer_image_registry_override != ""
-
- name: Set the full image name
set_fact:
l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:latest"
+ # For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959
+ - name: Use a specific image if requested
+ set_fact:
+ l_docker_image: "{{ openshift_docker_systemcontainer_image_override }}"
+ when:
+ - openshift_docker_systemcontainer_image_override is defined
+ - openshift_docker_systemcontainer_image_override != ""
+
+ # Be nice and let the user see the variable result
+ - debug:
+ var: l_docker_image
+
# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
- name: Pre-pull Container Engine System Container image
command: "atomic pull --storage ostree {{ l_docker_image }}"
diff --git a/roles/docker/templates/crio.conf.j2 b/roles/docker/templates/crio.conf.j2
index 5b31932b1..b4ee84fd0 100644
--- a/roles/docker/templates/crio.conf.j2
+++ b/roles/docker/templates/crio.conf.j2
@@ -120,6 +120,11 @@ insecure_registries = [
{{ l_insecure_crio_registries|default("") }}
]
+# registries is used to specify a comma-separated list of registries to be used
+# when pulling an unqualified image (e.g. fedora:rawhide).
+registries = [
+{{ l_additional_crio_registries|default("") }}
+]
# The "crio.network" table contains settings pertaining to the
# management of CNI plugins.
[crio.network]
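Editorial note: the `l_additional_crio_registries` value interpolated above is pre-quoted by the `set_fact` task shown earlier, using Python string formatting inside Jinja. A sketch of that rendering, with a hypothetical registry list:

```python
registries = ['registry.example.com', 'docker.io']  # hypothetical input
# Same expression as the set_fact task: wrap every entry in double quotes,
# joined by ", " so the result drops straight into the TOML array.
rendered = '"{}"'.format('", "'.join(registries))
print(rendered)  # "registry.example.com", "docker.io"
```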
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index 89993f7ea..b67411f40 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -56,7 +56,7 @@ etcd_is_containerized: False
etcd_is_thirdparty: False
# etcd dir vars
-etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if openshift.common.etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}"
+etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}"
# etcd ports and protocols
etcd_client_port: 2379
diff --git a/roles/etcd_common/tasks/backup.yml b/roles/etcd_common/tasks/backup.yml
index 2bc486d3f..42d27c081 100644
--- a/roles/etcd_common/tasks/backup.yml
+++ b/roles/etcd_common/tasks/backup.yml
@@ -29,7 +29,6 @@
- name: Check current etcd disk usage
shell: du --exclude='*openshift-backup*' -k {{ l_etcd_data_dir }} | tail -n 1 | cut -f1
register: l_etcd_disk_usage
- when: r_etcd_common_embedded_etcd | bool
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
changed_when: false
@@ -37,9 +36,9 @@
- name: Abort if insufficient disk space for etcd backup
fail:
msg: >
- {{ l_etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+ {{ l_etcd_disk_usage.stdout|int*2 }} Kb disk space required for etcd backup,
{{ l_avail_disk.stdout }} Kb available.
- when: (r_etcd_common_embedded_etcd | bool) and (l_etcd_disk_usage.stdout|int > l_avail_disk.stdout|int)
+ when: l_etcd_disk_usage.stdout|int*2 > l_avail_disk.stdout|int
# For non containerized and non embedded we should have the correct version of
# etcd installed already. So don't do anything.
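Editorial note: the reworked check above now runs for every deployment (not just embedded etcd) and requires roughly twice the current data-dir usage to be free, since the backup is a full copy of the data dir plus headroom. A sketch of the arithmetic:

```python
# Values in KB, as reported by the du/df tasks above.
def backup_would_fit(etcd_usage_kb, avail_kb):
    required_kb = etcd_usage_kb * 2  # the backup is a copy of the data dir
    return required_kb <= avail_kb

assert backup_would_fit(1000000, 2500000)      # ~1 GB used, ~2.5 GB free
assert not backup_would_fit(1000000, 1500000)  # fails: needs ~2 GB free
```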
diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml
index f5bcd03ee..6ed87e6c7 100644
--- a/roles/etcd_common/tasks/main.yml
+++ b/roles/etcd_common/tasks/main.yml
@@ -6,4 +6,4 @@
- name: Include main action task file
include: "{{ r_etcd_common_action }}.yml"
- when: '"noop" not in r_etcd_common_action'
+ when: r_etcd_common_action != "noop"
diff --git a/roles/etcd_common/tasks/noop.yml b/roles/etcd_common/tasks/noop.yml
new file mode 100644
index 000000000..a88d78235
--- /dev/null
+++ b/roles/etcd_common/tasks/noop.yml
@@ -0,0 +1,4 @@
+---
+# This file is here because the usage of tags, specifically `pre_upgrade`,
+# breaks the functionality of this role.
+# See https://bugzilla.redhat.com/show_bug.cgi?id=1464025
diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml
index ddf8230ec..71c8f38c3 100644
--- a/roles/flannel_register/defaults/main.yaml
+++ b/roles/flannel_register/defaults/main.yaml
@@ -1,7 +1,6 @@
---
-flannel_network: "{{ openshift.common.portal_net | default('172.30.0.0/16', true) }}"
-flannel_min_network: 172.30.5.0
-flannel_subnet_len: 24
+flannel_network: "{{ openshift.master.sdn_cluster_network_cidr }}"
+flannel_subnet_len: "{{ 32 - openshift.master.sdn_host_subnet_length }}"
flannel_etcd_key: /openshift.com/network
etcd_hosts: "{{ etcd_urls }}"
etcd_conf_dir: "{{ openshift.common.config_base }}/master"
diff --git a/roles/flannel_register/templates/flannel-config.json b/roles/flannel_register/templates/flannel-config.json
index 89ce4c30b..bba3729fa 100644
--- a/roles/flannel_register/templates/flannel-config.json
+++ b/roles/flannel_register/templates/flannel-config.json
@@ -1,7 +1,6 @@
{
"Network": "{{ flannel_network }}",
"SubnetLen": {{ flannel_subnet_len }},
- "SubnetMin": "{{ flannel_min_network }}",
"Backend": {
"Type": "host-gw"
}
diff --git a/roles/lib_utils/library/repoquery.py b/roles/lib_utils/library/repoquery.py
index 95a305b58..e5ac1f74f 100644
--- a/roles/lib_utils/library/repoquery.py
+++ b/roles/lib_utils/library/repoquery.py
@@ -35,6 +35,7 @@ import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
import tempfile # noqa: F401
+import time # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
@@ -618,17 +619,22 @@ def main():
show_duplicates=dict(default=False, required=False, type='bool'),
match_version=dict(default=None, required=False, type='str'),
ignore_excluders=dict(default=False, required=False, type='bool'),
+ retries=dict(default=4, required=False, type='int'),
+ retry_interval=dict(default=5, required=False, type='int'),
),
supports_check_mode=False,
required_if=[('show_duplicates', True, ['name'])],
)
- rval = Repoquery.run_ansible(module.params, module.check_mode)
-
- if 'failed' in rval:
- module.fail_json(**rval)
-
- module.exit_json(**rval)
+ tries = 1
+ while True:
+ rval = Repoquery.run_ansible(module.params, module.check_mode)
+ if 'failed' not in rval:
+ module.exit_json(**rval)
+ elif tries > module.params['retries']:
+ module.fail_json(**rval)
+ tries += 1
+ time.sleep(module.params['retry_interval'])
if __name__ == "__main__":
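Editorial note: the same retry loop is generated into the source copy below (roles/lib_utils/src/ansible/repoquery.py). As a standalone pattern it amounts to this sketch — retry a callable that reports failure via a `failed` key, making retries + 1 attempts in total with a fixed sleep between them:

```python
import time

def run_with_retries(run, retries=4, retry_interval=5):
    """Sketch of the loop added to main(): retry until the result dict
    has no 'failed' key, or until retries are exhausted."""
    tries = 1
    while True:
        rval = run()
        if 'failed' not in rval:
            return rval
        if tries > retries:
            raise RuntimeError(rval)  # stand-in for module.fail_json(**rval)
        tries += 1
        time.sleep(retry_interval)
```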
diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py
index baf72fe47..921bca074 100644
--- a/roles/lib_utils/library/yedit.py
+++ b/roles/lib_utils/library/yedit.py
@@ -35,6 +35,7 @@ import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
import tempfile # noqa: F401
+import time # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
diff --git a/roles/lib_utils/src/ansible/repoquery.py b/roles/lib_utils/src/ansible/repoquery.py
index 40773b1c1..5f5b93639 100644
--- a/roles/lib_utils/src/ansible/repoquery.py
+++ b/roles/lib_utils/src/ansible/repoquery.py
@@ -19,17 +19,22 @@ def main():
show_duplicates=dict(default=False, required=False, type='bool'),
match_version=dict(default=None, required=False, type='str'),
ignore_excluders=dict(default=False, required=False, type='bool'),
+ retries=dict(default=4, required=False, type='int'),
+ retry_interval=dict(default=5, required=False, type='int'),
),
supports_check_mode=False,
required_if=[('show_duplicates', True, ['name'])],
)
- rval = Repoquery.run_ansible(module.params, module.check_mode)
-
- if 'failed' in rval:
- module.fail_json(**rval)
-
- module.exit_json(**rval)
+ tries = 1
+ while True:
+ rval = Repoquery.run_ansible(module.params, module.check_mode)
+ if 'failed' not in rval:
+ module.exit_json(**rval)
+ elif tries > module.params['retries']:
+ module.fail_json(**rval)
+ tries += 1
+ time.sleep(module.params['retry_interval'])
if __name__ == "__main__":
diff --git a/roles/lib_utils/src/lib/import.py b/roles/lib_utils/src/lib/import.py
index 567f8c9e0..07a04b7ae 100644
--- a/roles/lib_utils/src/lib/import.py
+++ b/roles/lib_utils/src/lib/import.py
@@ -10,6 +10,7 @@ import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
import tempfile # noqa: F401
+import time # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md
new file mode 100644
index 000000000..696efbea5
--- /dev/null
+++ b/roles/openshift_aws/README.md
@@ -0,0 +1,84 @@
+openshift_aws
+==================================
+
+Helpers for provisioning AWS infrastructure.
+
+Requirements
+------------
+
+* Ansible 2.3
+* Boto
+
+Role Variables
+--------------
+
+From this role:
+
+| Name | Default value
+|---------------------------------------------------|-----------------------
+| openshift_aws_clusterid | default
+| openshift_aws_elb_scheme | internet-facing
+| openshift_aws_launch_config_bootstrap_token | ''
+| openshift_aws_node_group_config | {'master': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_master_volumes }}', 'tags': {'host-type': 'master', 'sub-host-type': 'default'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'wait_for_instances': True, 'max_size': 3}, 'tags': '{{ openshift_aws_node_group_config_tags }}', 'compute': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'compute'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'max_size': 100}, 'infra': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'infra'}, 'min_size': 2, 'instance_type': 'm4.xlarge', 'desired_size': 2, 'max_size': 20}}
+| openshift_aws_ami_copy_wait | False
+| openshift_aws_users | []
+| openshift_aws_launch_config_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}
+| openshift_aws_create_vpc | False
+| openshift_aws_node_group_type | master
+| openshift_aws_elb_cert_arn | ''
+| openshift_aws_kubernetes_cluster_status | owned
+| openshift_aws_s3_mode | create
+| openshift_aws_vpc | {'subnets': {'us-east-1': [{'cidr': '172.31.48.0/20', 'az': 'us-east-1c'}, {'cidr': '172.31.32.0/20', 'az': 'us-east-1e'}, {'cidr': '172.31.16.0/20', 'az': 'us-east-1a'}]}, 'cidr': '172.31.0.0/16', 'name': '{{ openshift_aws_vpc_name }}'}
+| openshift_aws_create_ssh_keys | False
+| openshift_aws_iam_kms_alias | alias/{{ openshift_aws_clusterid }}_kms
+| openshift_aws_use_custom_ami | False
+| openshift_aws_ami_copy_src_region | {{ openshift_aws_region }}
+| openshift_aws_s3_bucket_name | {{ openshift_aws_clusterid }}
+| openshift_aws_elb_health_check | {'response_timeout': 5, 'ping_port': 443, 'ping_protocol': 'tcp', 'interval': 30, 'healthy_threshold': 2, 'unhealthy_threshold': 2}
+| openshift_aws_node_security_groups | {'default': {'rules': [{'to_port': 22, 'from_port': 22, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 'all', 'from_port': 'all', 'proto': 'all', 'group_name': '{{ openshift_aws_clusterid }}'}], 'name': '{{ openshift_aws_clusterid }}', 'desc': '{{ openshift_aws_clusterid }} default'}, 'master': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_master', 'desc': '{{ openshift_aws_clusterid }} master instances'}, 'compute': {'name': '{{ openshift_aws_clusterid }}_compute', 'desc': '{{ openshift_aws_clusterid }} compute node instances'}, 'etcd': {'name': '{{ openshift_aws_clusterid }}_etcd', 'desc': '{{ openshift_aws_clusterid }} etcd instances'}, 'infra': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 32000, 'from_port': 30000, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_infra', 'desc': '{{ openshift_aws_clusterid }} infra node instances'}}
+| openshift_aws_elb_security_groups | ['{{ openshift_aws_clusterid }}', '{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}']
+| openshift_aws_vpc_tags | {'Name': '{{ openshift_aws_vpc_name }}'}
+| openshift_aws_create_security_groups | False
+| openshift_aws_create_iam_cert | False
+| openshift_aws_create_scale_group | True
+| openshift_aws_ami_encrypt | False
+| openshift_aws_node_group_config_node_volumes | [{'volume_size': 100, 'delete_on_termination': True, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
+| openshift_aws_elb_instance_filter | {'tag:host-type': '{{ openshift_aws_node_group_type }}', 'tag:clusterid': '{{ openshift_aws_clusterid }}', 'instance-state-name': 'running'}
+| openshift_aws_region | us-east-1
+| openshift_aws_elb_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}
+| openshift_aws_elb_idle_timout | 400
+| openshift_aws_subnet_name | us-east-1c
+| openshift_aws_node_group_config_tags | {{ openshift_aws_clusterid | openshift_aws_build_instance_tags(openshift_aws_kubernetes_cluster_status) }}
+| openshift_aws_create_launch_config | True
+| openshift_aws_ami_tags | {'bootstrap': 'true', 'clusterid': '{{ openshift_aws_clusterid }}', 'openshift-created': 'true'}
+| openshift_aws_ami_name | openshift-gi
+| openshift_aws_node_group_config_master_volumes | [{'volume_size': 100, 'delete_on_termination': False, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
+| openshift_aws_vpc_name | {{ openshift_aws_clusterid }}
+| openshift_aws_elb_listeners | {'master': {'internal': [{'instance_port': 80, 'instance_protocol': 'tcp', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'tcp', 'load_balancer_port': 443, 'protocol': 'tcp'}], 'external': [{'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 443, 'ssl_certificate_id': '{{ openshift_aws_elb_cert_arn }}', 'protocol': 'ssl'}]}}
+|
+
+
+Dependencies
+------------
+
+
+Example Playbook
+----------------
+
+```yaml
+- include_role:
+ name: openshift_aws
+ tasks_from: vpc.yml
+ vars:
+ openshift_aws_clusterid: test
+ openshift_aws_region: us-east-1
+ openshift_aws_create_vpc: true
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
new file mode 100644
index 000000000..4e7f54f79
--- /dev/null
+++ b/roles/openshift_aws/defaults/main.yml
@@ -0,0 +1,209 @@
+---
+openshift_aws_create_vpc: True
+openshift_aws_create_s3: True
+openshift_aws_create_iam_cert: True
+openshift_aws_create_security_groups: True
+openshift_aws_create_launch_config: True
+openshift_aws_create_scale_group: True
+openshift_aws_kubernetes_cluster_status: owned # or shared
+openshift_aws_node_group_type: master
+
+openshift_aws_wait_for_ssh: True
+
+openshift_aws_clusterid: default
+openshift_aws_region: us-east-1
+openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
+
+openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
+openshift_aws_iam_cert_path: ''
+openshift_aws_iam_cert_chain_path: ''
+openshift_aws_iam_cert_key_path: ''
+openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift {{ openshift_aws_node_group_type }}"
+
+openshift_aws_iam_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms"
+openshift_aws_ami: ''
+openshift_aws_ami_copy_wait: False
+openshift_aws_ami_encrypt: False
+openshift_aws_ami_copy_src_region: "{{ openshift_aws_region }}"
+openshift_aws_ami_name: openshift-gi
+openshift_aws_base_ami_name: ami_base
+
+openshift_aws_launch_config_bootstrap_token: ''
+openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}-{{ ansible_date_time.epoch }}"
+
+openshift_aws_users: []
+
+openshift_aws_ami_tags:
+ bootstrap: "true"
+ openshift-created: "true"
+ clusterid: "{{ openshift_aws_clusterid }}"
+
+openshift_aws_s3_mode: create
+openshift_aws_s3_bucket_name: "{{ openshift_aws_clusterid }}-docker-registry"
+
+openshift_aws_elb_health_check:
+ ping_protocol: tcp
+ ping_port: 443
+ response_timeout: 5
+ interval: 30
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+
+openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}"
+openshift_aws_elb_idle_timout: 400
+openshift_aws_elb_scheme: internet-facing
+openshift_aws_elb_cert_arn: ''
+
+openshift_aws_elb_listeners:
+ master:
+ external:
+ - protocol: tcp
+ load_balancer_port: 80
+ instance_protocol: ssl
+ instance_port: 443
+ - protocol: ssl
+ load_balancer_port: 443
+ instance_protocol: ssl
+ instance_port: 443
+ # ssl certificate required for https or ssl
+ ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}"
+ internal:
+ - protocol: tcp
+ load_balancer_port: 80
+ instance_protocol: tcp
+ instance_port: 80
+ - protocol: tcp
+ load_balancer_port: 443
+ instance_protocol: tcp
+ instance_port: 443
+
+openshift_aws_node_group_config_master_volumes:
+- device_name: /dev/sdb
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: False
+
+openshift_aws_node_group_config_node_volumes:
+- device_name: /dev/sdb
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: True
+
+openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}"
+
+openshift_aws_node_group_config:
+ tags: "{{ openshift_aws_node_group_config_tags }}"
+ master:
+ instance_type: m4.xlarge
+ ami: "{{ openshift_aws_ami }}"
+ volumes: "{{ openshift_aws_node_group_config_master_volumes }}"
+ health_check:
+ period: 60
+ type: EC2
+ min_size: 3
+ max_size: 3
+ desired_size: 3
+ tags:
+ host-type: master
+ sub-host-type: default
+ wait_for_instances: True
+ compute:
+ instance_type: m4.xlarge
+ ami: "{{ openshift_aws_ami }}"
+ volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
+ health_check:
+ period: 60
+ type: EC2
+ min_size: 3
+ max_size: 100
+ desired_size: 3
+ tags:
+ host-type: node
+ sub-host-type: compute
+ infra:
+ instance_type: m4.xlarge
+ ami: "{{ openshift_aws_ami }}"
+ volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
+ health_check:
+ period: 60
+ type: EC2
+ min_size: 2
+ max_size: 20
+ desired_size: 2
+ tags:
+ host-type: node
+ sub-host-type: infra
+
+openshift_aws_elb_security_groups:
+- "{{ openshift_aws_clusterid }}"
+- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}"
+
+openshift_aws_elb_instance_filter:
+ "tag:clusterid": "{{ openshift_aws_clusterid }}"
+ "tag:host-type": "{{ openshift_aws_node_group_type }}"
+ instance-state-name: running
+
+openshift_aws_node_security_groups:
+ default:
+ name: "{{ openshift_aws_clusterid }}"
+ desc: "{{ openshift_aws_clusterid }} default"
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 0.0.0.0/0
+ - proto: all
+ from_port: all
+ to_port: all
+ group_name: "{{ openshift_aws_clusterid }}"
+ master:
+ name: "{{ openshift_aws_clusterid }}_master"
+ desc: "{{ openshift_aws_clusterid }} master instances"
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 443
+ to_port: 443
+ cidr_ip: 0.0.0.0/0
+ compute:
+ name: "{{ openshift_aws_clusterid }}_compute"
+ desc: "{{ openshift_aws_clusterid }} compute node instances"
+ infra:
+ name: "{{ openshift_aws_clusterid }}_infra"
+ desc: "{{ openshift_aws_clusterid }} infra node instances"
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 443
+ to_port: 443
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 30000
+ to_port: 32000
+ cidr_ip: 0.0.0.0/0
+ etcd:
+ name: "{{ openshift_aws_clusterid }}_etcd"
+ desc: "{{ openshift_aws_clusterid }} etcd instances"
+
+openshift_aws_vpc_tags:
+ Name: "{{ openshift_aws_vpc_name }}"
+
+openshift_aws_subnet_name: us-east-1c
+
+openshift_aws_vpc:
+ name: "{{ openshift_aws_vpc_name }}"
+ cidr: 172.31.0.0/16
+ subnets:
+ us-east-1:
+ - cidr: 172.31.48.0/20
+ az: "us-east-1c"
+ - cidr: 172.31.32.0/20
+ az: "us-east-1e"
+ - cidr: 172.31.16.0/20
+ az: "us-east-1a"
diff --git a/roles/openshift_aws/filter_plugins/filters.py b/roles/openshift_aws/filter_plugins/filters.py
new file mode 100644
index 000000000..06e1f9602
--- /dev/null
+++ b/roles/openshift_aws/filter_plugins/filters.py
@@ -0,0 +1,28 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+'''
+Custom filters for use in openshift_aws
+'''
+
+
+class FilterModule(object):
+ ''' Custom ansible filters for use by openshift_aws role'''
+
+ @staticmethod
+ def build_instance_tags(clusterid, status='owned'):
+ ''' This function will return a dictionary of the instance tags.
+
+ The main desire to have this inside of a filter_plugin is that we
+ need to build the following key.
+
+ {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": 'owned'}
+
+ '''
+ tags = {'clusterid': clusterid,
+ 'kubernetes.io/cluster/{}'.format(clusterid): status}
+
+ return tags
+
+ def filters(self):
+ ''' returns a mapping of filters to methods '''
+ return {'build_instance_tags': self.build_instance_tags}
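Editorial note: a usage sketch of the new filter ('mycluster' is a hypothetical cluster id). The `kubernetes.io/cluster/<clusterid>` key is the tag the Kubernetes AWS cloud provider uses to associate resources with a cluster; building that dynamic dict key is why this lives in a filter plugin rather than inline YAML:

```python
def build_instance_tags(clusterid, status='owned'):
    # Same logic as the filter above.
    return {'clusterid': clusterid,
            'kubernetes.io/cluster/{}'.format(clusterid): status}

# "{{ 'mycluster' | build_instance_tags('owned') }}" would render as:
assert build_instance_tags('mycluster') == {
    'clusterid': 'mycluster',
    'kubernetes.io/cluster/mycluster': 'owned',
}
```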
diff --git a/roles/openshift_aws/meta/main.yml b/roles/openshift_aws/meta/main.yml
new file mode 100644
index 000000000..875efcb8f
--- /dev/null
+++ b/roles/openshift_aws/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+- lib_utils
diff --git a/roles/openshift_aws/tasks/ami_copy.yml b/roles/openshift_aws/tasks/ami_copy.yml
new file mode 100644
index 000000000..07020dd62
--- /dev/null
+++ b/roles/openshift_aws/tasks/ami_copy.yml
@@ -0,0 +1,34 @@
+---
+- fail:
+ msg: "{{ item }} needs to be defined"
+ when: item is not defined
+ with_items:
+ - openshift_aws_ami_copy_src_ami
+ - openshift_aws_ami_copy_name
+
+- name: Create IAM KMS key with alias
+ oo_iam_kms:
+ state: present
+ alias: "{{ openshift_aws_iam_kms_alias }}"
+ region: "{{ openshift_aws_region }}"
+ register: created_kms
+
+- debug: var=created_kms.results
+
+- name: "Create copied AMI image and wait: {{ openshift_aws_ami_copy_wait }}"
+ ec2_ami_copy:
+ name: "{{ openshift_aws_ami_copy_name }}"
+ region: "{{ openshift_aws_region }}"
+ source_region: "{{ openshift_aws_ami_copy_src_region }}"
+ source_image_id: "{{ openshift_aws_ami_copy_src_ami }}"
+ encrypted: "{{ openshift_aws_ami_encrypt | bool }}"
+ kms_key_id: "{{ created_kms.results.KeyArn | default(omit) }}"
+ wait: "{{ openshift_aws_ami_copy_wait | default(omit) }}"
+ tags: "{{ openshift_aws_ami_tags }}"
+ register: copy_result
+
+- debug: var=copy_result
+
+- name: return AMI ID with set_fact
+ set_fact:
+ openshift_aws_ami_copy_custom_ami: "{{ copy_result.image_id }}"
diff --git a/roles/openshift_aws/tasks/build_ami.yml b/roles/openshift_aws/tasks/build_ami.yml
new file mode 100644
index 000000000..8d4e5ac43
--- /dev/null
+++ b/roles/openshift_aws/tasks/build_ami.yml
@@ -0,0 +1,48 @@
+---
+- when: openshift_aws_create_vpc | bool
+ name: create a vpc
+ include: vpc.yml
+
+- when: openshift_aws_users | length > 0
+ name: create aws ssh keypair
+ include: ssh_keys.yml
+
+- when: openshift_aws_create_security_groups | bool
+ name: Create compute security_groups
+ include: security_group.yml
+
+- name: query vpc
+ ec2_vpc_net_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ 'tag:Name': "{{ openshift_aws_vpc_name }}"
+ register: vpcout
+
+- name: fetch the default subnet id
+ ec2_vpc_subnet_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_subnet_name }}"
+ vpc-id: "{{ vpcout.vpcs[0].id }}"
+ register: subnetout
+
+- name: create instance for ami creation
+ ec2:
+ assign_public_ip: yes
+ region: "{{ openshift_aws_region }}"
+ key_name: "{{ openshift_aws_ssh_key_name }}"
+ group: "{{ openshift_aws_clusterid }}"
+ instance_type: m4.xlarge
+ vpc_subnet_id: "{{ subnetout.subnets[0].id }}"
+ image: "{{ openshift_aws_base_ami }}"
+ volumes:
+ - device_name: /dev/sdb
+ volume_type: gp2
+ volume_size: 100
+ delete_on_termination: true
+ wait: yes
+ exact_count: 1
+ count_tag:
+ Name: "{{ openshift_aws_base_ami_name }}"
+ instance_tags:
+ Name: "{{ openshift_aws_base_ami_name }}"
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
new file mode 100644
index 000000000..0dac1c23d
--- /dev/null
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -0,0 +1,34 @@
+---
+# When openshift_aws_ami is '' we retrieve the most recently built AMI
+# and set openshift_aws_ami to it.
+- when: openshift_aws_ami == ''
+ block:
+ - name: fetch recently created AMI
+ ec2_ami_find:
+ region: "{{ openshift_aws_region }}"
+ sort: creationDate
+ sort_order: descending
+ name: "{{ openshift_aws_ami_name }}*"
+ ami_tags: "{{ openshift_aws_ami_tags }}"
+ no_result_action: fail
+ register: amiout
+
+ - name: Set the openshift_aws_ami
+ set_fact:
+ openshift_aws_ami: "{{ amiout.results[0].ami_id }}"
+ when:
+ - "'results' in amiout"
+ - amiout.results|length > 0
+
+- when: openshift_aws_create_security_groups
+ name: "Create {{ openshift_aws_node_group_type }} security groups"
+ include: security_group.yml
+
+- when: openshift_aws_create_launch_config
+ name: "Create {{ openshift_aws_node_group_type }} launch config"
+ include: launch_config.yml
+
+- when: openshift_aws_create_scale_group
+ name: "Create {{ openshift_aws_node_group_type }} node group"
+ include: scale_group.yml
diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml
new file mode 100644
index 000000000..a1fdd66fc
--- /dev/null
+++ b/roles/openshift_aws/tasks/elb.yml
@@ -0,0 +1,68 @@
+---
+- name: query vpc
+ ec2_vpc_net_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ 'tag:Name': "{{ openshift_aws_vpc_name }}"
+ register: vpcout
+
+- name: debug
+ debug: var=vpcout
+
+- name: fetch the remote instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region }}"
+ filters: "{{ openshift_aws_elb_instance_filter }}"
+ register: instancesout
+
+- name: fetch the default subnet id
+ ec2_vpc_subnet_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_subnet_name }}"
+ vpc-id: "{{ vpcout.vpcs[0].id }}"
+ register: subnetout
+
+- name:
+ debug:
+ msg: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction]
+ if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
+ else openshift_aws_elb_listeners }}"
+
+- name: "Create ELB {{ openshift_aws_elb_name }}"
+ ec2_elb_lb:
+ name: "{{ openshift_aws_elb_name }}"
+ state: present
+ security_group_names: "{{ openshift_aws_elb_security_groups }}"
+ idle_timeout: "{{ openshift_aws_elb_idle_timout }}"
+ region: "{{ openshift_aws_region }}"
+ subnets:
+ - "{{ subnetout.subnets[0].id }}"
+ health_check: "{{ openshift_aws_elb_health_check }}"
+ listeners: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction]
+ if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
+ else openshift_aws_elb_listeners }}"
+ scheme: "{{ openshift_aws_elb_scheme }}"
+ tags:
+ KubernetesCluster: "{{ openshift_aws_clusterid }}"
+ register: new_elb
+
+# It is necessary to ignore_errors here because the instances are not in 'ready'
+# state when first added to ELB
+- name: "Add instances to ELB {{ openshift_aws_elb_name }}"
+ ec2_elb:
+ instance_id: "{{ item.id }}"
+ ec2_elbs: "{{ openshift_aws_elb_name }}"
+ state: present
+ region: "{{ openshift_aws_region }}"
+ wait: False
+ with_items: "{{ instancesout.instances }}"
+ ignore_errors: True
+ retries: 10
+ register: elb_call
+ until: elb_call|succeeded
+
+- debug:
+ msg: "{{ item }}"
+ with_items:
+ - "{{ new_elb }}"
diff --git a/roles/openshift_aws/tasks/iam_cert.yml b/roles/openshift_aws/tasks/iam_cert.yml
new file mode 100644
index 000000000..cd9772a25
--- /dev/null
+++ b/roles/openshift_aws/tasks/iam_cert.yml
@@ -0,0 +1,29 @@
+---
+- name: upload certificates to AWS IAM
+ iam_cert23:
+ state: present
+ name: "{{ openshift_aws_iam_cert_name }}"
+ cert: "{{ openshift_aws_iam_cert_path }}"
+ key: "{{ openshift_aws_iam_cert_key_path }}"
+ cert_chain: "{{ openshift_aws_iam_cert_chain_path | default(omit) }}"
+ register: elb_cert_chain
+ failed_when:
+ - "'failed' in elb_cert_chain"
+ - elb_cert_chain.failed
+ - "'msg' in elb_cert_chain"
+ - "'already exists and has a different certificate body' in elb_cert_chain.msg"
+ - "'BotoServerError' in elb_cert_chain.msg"
+ when:
+ - openshift_aws_create_iam_cert | bool
+ - openshift_aws_iam_cert_path != ''
+ - openshift_aws_iam_cert_key_path != ''
+ - openshift_aws_elb_cert_arn == ''
+
+- name: set_fact openshift_aws_elb_cert_arn
+ set_fact:
+ openshift_aws_elb_cert_arn: "{{ elb_cert_chain.arn }}"
+
+- name: wait for cert to propagate
+ pause:
+ seconds: 5
+ when: elb_cert_chain.changed
diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml
new file mode 100644
index 000000000..65c5a6cc0
--- /dev/null
+++ b/roles/openshift_aws/tasks/launch_config.yml
@@ -0,0 +1,45 @@
+---
+- fail:
+ msg: "Ensure that an AMI value is defined for openshift_aws_ami or openshift_aws_launch_config_custom_image."
+ when:
+ - openshift_aws_ami is undefined
+
+- name: fetch the security groups for launch config
+ ec2_group_facts:
+ filters:
+ group-name:
+ - "{{ openshift_aws_clusterid }}" # default sg
+ - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" # node type sg
+ - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s" # node type sg k8s
+ region: "{{ openshift_aws_region }}"
+ register: ec2sgs
+
+# Create the scale group config
+- name: Create the node scale group launch config
+ ec2_lc:
+ name: "{{ openshift_aws_launch_config_name }}"
+ region: "{{ openshift_aws_region }}"
+ image_id: "{{ openshift_aws_ami }}"
+ instance_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].instance_type }}"
+ security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id')| list }}"
+ user_data: |-
+ #cloud-config
+ {% if openshift_aws_node_group_type != 'master' %}
+ write_files:
+ - path: /root/csr_kubeconfig
+ owner: root:root
+ permissions: '0640'
+ content: {{ openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }}
+ - path: /root/openshift_settings
+ owner: root:root
+ permissions: '0640'
+ content:
+ openshift_type: "{{ openshift_aws_node_group_type }}"
+ runcmd:
+ - [ systemctl, enable, atomic-openshift-node]
+ - [ systemctl, start, atomic-openshift-node]
+ {% endif %}
+ key_name: "{{ openshift_aws_ssh_key_name }}"
+ ebs_optimized: False
+ volumes: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].volumes }}"
+ assign_public_ip: True
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
new file mode 100644
index 000000000..189caeaee
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -0,0 +1,54 @@
+---
+- when: openshift_aws_create_vpc | bool
+ name: create default vpc
+ include: vpc.yml
+
+- when: openshift_aws_create_iam_cert | bool
+ name: create the iam_cert for elb certificate
+ include: iam_cert.yml
+
+- when: openshift_aws_users | length > 0
+ name: create aws ssh keypair
+ include: ssh_keys.yml
+
+- when: openshift_aws_create_s3 | bool
+ name: create s3 bucket for registry
+ include: s3.yml
+
+- name: include scale group creation for master
+ include: build_node_group.yml
+
+- name: fetch newly created instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:clusterid": "{{ openshift_aws_clusterid }}"
+ "tag:host-type": "{{ openshift_aws_node_group_type }}"
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
+
+- name: create our master internal load balancers
+ include: elb.yml
+ vars:
+ openshift_aws_elb_direction: internal
+ openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-internal"
+ openshift_aws_elb_scheme: internal
+
+- name: create our master external load balancers
+ include: elb.yml
+ vars:
+ openshift_aws_elb_direction: external
+ openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-external"
+ openshift_aws_elb_scheme: internet-facing
+
+- name: wait for ssh to become available
+ wait_for:
+ port: 22
+ host: "{{ item.public_ip_address }}"
+ timeout: 300
+ search_regex: OpenSSH
+ with_items: "{{ instancesout.instances }}"
+ when: openshift_aws_wait_for_ssh | bool
diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml
new file mode 100644
index 000000000..fc4996c68
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision_nodes.yml
@@ -0,0 +1,66 @@
+---
+# Get bootstrap config token
+# bootstrap should be created on first master
+# need to fetch it and shove it into cloud data
+- name: fetch master instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:clusterid": "{{ openshift_aws_clusterid }}"
+ "tag:host-type": master
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
+
+- name: slurp down the bootstrap.kubeconfig
+ slurp:
+ src: /etc/origin/master/bootstrap.kubeconfig
+ delegate_to: "{{ instancesout.instances[0].public_ip_address }}"
+ remote_user: root
+ register: bootstrap
+
+- name: set_fact for kubeconfig token
+ set_fact:
+ openshift_aws_launch_config_bootstrap_token: "{{ bootstrap['content'] | b64decode }}"
+
+- name: include build node group for infra
+ include: build_node_group.yml
+ vars:
+ openshift_aws_node_group_type: infra
+ openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift infra"
+ openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-infra-{{ ansible_date_time.epoch }}"
+
+- name: include build node group for compute
+ include: build_node_group.yml
+ vars:
+ openshift_aws_node_group_type: compute
+ openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift compute"
+ openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-compute-{{ ansible_date_time.epoch }}"
+
+- when: openshift_aws_wait_for_ssh | bool
+ block:
+ - name: pause and allow for instances to scale before we query them
+ pause:
+ seconds: 10
+
+ - name: fetch newly created instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:clusterid": "{{ openshift_aws_clusterid }}"
+ "tag:host-type": node
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
+
+ - name: wait for ssh to become available
+ wait_for:
+ port: 22
+ host: "{{ item.public_ip_address }}"
+ timeout: 300
+ search_regex: OpenSSH
+ with_items: "{{ instancesout.instances }}"
diff --git a/roles/openshift_aws/tasks/s3.yml b/roles/openshift_aws/tasks/s3.yml
new file mode 100644
index 000000000..9cf37c840
--- /dev/null
+++ b/roles/openshift_aws/tasks/s3.yml
@@ -0,0 +1,7 @@
+---
+- name: Create an s3 bucket
+ s3:
+ bucket: "{{ openshift_aws_s3_bucket_name }}"
+ mode: "{{ openshift_aws_s3_mode }}"
+ region: "{{ openshift_aws_region }}"
+ when: openshift_aws_create_s3 | bool
diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml
new file mode 100644
index 000000000..3e969fc43
--- /dev/null
+++ b/roles/openshift_aws/tasks/scale_group.yml
@@ -0,0 +1,32 @@
+---
+- name: query vpc
+ ec2_vpc_net_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ 'tag:Name': "{{ openshift_aws_vpc_name }}"
+ register: vpcout
+
+- name: fetch the subnet to use in scale group
+ ec2_vpc_subnet_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_subnet_name }}"
+ vpc-id: "{{ vpcout.vpcs[0].id }}"
+ register: subnetout
+
+- name: Create the scale group
+ ec2_asg:
+ name: "{{ openshift_aws_scale_group_name }}"
+ launch_config_name: "{{ openshift_aws_launch_config_name }}"
+ health_check_period: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.period }}"
+ health_check_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.type }}"
+ min_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].min_size }}"
+ max_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].max_size }}"
+ desired_capacity: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].desired_size }}"
+ region: "{{ openshift_aws_region }}"
+ termination_policies: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].termination_policy if 'termination_policy' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}"
+ load_balancers: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].elbs if 'elbs' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}"
+ wait_for_instances: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].wait_for_instances | default(False)}}"
+ vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
+ tags:
+ - "{{ openshift_aws_node_group_config.tags | combine(openshift_aws_node_group_config[openshift_aws_node_group_type].tags) }}"
diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml
new file mode 100644
index 000000000..0cb749dcc
--- /dev/null
+++ b/roles/openshift_aws/tasks/seal_ami.yml
@@ -0,0 +1,49 @@
+---
+- name: fetch newly created instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_base_ami_name }}"
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
+
+- name: bundle ami
+ ec2_ami:
+ instance_id: "{{ instancesout.instances.0.id }}"
+ region: "{{ openshift_aws_region }}"
+ state: present
+ description: "This was provisioned {{ ansible_date_time.iso8601 }}"
+ name: "{{ openshift_aws_ami_name }}"
+ tags: "{{ openshift_aws_ami_tags }}"
+ wait: yes
+ register: amioutput
+
+- debug: var=amioutput
+
+- when: openshift_aws_ami_encrypt | bool
+ block:
+ - name: augment the encrypted ami tags with source-ami
+ set_fact:
+ source_tag:
+ source-ami: "{{ amioutput.image_id }}"
+
+ - name: copy the ami for encrypted disks
+ include: ami_copy.yml
+ vars:
+ openshift_aws_ami_copy_name: "{{ openshift_aws_ami_name }}-encrypted"
+ openshift_aws_ami_copy_src_ami: "{{ amioutput.image_id }}"
+ # TODO: How does the kms alias get passed to ec2_ami_copy
+ openshift_aws_ami_copy_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms"
+ openshift_aws_ami_copy_tags: "{{ source_tag | combine(openshift_aws_ami_tags) }}"
+ # this option currently fails due to boto waiters
+      # when supported, this needs to be reapplied
+ #openshift_aws_ami_copy_wait: True
+
+- name: terminate temporary instance
+ ec2:
+ state: absent
+ region: "{{ openshift_aws_region }}"
+ instance_ids: "{{ instancesout.instances.0.id }}"
diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml
new file mode 100644
index 000000000..161e72fb4
--- /dev/null
+++ b/roles/openshift_aws/tasks/security_group.yml
@@ -0,0 +1,45 @@
+---
+- name: Fetch the VPC for the vpc.id
+ ec2_vpc_net_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_clusterid }}"
+ register: vpcout
+
+- name: Create default security group for cluster
+ ec2_group:
+ name: "{{ openshift_aws_node_security_groups.default.name }}"
+ description: "{{ openshift_aws_node_security_groups.default.desc }}"
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ rules: "{{ openshift_aws_node_security_groups.default.rules | default(omit, True)}}"
+ register: sg_default_created
+
+- name: create the node group sgs
+ ec2_group:
+ name: "{{ item.name}}"
+ description: "{{ item.desc }}"
+ rules: "{{ item.rules if 'rules' in item else [] }}"
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ register: sg_create
+ with_items:
+ - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type]}}"
+
+- name: create the k8s sgs for the node group
+ ec2_group:
+ name: "{{ item.name }}_k8s"
+ description: "{{ item.desc }} for k8s"
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ register: k8s_sg_create
+ with_items:
+ - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type]}}"
+
+- name: tag the security groups with proper tags
+ ec2_tag:
+ tags:
+ KubernetesCluster: "{{ openshift_aws_clusterid }}"
+ resource: "{{ item.group_id }}"
+ region: "{{ openshift_aws_region }}"
+ with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws_ssh_keys/tasks/main.yml b/roles/openshift_aws/tasks/ssh_keys.yml
index 232cf20ed..f439ce74e 100644
--- a/roles/openshift_aws_ssh_keys/tasks/main.yml
+++ b/roles/openshift_aws/tasks/ssh_keys.yml
@@ -3,6 +3,6 @@
ec2_key:
name: "{{ item.key_name }}"
key_material: "{{ item.pub_key }}"
- region: "{{ r_openshift_aws_ssh_keys_region }}"
- with_items: "{{ r_openshift_aws_ssh_keys_users }}"
+ region: "{{ openshift_aws_region }}"
+ with_items: "{{ openshift_aws_users }}"
no_log: True
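The renamed task keeps the same per-item keys, so `openshift_aws_users` is presumably a list of dicts like the following (public keys elided):
```yaml
openshift_aws_users:
- key_name: user1
  pub_key: "ssh-rsa AAAA... user1"
- key_name: user2
  pub_key: "ssh-rsa AAAA... user2"
```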
diff --git a/roles/openshift_aws_vpc/tasks/main.yml b/roles/openshift_aws/tasks/vpc.yml
index cfe08dae5..ce2c8eac5 100644
--- a/roles/openshift_aws_vpc/tasks/main.yml
+++ b/roles/openshift_aws/tasks/vpc.yml
@@ -2,13 +2,12 @@
- name: Create AWS VPC
ec2_vpc_net:
state: present
- cidr_block: "{{ r_openshift_aws_vpc_cidr }}"
+ cidr_block: "{{ openshift_aws_vpc.cidr }}"
dns_support: True
dns_hostnames: True
- region: "{{ r_openshift_aws_vpc_region }}"
- name: "{{ r_openshift_aws_vpc_clusterid }}"
- tags:
- Name: "{{ r_openshift_aws_vpc_clusterid }}"
+ region: "{{ openshift_aws_region }}"
+ name: "{{ openshift_aws_clusterid }}"
+ tags: "{{ openshift_aws_vpc_tags }}"
register: vpc
- name: Sleep to avoid a race condition when creating the vpc
@@ -18,23 +17,23 @@
- name: assign the vpc igw
ec2_vpc_igw:
- region: "{{ r_openshift_aws_vpc_region }}"
+ region: "{{ openshift_aws_region }}"
vpc_id: "{{ vpc.vpc.id }}"
register: igw
- name: assign the vpc subnets
ec2_vpc_subnet:
- region: "{{ r_openshift_aws_vpc_region }}"
+ region: "{{ openshift_aws_region }}"
vpc_id: "{{ vpc.vpc.id }}"
cidr: "{{ item.cidr }}"
az: "{{ item.az }}"
resource_tags:
Name: "{{ item.az }}"
- with_items: "{{ r_openshift_aws_vpc_subnets[r_openshift_aws_vpc_region] }}"
+ with_items: "{{ openshift_aws_vpc.subnets[openshift_aws_region] }}"
- name: Grab the route tables from our VPC
ec2_vpc_route_table_facts:
- region: "{{ r_openshift_aws_vpc_region }}"
+ region: "{{ openshift_aws_region }}"
filters:
vpc-id: "{{ vpc.vpc.id }}"
register: route_table
@@ -44,9 +43,9 @@
lookup: id
route_table_id: "{{ route_table.route_tables[0].id }}"
vpc_id: "{{ vpc.vpc.id }}"
- region: "{{ r_openshift_aws_vpc_region }}"
+ region: "{{ openshift_aws_region }}"
tags:
- Name: "{{ r_openshift_aws_vpc_name }}"
+ Name: "{{ openshift_aws_vpc_name }}"
routes:
- dest: 0.0.0.0/0
gateway_id: igw
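Since the subnets now live under a single `openshift_aws_vpc` dict keyed by region, here is a sketch of the consolidated variable, assuming it mirrors the removed `r_openshift_aws_vpc_*` defaults:
```yaml
openshift_aws_vpc:
  cidr: 172.31.48.0/20
  subnets:
    us-east-1:          # must match openshift_aws_region
    - cidr: 172.31.48.0/20
      az: us-east-1c
    - cidr: 172.31.32.0/20
      az: us-east-1e
```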
diff --git a/roles/openshift_aws_ami_copy/README.md b/roles/openshift_aws_ami_copy/README.md
deleted file mode 100644
index 111818451..000000000
--- a/roles/openshift_aws_ami_copy/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-openshift_aws_ami_perms
-=========
-
-Ansible role for copying an AMI
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- openshift_aws_ami_copy_src_ami: source AMI id to copy from
-- openshift_aws_ami_copy_region: region where the AMI is found
-- openshift_aws_ami_copy_name: name to assign to new AMI
-- openshift_aws_ami_copy_kms_arn: AWS IAM KMS arn of the key to use for encryption
-- openshift_aws_ami_copy_tags: dict with desired tags
-- openshift_aws_ami_copy_wait: wait for the ami copy to achieve available status. This fails due to boto waiters.
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
- - name: copy the ami for encrypted disks
- include_role:
- name: openshift_aws_ami_copy
- vars:
- r_openshift_aws_ami_copy_region: us-east-1
- r_openshift_aws_ami_copy_name: myami
- r_openshift_aws_ami_copy_src_ami: ami-1234
- r_openshift_aws_ami_copy_kms_arn: arn:xxxx
- r_openshift_aws_ami_copy_tags: {}
- r_openshift_aws_ami_copy_encrypt: False
-
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_ami_copy/tasks/main.yml b/roles/openshift_aws_ami_copy/tasks/main.yml
deleted file mode 100644
index bcccd4042..000000000
--- a/roles/openshift_aws_ami_copy/tasks/main.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- fail:
- msg: "{{ item }} needs to be defined"
- when: item is not defined
- with_items:
- - r_openshift_aws_ami_copy_src_ami
- - r_openshift_aws_ami_copy_name
- - r_openshift_aws_ami_copy_region
-
-- name: "Create copied AMI image and wait: {{ r_openshift_aws_ami_copy_wait | default(False) }}"
- ec2_ami_copy:
- region: "{{ r_openshift_aws_ami_copy_region }}"
- source_region: "{{ r_openshift_aws_ami_copy_region }}"
- name: "{{ r_openshift_aws_ami_copy_name }}"
- source_image_id: "{{ r_openshift_aws_ami_copy_src_ami }}"
- encrypted: "{{ r_openshift_aws_ami_copy_encrypt | default(False) }}"
- kms_key_id: "{{ r_openshift_aws_ami_copy_kms_arn | default(omit) }}"
- wait: "{{ r_openshift_aws_ami_copy_wait | default(omit) }}"
- tags: "{{ r_openshift_aws_ami_copy_tags }}"
- register: copy_result
-
-- debug: var=copy_result
-
-- name: return AMI ID with setfact - openshift_aws_ami_copy_retval_custom_ami
- set_fact:
- r_openshift_aws_ami_copy_retval_custom_ami: "{{ copy_result.image_id }}"
diff --git a/roles/openshift_aws_elb/README.md b/roles/openshift_aws_elb/README.md
deleted file mode 100644
index ecc45fa14..000000000
--- a/roles/openshift_aws_elb/README.md
+++ /dev/null
@@ -1,75 +0,0 @@
-openshift_aws_elb
-=========
-
-Ansible role to provision and manage AWS ELB's for Openshift.
-
-Requirements
-------------
-
-Ansible Modules:
-
-- ec2_elb
-- ec2_elb_lb
-
-python package:
-
-python-boto
-
-Role Variables
---------------
-
-- r_openshift_aws_elb_instances: instances to put in ELB
-- r_openshift_aws_elb_elb_name: name of elb
-- r_openshift_aws_elb_security_group_names: list of SGs (by name) that the ELB will belong to
-- r_openshift_aws_elb_region: AWS Region
-- r_openshift_aws_elb_health_check: definition of the ELB health check. See ansible docs for ec2_elb
-```yaml
- ping_protocol: tcp
- ping_port: 443
- response_timeout: 5
- interval: 30
- unhealthy_threshold: 2
- healthy_threshold: 2
-```
-- r_openshift_aws_elb_listeners: definition of the ELB listeners. See ansible docs for ec2_elb
-```yaml
-- protocol: tcp
- load_balancer_port: 80
- instance_protocol: ssl
- instance_port: 443
-- protocol: ssl
- load_balancer_port: 443
- instance_protocol: ssl
- instance_port: 443
- # ssl certificate required for https or ssl
- ssl_certificate_id: "{{ r_openshift_aws_elb_cert_arn }}"
-```
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-- include_role:
- name: openshift_aws_elb
- vars:
- r_openshift_aws_elb_instances: aws_instances_to_put_in_elb
- r_openshift_aws_elb_elb_name: elb_name
- r_openshift_aws_elb_security_groups: security_group_names
- r_openshift_aws_elb_region: aws_region
- r_openshift_aws_elb_health_check: "{{ elb_health_check_definition }}"
- r_openshift_aws_elb_listeners: "{{ elb_listeners_definition }}"
-```
-
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_elb/defaults/main.yml b/roles/openshift_aws_elb/defaults/main.yml
deleted file mode 100644
index ed5d38079..000000000
--- a/roles/openshift_aws_elb/defaults/main.yml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-r_openshift_aws_elb_health_check:
- ping_protocol: tcp
- ping_port: 443
- response_timeout: 5
- interval: 30
- unhealthy_threshold: 2
- healthy_threshold: 2
-
-r_openshift_aws_elb_cert_arn: ''
-
-r_openshift_aws_elb_listeners:
- master:
- external:
- - protocol: tcp
- load_balancer_port: 80
- instance_protocol: ssl
- instance_port: 443
- - protocol: ssl
- load_balancer_port: 443
- instance_protocol: ssl
- instance_port: 443
- # ssl certificate required for https or ssl
- ssl_certificate_id: "{{ r_openshift_aws_elb_cert_arn }}"
- internal:
- - protocol: tcp
- load_balancer_port: 80
- instance_protocol: tcp
- instance_port: 80
- - protocol: tcp
- load_balancer_port: 443
- instance_protocol: tcp
- instance_port: 443
diff --git a/roles/openshift_aws_elb/meta/main.yml b/roles/openshift_aws_elb/meta/main.yml
deleted file mode 100644
index 58be652a5..000000000
--- a/roles/openshift_aws_elb/meta/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-galaxy_info:
- author: OpenShift
- description: Openshift ELB provisioning
- company: Red Hat, Inc
- license: ASL 2.0
- min_ansible_version: 1.2
- platforms:
- - name: EL
- versions:
- - 7
-dependencies: []
diff --git a/roles/openshift_aws_elb/tasks/main.yml b/roles/openshift_aws_elb/tasks/main.yml
deleted file mode 100644
index 64ec18545..000000000
--- a/roles/openshift_aws_elb/tasks/main.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-- name: fetch the default subnet id
- ec2_remote_facts:
- region: "{{ r_openshift_aws_elb_region }}"
- filters: "{{ r_openshift_aws_elb_instance_filter }}"
- register: instancesout
-
-- name: fetch the default subnet id
- ec2_vpc_subnet_facts:
- region: "{{ r_openshift_aws_elb_region }}"
- filters:
- "tag:Name": "{{ r_openshift_aws_elb_subnet_name }}"
- register: subnetout
-
-- name:
- debug:
- msg: "{{ r_openshift_aws_elb_listeners[r_openshift_aws_elb_type][r_openshift_aws_elb_direction]
- if 'master' in r_openshift_aws_elb_type or 'infra' in r_openshift_aws_elb_type
- else r_openshift_aws_elb_listeners }}"
-
-- name: "Create ELB {{ r_openshift_aws_elb_name }}"
- ec2_elb_lb:
- name: "{{ r_openshift_aws_elb_name }}"
- state: present
- security_group_names: "{{ r_openshift_aws_elb_security_groups }}"
- idle_timeout: "{{ r_openshift_aws_elb_idle_timout }}"
- region: "{{ r_openshift_aws_elb_region }}"
- subnets:
- - "{{ subnetout.subnets[0].id }}"
- health_check: "{{ r_openshift_aws_elb_health_check }}"
- listeners: "{{ r_openshift_aws_elb_listeners[r_openshift_aws_elb_type][r_openshift_aws_elb_direction]
- if 'master' in r_openshift_aws_elb_type or 'infra' in r_openshift_aws_elb_type
- else r_openshift_aws_elb_listeners }}"
- scheme: "{{ r_openshift_aws_elb_scheme }}"
- tags:
- KubernetesCluster: "{{ r_openshift_aws_elb_clusterid }}"
- register: new_elb
-
-# It is necessary to ignore_errors here because the instances are not in 'ready'
-# state when first added to ELB
-- name: "Add instances to ELB {{ r_openshift_aws_elb_name }}"
- ec2_elb:
- instance_id: "{{ item.id }}"
- ec2_elbs: "{{ r_openshift_aws_elb_name }}"
- state: present
- region: "{{ r_openshift_aws_elb_region }}"
- wait: False
- with_items: "{{ instancesout.instances }}"
- ignore_errors: True
- retries: 10
- register: elb_call
- until: elb_call|succeeded
-
-- debug:
- msg: "{{ item }}"
- with_items:
- - "{{ new_elb }}"
diff --git a/roles/openshift_aws_iam_kms/README.md b/roles/openshift_aws_iam_kms/README.md
deleted file mode 100644
index 9468e785c..000000000
--- a/roles/openshift_aws_iam_kms/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-openshift_aws_iam_kms
-=========
-
-Ansible role to create AWS IAM KMS keys for encryption
-
-Requirements
-------------
-
-Ansible Modules:
-
-oo_iam_kms
-
-Role Variables
---------------
-
-- r_openshift_aws_iam_kms_region: AWS region to create KMS key
-- r_openshift_aws_iam_kms_alias: Alias name to assign to created KMS key
-
-Dependencies
-------------
-
-lib_utils
-
-Example Playbook
-----------------
-```yaml
-- include_role:
- name: openshift_aws_iam_kms
- vars:
- r_openshift_aws_iam_kms_region: 'us-east-1'
- r_openshift_aws_iam_kms_alias: 'alias/clusterABC_kms'
-```
-
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_iam_kms/defaults/main.yml b/roles/openshift_aws_iam_kms/defaults/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_aws_iam_kms/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_aws_iam_kms/meta/main.yml b/roles/openshift_aws_iam_kms/meta/main.yml
deleted file mode 100644
index e29aaf96b..000000000
--- a/roles/openshift_aws_iam_kms/meta/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-galaxy_info:
- author: OpenShift
- description: AWS IAM KMS setup and management
- company: Red Hat, Inc
- license: ASL 2.0
- min_ansible_version: 1.2
- platforms:
- - name: EL
- versions:
- - 7
-dependencies:
-- lib_utils
diff --git a/roles/openshift_aws_iam_kms/tasks/main.yml b/roles/openshift_aws_iam_kms/tasks/main.yml
deleted file mode 100644
index 32aac2666..000000000
--- a/roles/openshift_aws_iam_kms/tasks/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- fail:
- msg: "{{ item.name }} needs to be defined."
- when: item.cond | bool
- with_items:
- - name: "{{ r_openshift_aws_iam_kms_alias }}"
- cond: "{{ r_openshift_aws_iam_kms_alias is undefined }}"
- - name: "{{ r_openshift_aws_iam_kms_region }}"
- cond: "{{ r_openshift_aws_iam_kms_region is undefined }}"
-
-- name: Create IAM KMS key with alias
- oo_iam_kms:
- state: present
- alias: "{{ r_openshift_aws_iam_kms_alias }}"
- region: "{{ r_openshift_aws_iam_kms_region }}"
- register: created_kms
-
-- debug: var=created_kms.results
diff --git a/roles/openshift_aws_launch_config/README.md b/roles/openshift_aws_launch_config/README.md
deleted file mode 100644
index 52b7e83b6..000000000
--- a/roles/openshift_aws_launch_config/README.md
+++ /dev/null
@@ -1,72 +0,0 @@
-openshift_aws_launch_config
-=========
-
-Ansible role to create an AWS launch config for a scale group.
-
-This includes the AMI, volumes, user_data, etc.
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-- r_openshift_aws_launch_config_name: "{{ launch_config_name }}"
-- r_openshift_aws_launch_config_clusterid: "{{ clusterid }}"
-- r_openshift_aws_launch_config_region: "{{ region }}"
-- r_openshift_aws_launch_config: "{{ node_group_config }}"
-```yaml
- master:
- instance_type: m4.xlarge
- ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: False
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 3
- desired_size: 3
- tags:
- host-type: master
- sub-host-type: default
- wait_for_instances: True
-```
-- r_openshift_aws_launch_config_type: compute
-- r_openshift_aws_launch_config_custom_image: ami-xxxxx
-- r_openshift_aws_launch_config_bootstrap_token: <string of kubeconfig>
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
- - name: create compute nodes config
- include_role:
- name: openshift_aws_launch_config
- vars:
- r_openshift_aws_launch_config_name: "{{ launch_config_name }}"
- r_openshift_aws_launch_config_clusterid: "{{ clusterid }}"
- r_openshift_aws_launch_config_region: "{{ region }}"
- r_openshift_aws_launch_config: "{{ node_group_config }}"
- r_openshift_aws_launch_config_type: compute
- r_openshift_aws_launch_config_custom_image: ami-1234
- r_openshift_aws_launch_config_bootstrap_token: abcd
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_launch_config/defaults/main.yml b/roles/openshift_aws_launch_config/defaults/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_aws_launch_config/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_aws_launch_config/meta/main.yml b/roles/openshift_aws_launch_config/meta/main.yml
deleted file mode 100644
index e61670cc2..000000000
--- a/roles/openshift_aws_launch_config/meta/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-galaxy_info:
- author: OpenShift
- description: Openshift AWS VPC creation
- company: Red Hat, Inc
- license: ASL 2.0
- min_ansible_version: 2.3
- platforms:
- - name: EL
- versions:
- - 7
-dependencies: []
diff --git a/roles/openshift_aws_launch_config/tasks/main.yml b/roles/openshift_aws_launch_config/tasks/main.yml
deleted file mode 100644
index 437cf1f71..000000000
--- a/roles/openshift_aws_launch_config/tasks/main.yml
+++ /dev/null
@@ -1,50 +0,0 @@
----
-- name: fail when params are not set
- fail:
- msg: Please specify the role parameters.
- when:
- - r_openshift_aws_launch_config_cluseterid is undefined
- - r_openshift_aws_launch_config_type is undefined
- - r_openshift_aws_launch_config_region is undefined
- - r_openshift_aws_launch_config is undefined
-
-- name: fetch the security groups for launch config
- ec2_group_facts:
- filters:
- group-name:
- - "{{ r_openshift_aws_launch_config_clusterid }}" # default sg
- - "{{ r_openshift_aws_launch_config_clusterid }}_{{ r_openshift_aws_launch_config_type }}" # node type sg
- - "{{ r_openshift_aws_launch_config_clusterid }}_{{ r_openshift_aws_launch_config_type }}_k8s" # node type sg k8s
- region: "{{ r_openshift_aws_launch_config_region }}"
- register: ec2sgs
-
-# Create the scale group config
-- name: Create the node scale group config
- ec2_lc:
- name: "{{ r_openshift_aws_launch_config_name }}"
- region: "{{ r_openshift_aws_launch_config_region }}"
- image_id: "{{ r_openshift_aws_launch_config_custom_image if 'ami-' in r_openshift_aws_launch_config_custom_image else r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].ami }}"
- instance_type: "{{ r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].instance_type }}"
- security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id')| list }}"
- user_data: |-
- #cloud-config
- {% if r_openshift_aws_launch_config_type != 'master' %}
- write_files:
- - path: /root/csr_kubeconfig
- owner: root:root
- permissions: '0640'
- content: {{ r_openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }}
- - path: /root/openshift_settings
- owner: root:root
- permissions: '0640'
- content:
- openshift_type: "{{ r_openshift_aws_launch_config_type }}"
- runcmd:
- - [ systemctl, enable, atomic-openshift-node]
- - [ systemctl, start, atomic-openshift-node]
- {% endif %}
- key_name: "{{ r_openshift_aws_launch_config.ssh_key_name }}"
- ebs_optimized: False
- volumes: "{{ r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].volumes }}"
- assign_public_ip: True
- register: test
diff --git a/roles/openshift_aws_launch_config/templates/cloud-init.j2 b/roles/openshift_aws_launch_config/templates/cloud-init.j2
deleted file mode 100644
index 1a1e29550..000000000
--- a/roles/openshift_aws_launch_config/templates/cloud-init.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-{% if r_openshift_aws_launch_config_bootstrap_token is defined and r_openshift_aws_launch_config_bootstrap_token is not '' %}
-#cloud-config
-write_files:
-- path: /root/csr_kubeconfig
- owner: root:root
- permissions: '0640'
- content: |-
- {{ r_openshift_aws_launch_config_bootstrap_token }}
-{% endif %}
diff --git a/roles/openshift_aws_node_group/README.md b/roles/openshift_aws_node_group/README.md
deleted file mode 100644
index c32c57bc5..000000000
--- a/roles/openshift_aws_node_group/README.md
+++ /dev/null
@@ -1,77 +0,0 @@
-openshift_aws_node_group
-=========
-
-Ansible role to create an aws node group.
-
-This includes the security group, launch config, and scale group.
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-```yaml
-- r_openshift_aws_node_group_name: myscalegroup
-- r_openshift_aws_node_group_clusterid: myclusterid
-- r_openshift_aws_node_group_region: us-east-1
-- r_openshift_aws_node_group_lc_name: launch_config
-- r_openshift_aws_node_group_type: master|infra|compute
-- r_openshift_aws_node_group_config: "{{ node_group_config }}"
-```yaml
-master:
- instance_type: m4.xlarge
- ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: False
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 3
- desired_size: 3
- tags:
- host-type: master
- sub-host-type: default
- wait_for_instances: True
-```
-- r_openshift_aws_node_group_subnet_name: "{{ subnet_name }}"
-
-```yaml
-us-east-1a # name of subnet
-```
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
- - name: "create {{ openshift_build_node_type }} node groups"
- include_role:
- name: openshift_aws_node_group
- vars:
- r_openshift_aws_node_group_name: "{{ clusterid }} openshift compute"
- r_openshift_aws_node_group_lc_name: "{{ launch_config_name }}"
- r_openshift_aws_node_group_clusterid: "{{ clusterid }}"
- r_openshift_aws_node_group_region: "{{ region }}"
- r_openshift_aws_node_group_config: "{{ node_group_config }}"
- r_openshift_aws_node_group_type: compute
- r_openshift_aws_node_group_subnet_name: "{{ subnet_name }}"
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_node_group/defaults/main.yml b/roles/openshift_aws_node_group/defaults/main.yml
deleted file mode 100644
index 44c5116a1..000000000
--- a/roles/openshift_aws_node_group/defaults/main.yml
+++ /dev/null
@@ -1,58 +0,0 @@
----
-r_openshift_aws_node_group_type: master
-
-r_openshift_aws_node_group_config:
- tags:
- clusterid: "{{ r_openshift_aws_node_group_clusterid }}"
- master:
- instance_type: m4.xlarge
- ami: "{{ r_openshift_aws_node_group_ami }}"
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: False
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 3
- desired_size: 3
- tags:
- host-type: master
- sub-host-type: default
- wait_for_instances: True
- compute:
- instance_type: m4.xlarge
- ami: "{{ r_openshift_aws_node_group_ami }}"
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: True
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 100
- desired_size: 3
- tags:
- host-type: node
- sub-host-type: compute
- infra:
- instance_type: m4.xlarge
- ami: "{{ r_openshift_aws_node_group_ami }}"
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: True
- health_check:
- period: 60
- type: EC2
- min_size: 2
- max_size: 20
- desired_size: 2
- tags:
- host-type: node
- sub-host-type: infra
diff --git a/roles/openshift_aws_node_group/tasks/main.yml b/roles/openshift_aws_node_group/tasks/main.yml
deleted file mode 100644
index 6f5364b03..000000000
--- a/roles/openshift_aws_node_group/tasks/main.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-- name: validate role inputs
- fail:
- msg: Please pass in the required role variables
- when:
- - r_openshift_aws_node_group_clusterid is not defined
- - r_openshift_aws_node_group_region is not defined
- - r_openshift_aws_node_group_subnet_name is not defined
-
-- name: fetch the subnet to use in scale group
- ec2_vpc_subnet_facts:
- region: "{{ r_openshift_aws_node_group_region }}"
- filters:
- "tag:Name": "{{ r_openshift_aws_node_group_subnet_name }}"
- register: subnetout
-
-- name: Create the scale group
- ec2_asg:
- name: "{{ r_openshift_aws_node_group_name }}"
- launch_config_name: "{{ r_openshift_aws_node_group_lc_name }}"
- health_check_period: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].health_check.period }}"
- health_check_type: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].health_check.type }}"
- min_size: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].min_size }}"
- max_size: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].max_size }}"
- desired_capacity: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].desired_size }}"
- region: "{{ r_openshift_aws_node_group_region }}"
- termination_policies: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].termination_policy if 'termination_policy' in r_openshift_aws_node_group_config[r_openshift_aws_node_group_type] else omit }}"
- load_balancers: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].elbs if 'elbs' in r_openshift_aws_node_group_config[r_openshift_aws_node_group_type] else omit }}"
- wait_for_instances: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].wait_for_instances | default(False)}}"
- vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
- tags:
- - "{{ r_openshift_aws_node_group_config.tags | combine(r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].tags) }}"
diff --git a/roles/openshift_aws_s3/README.md b/roles/openshift_aws_s3/README.md
deleted file mode 100644
index afafe61cf..000000000
--- a/roles/openshift_aws_s3/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-openshift_aws_s3
-=========
-
-Ansible role to create an s3 bucket
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- r_openshift_aws_s3_clusterid: myclusterid
-- r_openshift_aws_s3_region: us-east-1
-- r_openshift_aws_s3_mode: create|delete
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-- name: create an s3 bucket
- include_role:
- name: openshift_aws_s3
- vars:
- r_openshift_aws_s3_clusterid: mycluster
- r_openshift_aws_s3_region: us-east-1
- r_openshift_aws_s3_mode: create
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_s3/tasks/main.yml b/roles/openshift_aws_s3/tasks/main.yml
deleted file mode 100644
index 46bd781bd..000000000
--- a/roles/openshift_aws_s3/tasks/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Create an s3 bucket
- s3:
- bucket: "{{ r_openshift_aws_s3_clusterid }}"
- mode: "{{ r_openshift_aws_s3_mode }}"
- region: "{{ r_openshift_aws_s3_region }}"
diff --git a/roles/openshift_aws_sg/README.md b/roles/openshift_aws_sg/README.md
deleted file mode 100644
index eeb76bbb6..000000000
--- a/roles/openshift_aws_sg/README.md
+++ /dev/null
@@ -1,59 +0,0 @@
-openshift_aws_sg
-=========
-
-Ansible role to create an aws security groups
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- r_openshift_aws_sg_clusterid: myclusterid
-- r_openshift_aws_sg_region: us-east-1
-- r_openshift_aws_sg_type: master|infra|compute
-```yaml
-# defaults/main.yml
- default:
- name: "{{ r_openshift_aws_sg_clusterid }}"
- desc: "{{ r_openshift_aws_sg_clusterid }} default"
- rules:
- - proto: tcp
- from_port: 22
- to_port: 22
- cidr_ip: 0.0.0.0/0
- - proto: all
- from_port: all
- to_port: all
- group_name: "{{ r_openshift_aws_sg_clusterid }}"
-```
-
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-- name: create security groups for master
- include_role:
- name: openshift_aws_sg
- vars:
- r_openshift_aws_sg_clusterid: mycluster
- r_openshift_aws_sg_region: us-east-1
- r_openshift_aws_sg_type: master
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_sg/defaults/main.yml b/roles/openshift_aws_sg/defaults/main.yml
deleted file mode 100644
index 9c480d337..000000000
--- a/roles/openshift_aws_sg/defaults/main.yml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-r_openshift_aws_sg_sg:
- default:
- name: "{{ r_openshift_aws_sg_clusterid }}"
- desc: "{{ r_openshift_aws_sg_clusterid }} default"
- rules:
- - proto: tcp
- from_port: 22
- to_port: 22
- cidr_ip: 0.0.0.0/0
- - proto: all
- from_port: all
- to_port: all
- group_name: "{{ r_openshift_aws_sg_clusterid }}"
- master:
- name: "{{ r_openshift_aws_sg_clusterid }}_master"
- desc: "{{ r_openshift_aws_sg_clusterid }} master instances"
- rules:
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- - proto: tcp
- from_port: 443
- to_port: 443
- cidr_ip: 0.0.0.0/0
- compute:
- name: "{{ r_openshift_aws_sg_clusterid }}_compute"
- desc: "{{ r_openshift_aws_sg_clusterid }} compute node instances"
- infra:
- name: "{{ r_openshift_aws_sg_clusterid }}_infra"
- desc: "{{ r_openshift_aws_sg_clusterid }} infra node instances"
- rules:
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- - proto: tcp
- from_port: 443
- to_port: 443
- cidr_ip: 0.0.0.0/0
- - proto: tcp
- from_port: 30000
- to_port: 32000
- cidr_ip: 0.0.0.0/0
- etcd:
- name: "{{ r_openshift_aws_sg_clusterid }}_etcd"
- desc: "{{ r_openshift_aws_sg_clusterid }} etcd instances"
diff --git a/roles/openshift_aws_sg/tasks/main.yml b/roles/openshift_aws_sg/tasks/main.yml
deleted file mode 100644
index 2294fdcc9..000000000
--- a/roles/openshift_aws_sg/tasks/main.yml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-- name: Validate role inputs
- fail:
- msg: Please ensure to pass the correct variables
- when:
- - r_openshift_aws_sg_region is undefined
- - r_openshift_aws_sg_region is undefined
-
-
-- name: Fetch the VPC for vpc.id
- ec2_vpc_net_facts:
- region: "{{ r_openshift_aws_sg_region }}"
- filters:
- "tag:Name": "{{ r_openshift_aws_sg_clusterid }}"
- register: vpcout
-
-- name: Create default security group for cluster
- ec2_group:
- name: "{{ r_openshift_aws_sg_sg.default.name }}"
- description: "{{ r_openshift_aws_sg_sg.default.desc }}"
- region: "{{ r_openshift_aws_sg_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- rules: "{{ r_openshift_aws_sg_sg.default.rules | default(omit, True)}}"
- register: sg_default_created
-
-- name: create the node group sgs
- ec2_group:
- name: "{{ item.name}}"
- description: "{{ item.desc }}"
- rules: "{{ item.rules if 'rules' in item else [] }}"
- region: "{{ r_openshift_aws_sg_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- register: sg_create
- with_items:
- - "{{ r_openshift_aws_sg_sg[r_openshift_aws_sg_type]}}"
-
-- name: create the k8s sgs for the node group
- ec2_group:
- name: "{{ item.name }}_k8s"
- description: "{{ item.desc }} for k8s"
- region: "{{ r_openshift_aws_sg_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- register: k8s_sg_create
- with_items:
- - "{{ r_openshift_aws_sg_sg[r_openshift_aws_sg_type] }}"
-
-- name: tag sg groups with proper tags
- ec2_tag:
- tags:
- KubernetesCluster: "{{ r_openshift_aws_sg_clusterid }}"
- resource: "{{ item.group_id }}"
- region: "{{ r_openshift_aws_sg_region }}"
- with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws_ssh_keys/README.md b/roles/openshift_aws_ssh_keys/README.md
deleted file mode 100644
index 4f8667918..000000000
--- a/roles/openshift_aws_ssh_keys/README.md
+++ /dev/null
@@ -1,49 +0,0 @@
-openshift_aws_ssh_keys
-=========
-
-Ansible role for managing SSH keys
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- r_openshift_aws_ssh_keys_users: list of dicts of users
-- r_openshift_aws_ssh_keys_region: ec2_region to install the keys
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-users:
-- username: user1
- pub_key: <user1 ssh public key>
-- username: user2
- pub_key: <user2 ssh public key>
-
-region: us-east-1
-
-- include_role:
- name: openshift_aws_ssh_keys
- vars:
- r_openshift_aws_ssh_keys_users: "{{ users }}"
- r_openshift_aws_ssh_keys_region: "{{ region }}"
-```
-
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_vpc/README.md b/roles/openshift_aws_vpc/README.md
deleted file mode 100644
index d88cf0581..000000000
--- a/roles/openshift_aws_vpc/README.md
+++ /dev/null
@@ -1,62 +0,0 @@
-openshift_aws_vpc
-=========
-
-Ansible role to create a default AWS VPC
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- r_openshift_aws_vpc_clusterid: "{{ clusterid }}"
-- r_openshift_aws_vpc_cidr: 172.31.48.0/20
-- r_openshift_aws_vpc_subnets: "{{ subnets }}"
-```yaml
- subnets:
- us-east-1: # These are us-east-1 region defaults. Ensure this matches your region
- - cidr: 172.31.48.0/20
- az: "us-east-1c"
- - cidr: 172.31.32.0/20
- az: "us-east-1e"
- - cidr: 172.31.16.0/20
- az: "us-east-1a"
-```
-- r_openshift_aws_vpc_region: "{{ region }}"
-- r_openshift_aws_vpc_tags: dict of tags to apply to vpc
-- r_openshift_aws_vpc_name: "{{ vpc_name | default(clusterid) }}"
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-
-```yaml
- - name: create default vpc
- include_role:
- name: openshift_aws_vpc
- vars:
- r_openshift_aws_vpc_clusterid: mycluster
- r_openshift_aws_vpc_cidr: 172.31.48.0/20
- r_openshift_aws_vpc_subnets: "{{ subnets }}"
- r_openshift_aws_vpc_region: us-east-1
- r_openshift_aws_vpc_tags: {}
- r_openshift_aws_vpc_name: mycluster
-
-```
-
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_vpc/defaults/main.yml b/roles/openshift_aws_vpc/defaults/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_aws_vpc/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_cfme/defaults/main.yml b/roles/openshift_cfme/defaults/main.yml
index 8aa57e75a..b82c2e602 100644
--- a/roles/openshift_cfme/defaults/main.yml
+++ b/roles/openshift_cfme/defaults/main.yml
@@ -27,9 +27,6 @@ openshift_cfme_pv_data:
# Tuning parameter to use more than 5 images at once from an ImageStream
openshift_cfme_maxImagesBulkImportedPerRepository: 100
-# Hostname/IP of the NFS server. Currently defaults to first master
-openshift_cfme_nfs_server: "{{ groups.nfs.0 }}"
-openshift_cfme_nfs_directory: "/exports"
# TODO: Refactor '_install_app' variable. This is just for testing but
# maybe in the future it should control the entire yes/no for CFME.
#
diff --git a/roles/openshift_cfme/meta/main.yml b/roles/openshift_cfme/meta/main.yml
index 9200f2c3c..162d817f0 100644
--- a/roles/openshift_cfme/meta/main.yml
+++ b/roles/openshift_cfme/meta/main.yml
@@ -16,5 +16,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_utils
-- role: openshift_common
- role: openshift_master_facts
diff --git a/roles/openshift_cfme/tasks/nfs.yml b/roles/openshift_cfme/tasks/nfs.yml
index 8db45492e..ca04628a8 100644
--- a/roles/openshift_cfme/tasks/nfs.yml
+++ b/roles/openshift_cfme/tasks/nfs.yml
@@ -1,6 +1,13 @@
---
# Tasks to statically provision NFS volumes
# Include if not using dynamic volume provisioning
+
+- name: Set openshift_cfme_nfs_server fact
+ when: openshift_cfme_nfs_server is not defined
+ set_fact:
+ # Hostname/IP of the NFS server. Currently defaults to first master
+ openshift_cfme_nfs_server: "{{ oo_nfs_to_config.0 }}"
+
- name: Ensure the /exports/ directory exists
file:
path: /exports/
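With the default moved out of defaults/main.yml, deployers who do not want the first NFS host can still pin the server from inventory; an illustrative override:
```yaml
# Inventory group_vars (illustrative): bypass the oo_nfs_to_config lookup
openshift_cfme_nfs_server: nfs.example.com
```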
diff --git a/roles/openshift_cli/meta/main.yml b/roles/openshift_cli/meta/main.yml
index 04a1ce873..29ed82783 100644
--- a/roles/openshift_cli/meta/main.yml
+++ b/roles/openshift_cli/meta/main.yml
@@ -14,5 +14,4 @@ galaxy_info:
dependencies:
- role: openshift_docker
when: not skip_docker_role | default(False) | bool
-- role: openshift_common
- role: openshift_facts
diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md
deleted file mode 100644
index 2a271854b..000000000
--- a/roles/openshift_common/README.md
+++ /dev/null
@@ -1,45 +0,0 @@
-OpenShift/Atomic Enterprise Common
-===================================
-
-OpenShift/Atomic Enterprise common installation and configuration tasks.
-
-Requirements
-------------
-
-A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
-rhel-7-server-extra-rpms, and rhel-7-server-ose-3.0-rpms repos.
-
-Role Variables
---------------
-
-| Name | Default value | |
-|---------------------------|-------------------|---------------------------------------------|
-| openshift_cluster_id | default | Cluster name if multiple OpenShift clusters |
-| openshift_debug_level | 2 | Global openshift debug log verbosity |
-| openshift_hostname | UNDEF | Internal hostname to use for this host (this value will set the hostname on the system) |
-| openshift_ip | UNDEF | Internal IP address to use for this host |
-| openshift_public_hostname | UNDEF | Public hostname to use for this host |
-| openshift_public_ip | UNDEF | Public IP address to use for this host |
-| openshift_portal_net | UNDEF | Service IP CIDR |
-
-Dependencies
-------------
-
-os_firewall
-openshift_facts
-openshift_repos
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/openshift_common/defaults/main.yml b/roles/openshift_common/defaults/main.yml
deleted file mode 100644
index 267c03605..000000000
--- a/roles/openshift_common/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-openshift_cluster_id: 'default'
-openshift_debug_level: 2
diff --git a/roles/openshift_common/meta/main.yml b/roles/openshift_common/meta/main.yml
deleted file mode 100644
index 7cc95d8fa..000000000
--- a/roles/openshift_common/meta/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-galaxy_info:
- author: Jason DeTiberus
- description: OpenShift Common
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 1.7
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
-dependencies:
-- role: openshift_facts
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
deleted file mode 100644
index a0bd6c860..000000000
--- a/roles/openshift_common/tasks/main.yml
+++ /dev/null
@@ -1,78 +0,0 @@
----
-- fail:
- msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
-
-- fail:
- msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool
-
-- fail:
- msg: Nuage sdn can not be used with flannel
- when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
-
-- fail:
- msg: Contiv can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool
-
-- fail:
- msg: Contiv can not be used with flannel
- when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool
-
-- fail:
- msg: Contiv can not be used with nuage
- when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool
-
-- fail:
- msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool
-
-- fail:
- msg: The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both.
- when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool
-
-- fail:
- msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both
- when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool
-
-- fail:
- msg: Calico can not be used with Contiv in Openshift. Set either openshift_use_calico or openshift_use_contiv, but not both
- when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool
-
-- fail:
- msg: openshift_hostname must be 63 characters or less
- when: openshift_hostname is defined and openshift_hostname | length > 63
-
-- name: Set common Cluster facts
- openshift_facts:
- role: common
- local_facts:
- install_examples: "{{ openshift_install_examples | default(True) }}"
- use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
- sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
- use_flannel: "{{ openshift_use_flannel | default(None) }}"
- use_calico: "{{openshift_use_calico | default(None) }}"
- use_nuage: "{{ openshift_use_nuage | default(None) }}"
- use_contiv: "{{ openshift_use_contiv | default(None) }}"
- use_manageiq: "{{ openshift_use_manageiq | default(None) }}"
- data_dir: "{{ openshift_data_dir | default(None) }}"
- use_dnsmasq: "{{ openshift_use_dnsmasq | default(None) }}"
-
-- name: Install the base package for versioning
- package:
- name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
- state: present
- when: not openshift.common.is_containerized | bool
-
-- name: Set version facts
- openshift_facts:
-
-# For enterprise versions < 3.1 and origin versions < 1.1 we want to set the
-# hostname by default.
-- set_fact:
- set_hostname_default: "{{ not openshift.common.version_gte_3_1_or_1_1 }}"
-
-- name: Set hostname
- command: >
- hostnamectl set-hostname {{ openshift.common.hostname }}
- when: openshift_set_hostname | default(set_hostname_default) | bool
diff --git a/roles/openshift_examples/meta/main.yml b/roles/openshift_examples/meta/main.yml
index 5cfda1c89..f3fe2dcbe 100644
--- a/roles/openshift_examples/meta/main.yml
+++ b/roles/openshift_examples/meta/main.yml
@@ -11,5 +11,4 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies:
-- role: openshift_common
+dependencies: []
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index cf78b4a75..517e0231d 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -449,78 +449,6 @@ def normalize_provider_facts(provider, metadata):
return facts
-def set_flannel_facts_if_unset(facts):
- """ Set flannel facts if not already present in facts dict
- dict: the facts dict updated with the flannel facts if
- missing
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with the flannel
- facts if they were not already present
-
- """
- if 'common' in facts:
- if 'use_flannel' not in facts['common']:
- use_flannel = False
- facts['common']['use_flannel'] = use_flannel
- return facts
-
-
-def set_calico_facts_if_unset(facts):
- """ Set calico facts if not already present in facts dict
- dict: the facts dict updated with the calico facts if
- missing
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with the calico
- facts if they were not already present
-
- """
- if 'common' in facts:
- if 'use_calico' not in facts['common']:
- use_calico = False
- facts['common']['use_calico'] = use_calico
- return facts
-
-
-def set_nuage_facts_if_unset(facts):
- """ Set nuage facts if not already present in facts dict
- dict: the facts dict updated with the nuage facts if
- missing
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with the nuage
- facts if they were not already present
-
- """
- if 'common' in facts:
- if 'use_nuage' not in facts['common']:
- use_nuage = False
- facts['common']['use_nuage'] = use_nuage
- return facts
-
-
-def set_contiv_facts_if_unset(facts):
- """ Set contiv facts if not already present in facts dict
- dict: the facts dict updated with the contiv facts if
- missing
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with the contiv
- facts if they were not already present
-
- """
- if 'common' in facts:
- if 'use_contiv' not in facts['common']:
- use_contiv = False
- facts['common']['use_contiv'] = use_contiv
- return facts
-
-
def set_node_schedulability(facts):
""" Set schedulable facts if not already present in facts dict
Args:
@@ -590,13 +518,8 @@ def set_dnsmasq_facts_if_unset(facts):
"""
if 'common' in facts:
- if 'use_dnsmasq' not in facts['common']:
- facts['common']['use_dnsmasq'] = bool(safe_get_bool(facts['common']['version_gte_3_2_or_1_2']))
if 'master' in facts and 'dns_port' not in facts['master']:
- if safe_get_bool(facts['common']['use_dnsmasq']):
- facts['master']['dns_port'] = 8053
- else:
- facts['master']['dns_port'] = 53
+ facts['master']['dns_port'] = 8053
return facts
@@ -968,27 +891,6 @@ def set_version_facts_if_unset(facts):
return facts
-def set_manageiq_facts_if_unset(facts):
- """ Set manageiq facts. This currently includes common.use_manageiq.
-
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with version facts.
- Raises:
- OpenShiftFactsInternalError:
- """
- if 'common' not in facts:
- if 'version_gte_3_1_or_1_1' not in facts['common']:
- raise OpenShiftFactsInternalError(
- "Invalid invocation: The required facts are not set"
- )
- if 'use_manageiq' not in facts['common']:
- facts['common']['use_manageiq'] = facts['common']['version_gte_3_1_or_1_1']
-
- return facts
-
-
def set_sdn_facts_if_unset(facts, system_facts):
""" Set sdn facts if not already present in facts dict
@@ -999,15 +901,6 @@ def set_sdn_facts_if_unset(facts, system_facts):
dict: the facts dict updated with the generated sdn facts if they
were not already present
"""
- # pylint: disable=too-many-branches
- if 'common' in facts:
- use_sdn = facts['common']['use_openshift_sdn']
- if not (use_sdn == '' or isinstance(use_sdn, bool)):
- use_sdn = safe_get_bool(use_sdn)
- facts['common']['use_openshift_sdn'] = use_sdn
- if 'sdn_network_plugin_name' not in facts['common']:
- plugin = 'redhat/openshift-ovs-subnet' if use_sdn else ''
- facts['common']['sdn_network_plugin_name'] = plugin
if 'master' in facts:
# set defaults for sdn_cluster_network_cidr and sdn_host_subnet_length
@@ -1709,11 +1602,13 @@ def set_builddefaults_facts(facts):
builddefaults['git_no_proxy'] = builddefaults['no_proxy']
# If we're actually defining a builddefaults config then create admission_plugin_config
# then merge builddefaults[config] structure into admission_plugin_config
+
+ # 'config' is the 'openshift_builddefaults_json' inventory variable
if 'config' in builddefaults:
if 'admission_plugin_config' not in facts['master']:
- facts['master']['admission_plugin_config'] = dict()
+ # Scaffold out the full expected datastructure
+ facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}}
facts['master']['admission_plugin_config'].update(builddefaults['config'])
- # if the user didn't actually provide proxy values, delete the proxy env variable defaults.
delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])
return facts
@@ -1996,10 +1891,6 @@ class OpenShiftFacts(object):
facts['current_config'] = get_current_config(facts)
facts = set_url_facts_if_unset(facts)
facts = set_project_cfg_facts_if_unset(facts)
- facts = set_flannel_facts_if_unset(facts)
- facts = set_calico_facts_if_unset(facts)
- facts = set_nuage_facts_if_unset(facts)
- facts = set_contiv_facts_if_unset(facts)
facts = set_node_schedulability(facts)
facts = set_selectors(facts)
facts = set_identity_providers_if_unset(facts)
@@ -2011,7 +1902,6 @@ class OpenShiftFacts(object):
facts = build_api_server_args(facts)
facts = set_version_facts_if_unset(facts)
facts = set_dnsmasq_facts_if_unset(facts)
- facts = set_manageiq_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
facts = set_etcd_facts_if_unset(facts)
facts = set_proxy_facts(facts)
@@ -2039,7 +1929,7 @@ class OpenShiftFacts(object):
self.system_facts['ansible_fqdn']]
hostname = choose_hostname(hostname_values, ip_addr)
- defaults['common'] = dict(use_openshift_sdn=True, ip=ip_addr,
+ defaults['common'] = dict(ip=ip_addr,
public_ip=ip_addr,
deployment_type=deployment_type,
deployment_subtype=deployment_subtype,
@@ -2048,10 +1938,8 @@ class OpenShiftFacts(object):
portal_net='172.30.0.0/16',
client_binary='oc', admin_binary='oadm',
dns_domain='cluster.local',
- install_examples=True,
debug_level=2,
- config_base='/etc/origin',
- data_dir='/var/lib/origin')
+ config_base='/etc/origin')
if 'master' in roles:
defaults['master'] = dict(api_use_ssl=True, api_port='8443',
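To make the admission_plugin_config scaffolding above concrete, the pre-created fact renders (as YAML) roughly as follows before the user's `openshift_builddefaults_json` is merged in; this is a sketch of the datastructure named in the code, not output captured from a run:
```yaml
admission_plugin_config:
  BuildDefaults:
    configuration:
      env: {}   # proxy env defaults; empty keys are pruned by delete_empty_keys
```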
diff --git a/roles/openshift_gcp/tasks/main.yaml b/roles/openshift_gcp/tasks/main.yaml
new file mode 100644
index 000000000..ad205ba33
--- /dev/null
+++ b/roles/openshift_gcp/tasks/main.yaml
@@ -0,0 +1,43 @@
+#
+# This role relies on gcloud invoked via templated bash in order to
+# provide a high performance deployment option. The next logical step
+# is to transition to a deployment manager template which is then instantiated.
+# TODO: use a formal set of role parameters consistent with openshift_aws
+#
+---
+- name: Templatize DNS script
+ template: src=dns.j2.sh dest=/tmp/openshift_gcp_provision_dns.sh mode=u+rx
+- name: Templatize provision script
+ template: src=provision.j2.sh dest=/tmp/openshift_gcp_provision.sh mode=u+rx
+- name: Templatize de-provision script
+ template: src=remove.j2.sh dest=/tmp/openshift_gcp_provision_remove.sh mode=u+rx
+ when:
+ - state | default('present') == 'absent'
+
+- name: Provision GCP DNS domain
+ command: /tmp/openshift_gcp_provision_dns.sh
+ args:
+ chdir: "{{ playbook_dir }}/files"
+ register: dns_provision
+ when:
+ - state | default('present') == 'present'
+
+- name: Ensure that DNS resolves to the hosted zone
+ assert:
+ that:
+ - "lookup('dig', public_hosted_zone, 'qtype=NS', wantlist=True) | sort | join(',') == dns_provision.stdout"
+ msg: "The DNS domain {{ public_hosted_zone }} defined in 'public_hosted_zone' must have NS records pointing to the Google nameservers: '{{ dns_provision.stdout }}' instead of '{{ lookup('dig', public_hosted_zone, 'qtype=NS') }}'."
+ when:
+ - state | default('present') == 'present'
+
+- name: Provision GCP resources
+ command: /tmp/openshift_gcp_provision.sh
+ args:
+ chdir: "{{ playbook_dir }}/files"
+ when:
+ - state | default('present') == 'present'
+
+- name: De-provision GCP resources
+ command: /tmp/openshift_gcp_provision_remove.sh
+ when:
+ - state | default('present') == 'absent'
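A hedged invocation sketch for the new role; `state` toggles provision vs. de-provision as in the tasks above, and the remaining gce_* variables consumed by the templates are assumed to come from inventory:
```yaml
- hosts: localhost
  connection: local
  vars:
    state: present          # 'absent' runs openshift_gcp_provision_remove.sh instead
    public_hosted_zone: example.com
  roles:
  - openshift_gcp
```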
diff --git a/roles/openshift_gcp/templates/dns.j2.sh b/roles/openshift_gcp/templates/dns.j2.sh
new file mode 100644
index 000000000..eacf84b4d
--- /dev/null
+++ b/roles/openshift_gcp/templates/dns.j2.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -euo pipefail
+
+dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
+
+# Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist
+if ! gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" dns managed-zones create "${dns_zone}" --dns-name "{{ public_hosted_zone }}" --description "{{ public_hosted_zone }} domain" >/dev/null
+fi
+
+# Always output the expected nameservers as a comma delimited list
+gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" --format='value(nameServers)' | tr ';' ','
diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh
new file mode 100644
index 000000000..e68e9683f
--- /dev/null
+++ b/roles/openshift_gcp/templates/provision.j2.sh
@@ -0,0 +1,318 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# Create SSH key for GCE
+if [ ! -f "{{ gce_ssh_private_key }}" ]; then
+ ssh-keygen -t rsa -f "{{ gce_ssh_private_key }}" -C gce-provision-cloud-user -N ''
+ ssh-add "{{ gce_ssh_private_key }}" || true
+fi
+
+# Check if the public key for {{ gce_ssh_private_key }} is in the project metadata, and if not, add it there
+pub_key=$(cut -d ' ' -f 2 < "{{ gce_ssh_private_key }}.pub")
+key_tmp_file='/tmp/ocp-gce-keys'
+if ! gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q "$pub_key"; then
+ if gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q ssh-rsa; then
+ gcloud --project "{{ gce_project_id }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file"
+ fi
+ echo -n 'cloud-user:' >> "$key_tmp_file"
+ cat "{{ gce_ssh_private_key }}.pub" >> "$key_tmp_file"
+ gcloud --project "{{ gce_project_id }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}"
+ rm -f "$key_tmp_file"
+fi
+
+metadata=""
+if [[ -n "{{ provision_gce_startup_script_file }}" ]]; then
+ if [[ ! -f "{{ provision_gce_startup_script_file }}" ]]; then
+ echo "Startup script file missing at {{ provision_gce_startup_script_file }} from=$(pwd)"
+ exit 1
+ fi
+ metadata+="--metadata-from-file=startup-script={{ provision_gce_startup_script_file }}"
+fi
+if [[ -n "{{ provision_gce_user_data_file }}" ]]; then
+ if [[ ! -f "{{ provision_gce_user_data_file }}" ]]; then
+ echo "User data file missing at {{ provision_gce_user_data_file }}"
+ exit 1
+ fi
+ if [[ -n "${metadata}" ]]; then
+ metadata+=","
+ else
+ metadata="--metadata-from-file="
+ fi
+ metadata+="user-data={{ provision_gce_user_data_file }}"
+fi
+
+# Select image or image family
+image="{{ provision_gce_registered_image }}"
+if ! gcloud --project "{{ gce_project_id }}" compute images describe "${image}" &>/dev/null; then
+ if ! gcloud --project "{{ gce_project_id }}" compute images describe-from-family "${image}" &>/dev/null; then
+ echo "No compute image or image-family found, create an image named '{{ provision_gce_registered_image }}' to continue'"
+ exit 1
+ fi
+ image="family/${image}"
+fi
+
+### PROVISION THE INFRASTRUCTURE ###
+
+dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
+
+# Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist and exit after printing NS servers
+if ! gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+ echo "DNS zone '${dns_zone}' doesn't exist. Must be configured prior to running this script"
+ exit 1
+fi
+
+# Create network
+if ! gcloud --project "{{ gce_project_id }}" compute networks describe "{{ gce_network_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute networks create "{{ gce_network_name }}" --mode "auto"
+else
+ echo "Network '{{ gce_network_name }}' already exists"
+fi
+
+# Firewall rules in a form:
+# ['name']='parameters for "gcloud compute firewall-rules create"'
+# For all possible parameters see: gcloud compute firewall-rules create --help
+range=""
+if [[ -n "{{ openshift_node_port_range }}" ]]; then
+ range=",tcp:{{ openshift_node_port_range }},udp:{{ openshift_node_port_range }}"
+fi
+declare -A FW_RULES=(
+ ['icmp']='--allow icmp'
+ ['ssh-external']='--allow tcp:22'
+ ['ssh-internal']='--allow tcp:22 --source-tags bastion'
+ ['master-internal']="--allow tcp:2224,tcp:2379,tcp:2380,tcp:4001,udp:4789,udp:5404,udp:5405,tcp:8053,udp:8053,tcp:8444,tcp:10250,tcp:10255,udp:10255,tcp:24224,udp:24224 --source-tags ocp --target-tags ocp-master"
+ ['master-external']="--allow tcp:80,tcp:443,tcp:1936,tcp:8080,tcp:8443${range} --target-tags ocp-master"
+ ['node-internal']="--allow udp:4789,tcp:10250,tcp:10255,udp:10255 --source-tags ocp --target-tags ocp-node,ocp-infra-node"
+ ['infra-node-internal']="--allow tcp:5000 --source-tags ocp --target-tags ocp-infra-node"
+ ['infra-node-external']="--allow tcp:80,tcp:443,tcp:1936${range} --target-tags ocp-infra-node"
+)
+for rule in "${!FW_RULES[@]}"; do
+ ( if ! gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute firewall-rules create "{{ provision_prefix }}$rule" --network "{{ gce_network_name }}" ${FW_RULES[$rule]}
+ else
+ echo "Firewall rule '{{ provision_prefix }}${rule}' already exists"
+ fi ) &
+done
+
+
+# Master IP
+( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-ssl-lb-ip" --global
+else
+ echo "IP '{{ provision_prefix }}master-ssl-lb-ip' already exists"
+fi ) &
+
+# Internal master IP
+( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}"
+else
+ echo "IP '{{ provision_prefix }}master-network-lb-ip' already exists"
+fi ) &
+
+# Router IP
+( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}"
+else
+ echo "IP '{{ provision_prefix }}router-network-lb-ip' already exists"
+fi ) &
+
+
+{% for node_group in provision_gce_node_groups %}
+# configure {{ node_group.name }}
+(
+ if ! gcloud --project "{{ gce_project_id }}" compute instance-templates describe "{{ provision_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute instance-templates create "{{ provision_prefix }}instance-template-{{ node_group.name }}" \
+ --machine-type "{{ node_group.machine_type }}" --network "{{ gce_network_name }}" \
+ --tags "{{ provision_prefix }}ocp,ocp,{{ node_group.tags }}" \
+ --boot-disk-size "{{ node_group.boot_disk_size }}" --boot-disk-type "pd-ssd" \
+ --scopes "logging-write,monitoring-write,useraccounts-ro,service-control,service-management,storage-ro,compute-rw" \
+ --image "${image}" ${metadata}
+ else
+ echo "Instance template '{{ provision_prefix }}instance-template-{{ node_group.name }}' already exists"
+ fi
+
+ # Create instance group
+ if ! gcloud --project "{{ gce_project_id }}" compute instance-groups managed describe "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute instance-groups managed create "{{ provision_prefix }}ig-{{ node_group.suffix }}" \
+ --zone "{{ gce_zone_name }}" --template "{{ provision_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}"
+ else
+ echo "Instance group '{{ provision_prefix }}ig-{{ node_group.suffix }}' already exists"
+ fi
+) &
+{% endfor %}
+
+for i in `jobs -p`; do wait $i; done
+
+
+# Configure the master external LB rules
+(
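+# The external master LB is assembled bottom-up: health check -> backend
+# service -> target TCP proxy -> global forwarding rule, each step referencing
+# the resource created just before it.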
+# Master health check
+if ! gcloud --project "{{ gce_project_id }}" compute health-checks describe "{{ provision_prefix }}master-ssl-lb-health-check" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute health-checks create https "{{ provision_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz"
+else
+ echo "Health check '{{ provision_prefix }}master-ssl-lb-health-check' already exists"
+fi
+
+gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-named-ports "{{ provision_prefix }}ig-m" \
+ --zone "{{ gce_zone_name }}" --named-ports "{{ provision_prefix }}port-name-master:{{ internal_console_port }}"
+
+# Master backend service
+if ! gcloud --project "{{ gce_project_id }}" compute backend-services describe "{{ provision_prefix }}master-ssl-lb-backend" --global &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute backend-services create "{{ provision_prefix }}master-ssl-lb-backend" --health-checks "{{ provision_prefix }}master-ssl-lb-health-check" --port-name "{{ provision_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ provision_gce_master_https_timeout | default('2m') }}"
+ gcloud --project "{{ gce_project_id }}" compute backend-services add-backend "{{ provision_prefix }}master-ssl-lb-backend" --instance-group "{{ provision_prefix }}ig-m" --global --instance-group-zone "{{ gce_zone_name }}"
+else
+ echo "Backend service '{{ provision_prefix }}master-ssl-lb-backend' already exists"
+fi
+
+# Master tcp proxy target
+if ! gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies describe "{{ provision_prefix }}master-ssl-lb-target" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies create "{{ provision_prefix }}master-ssl-lb-target" --backend-service "{{ provision_prefix }}master-ssl-lb-backend"
+else
+ echo "Proxy target '{{ provision_prefix }}master-ssl-lb-target' already exists"
+fi
+
+# Master forwarding rule
+if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-ssl-lb-rule" --global &>/dev/null; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)')
+ gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ provision_prefix }}master-ssl-lb-target"
+else
+ echo "Forwarding rule '{{ provision_prefix }}master-ssl-lb-rule' already exists"
+fi
+) &
+
+
+# Configure the master internal LB rules
+(
+# Internal master health check
+if ! gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}master-network-lb-health-check" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz"
+else
+ echo "Health check '{{ provision_prefix }}master-network-lb-health-check' already exists"
+fi
+
+# Internal master target pool
+if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}master-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}master-network-lb-pool" --http-health-check "{{ provision_prefix }}master-network-lb-health-check" --region "{{ gce_region_name }}"
+else
+ echo "Target pool '{{ provision_prefix }}master-network-lb-pool' already exists"
+fi
+
+# Internal master forwarding rule
+if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}master-network-lb-pool"
+else
+ echo "Forwarding rule '{{ provision_prefix }}master-network-lb-rule' already exists"
+fi
+) &
+
+
+# Configure the infra node rules
+(
+# Router health check
+if ! gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}router-network-lb-health-check" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz"
+else
+ echo "Health check '{{ provision_prefix }}router-network-lb-health-check' already exists"
+fi
+
+# Router target pool
+if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}router-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}router-network-lb-pool" --http-health-check "{{ provision_prefix }}router-network-lb-health-check" --region "{{ gce_region_name }}"
+else
+ echo "Target pool '{{ provision_prefix }}router-network-lb-pool' already exists"
+fi
+
+# Router forwarding rule
+if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}router-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}router-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}router-network-lb-pool"
+else
+ echo "Forwarding rule '{{ provision_prefix }}router-network-lb-rule' already exists"
+fi
+) &
+
+for i in `jobs -p`; do wait $i; done
+
+# set the target pools
+(
+if [[ "ig-m" == "{{ provision_gce_router_network_instance_group }}" ]]; then
+ gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool,{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}"
+else
+ gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool" --zone "{{ gce_zone_name }}"
+ gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}{{ provision_gce_router_network_instance_group }}" --target-pools "{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}"
+fi
+) &
+
+# configure DNS
+(
+# Retry DNS changes until they succeed since this may be a shared resource
+while true; do
+ dns="${TMPDIR:-/tmp}/dns.yaml"
+ rm -f $dns
+
+ # DNS record for master lb
+ if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)')
+ if [[ ! -f $dns ]]; then
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ fi
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP"
+ else
+ echo "DNS record for '{{ openshift_master_cluster_public_hostname }}' already exists"
+ fi
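+  # (The transaction pattern used here and below: "start" creates a local change
+  # file, "add" appends record changes to it, and "execute" submits the whole set
+  # atomically at the end.)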
+
+ # DNS record for internal master lb
+ if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ if [[ ! -f $dns ]]; then
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ fi
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_hostname }}." --type A "$IP"
+ else
+ echo "DNS record for '{{ openshift_master_cluster_hostname }}' already exists"
+ fi
+
+ # DNS record for router lb
+ if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ if [[ ! -f $dns ]]; then
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ fi
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ wildcard_zone }}." --type A "$IP"
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}."
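+    # (Illustrative: with wildcard_zone=apps.example.com this adds an A record for
+    # "apps.example.com." pointing at the router IP and a CNAME for
+    # "*.apps.example.com." pointing back at it.)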
+ else
+ echo "DNS record for '{{ wildcard_zone }}' already exists"
+ fi
+
+ # Commit all DNS changes, retrying if preconditions are not met
+ if [[ -f $dns ]]; then
+ if ! out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
+      if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
+        continue
+      fi
+      # inside "if ! ...", $? no longer holds the gcloud exit code, so fail explicitly
+      echo "${out}" 1>&2
+      exit 1
+ fi
+ fi
+ break
+done
+) &
+
+# Create bucket for registry
+(
+if ! gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then
+ gsutil mb -p "{{ gce_project_id }}" -l "{{ gce_region_name }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}"
+else
+ echo "Bucket '{{ openshift_hosted_registry_storage_gcs_bucket }}' already exists"
+fi
+) &
+
+# wait until all node groups are stable
+{% for node_group in provision_gce_node_groups %}
+# wait for stable {{ node_group.name }}
+( gcloud --project "{{ gce_project_id }}" compute instance-groups managed wait-until-stable "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --timeout=300) &
+{% endfor %}
+
+
+for i in `jobs -p`; do wait $i; done
diff --git a/roles/openshift_gcp/templates/remove.j2.sh b/roles/openshift_gcp/templates/remove.j2.sh
new file mode 100644
index 000000000..41ceab2b5
--- /dev/null
+++ b/roles/openshift_gcp/templates/remove.j2.sh
@@ -0,0 +1,156 @@
+#!/bin/bash
+
+set -euo pipefail
+
+function teardown_cmd() {
+  a=( "$@" )
+ local name=$1
+ a=( "${a[@]:1}" )
+ local flag=0
+ local found=
+ for i in ${a[@]}; do
+ if [[ "$i" == "--"* ]]; then
+ found=true
+ break
+ fi
+ flag=$((flag+1))
+ done
+ if [[ -z "${found}" ]]; then
+ flag=$((flag+1))
+ fi
+ if gcloud --project "{{ gce_project_id }}" ${a[@]::$flag} describe "${name}" ${a[@]:$flag} &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" ${a[@]::$flag} delete -q "${name}" ${a[@]:$flag}
+ fi
+}
+
+function teardown() {
+ for i in `seq 1 20`; do
+ if teardown_cmd $@; then
+ break
+ fi
+ sleep 0.5
+ done
+}
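+# Calling convention sketch (example values, not resources created by this
+# script): `teardown "my-rule" compute forwarding-rules --region "us-east1"`
+# treats everything before the first "--flag" argument as the gcloud command
+# path, so it runs:
+#   gcloud compute forwarding-rules describe|delete "my-rule" --region "us-east1"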
+
+# Preemptively spin down the instances
+{% for node_group in provision_gce_node_groups %}
+# scale down {{ node_group.name }}
+(
+ # performs a delete and scale down as one operation to ensure maximum parallelism
+ if ! instances=$( gcloud --project "{{ gce_project_id }}" compute instance-groups managed list-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --format='value[terminator=","](instance)' ); then
+ exit 0
+ fi
+ instances="${instances%?}"
+ if [[ -z "${instances}" ]]; then
+ echo "warning: No instances in {{ node_group.name }}" 1>&2
+ exit 0
+ fi
+ if ! gcloud --project "{{ gce_project_id }}" compute instance-groups managed delete-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --instances "${instances}"; then
+ echo "warning: Unable to scale down the node group {{ node_group.name }}" 1>&2
+ exit 0
+ fi
+) &
+{% endfor %}
+
+# Bucket for registry
+(
+if gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then
+ gsutil -m rm -r "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}"
+fi
+) &
+
+# DNS
+(
+dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
+if gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+ # Retry DNS changes until they succeed since this may be a shared resource
+ while true; do
+ dns="${TMPDIR:-/tmp}/dns.yaml"
+ rm -f "${dns}"
+
+    # export all DNS records to zone-file format, then turn each line matching
+    # our hostnames into a set of args for "record-sets transaction remove".
+ gcloud dns record-sets export --project "{{ gce_project_id }}" -z "${dns_zone}" --zone-file-format "${dns}"
+ if grep -F -e '{{ openshift_master_cluster_hostname }}' -e '{{ openshift_master_cluster_public_hostname }}' -e '{{ wildcard_zone }}' "${dns}" | \
+ awk '{ print "--name", $1, "--ttl", $2, "--type", $4, $5; }' > "${dns}.input"
+ then
+ rm -f "${dns}"
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ cat "${dns}.input" | xargs -L1 gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file="${dns}" remove -z "${dns_zone}"
+
+ # Commit all DNS changes, retrying if preconditions are not met
+ if ! out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
+        if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
+          continue
+        fi
+        # inside "if ! ...", $? no longer holds the gcloud exit code, so fail explicitly
+        echo "${out}" 1>&2
+        exit 1
+ fi
+ fi
+ rm "${dns}.input"
+ break
+ done
+fi
+) &
+
+(
+# Router network rules
+teardown "{{ provision_prefix }}router-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}"
+teardown "{{ provision_prefix }}router-network-lb-pool" compute target-pools --region "{{ gce_region_name }}"
+teardown "{{ provision_prefix }}router-network-lb-health-check" compute http-health-checks
+teardown "{{ provision_prefix }}router-network-lb-ip" compute addresses --region "{{ gce_region_name }}"
+
+# Internal master network rules
+teardown "{{ provision_prefix }}master-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}"
+teardown "{{ provision_prefix }}master-network-lb-pool" compute target-pools --region "{{ gce_region_name }}"
+teardown "{{ provision_prefix }}master-network-lb-health-check" compute http-health-checks
+teardown "{{ provision_prefix }}master-network-lb-ip" compute addresses --region "{{ gce_region_name }}"
+) &
+
+(
+# Master SSL network rules
+teardown "{{ provision_prefix }}master-ssl-lb-rule" compute forwarding-rules --global
+teardown "{{ provision_prefix }}master-ssl-lb-target" compute target-tcp-proxies
+teardown "{{ provision_prefix }}master-ssl-lb-ip" compute addresses --global
+teardown "{{ provision_prefix }}master-ssl-lb-backend" compute backend-services --global
+teardown "{{ provision_prefix }}master-ssl-lb-health-check" compute health-checks
+) &
+
+# Firewall rules to tear down. Only the keys (rule names) matter here; the
+# values are unused. The rule definitions live in the provisioning script.
+declare -A FW_RULES=(
+ ['icmp']=""
+ ['ssh-external']=""
+ ['ssh-internal']=""
+ ['master-internal']=""
+ ['master-external']=""
+ ['node-internal']=""
+ ['infra-node-internal']=""
+ ['infra-node-external']=""
+)
+for rule in "${!FW_RULES[@]}"; do
+ ( if gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then
+ # retry a few times because this call can be flaky
+ for i in `seq 1 3`; do
+ if gcloud -q --project "{{ gce_project_id }}" compute firewall-rules delete "{{ provision_prefix }}$rule"; then
+ break
+ fi
+ done
+ fi ) &
+done
+
+for i in `jobs -p`; do wait $i; done
+
+{% for node_group in provision_gce_node_groups %}
+# teardown {{ node_group.name }} - any load balancers referencing these groups must be removed
+(
+ teardown "{{ provision_prefix }}ig-{{ node_group.suffix }}" compute instance-groups managed --zone "{{ gce_zone_name }}"
+ teardown "{{ provision_prefix }}instance-template-{{ node_group.name }}" compute instance-templates
+) &
+{% endfor %}
+
+for i in `jobs -p`; do wait $i; done
+
+# Network
+teardown "{{ gce_network_name }}" compute networks
diff --git a/roles/openshift_gcp_image_prep/files/partition.conf b/roles/openshift_gcp_image_prep/files/partition.conf
new file mode 100644
index 000000000..b87e5e0b6
--- /dev/null
+++ b/roles/openshift_gcp_image_prep/files/partition.conf
@@ -0,0 +1,3 @@
+[Service]
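+# When the google-instance-setup service starts, grow partition 1 of /dev/sda and
+# then grow the root XFS filesystem to fill it; the leading "-" tells systemd to
+# ignore a non-zero exit status from either command.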
+ExecStartPost=-/usr/bin/growpart /dev/sda 1
+ExecStartPost=-/sbin/xfs_growfs /
diff --git a/roles/openshift_gcp_image_prep/tasks/main.yaml b/roles/openshift_gcp_image_prep/tasks/main.yaml
new file mode 100644
index 000000000..fee5ab618
--- /dev/null
+++ b/roles/openshift_gcp_image_prep/tasks/main.yaml
@@ -0,0 +1,18 @@
+---
+# GCE images mount XFS filesystems with barrier=1 in /etc/fstab, a mount
+# option that is only valid for ext filesystems; strip it out.
+- name: Remove barrier=1 from XFS fstab entries
+ lineinfile:
+ path: /etc/fstab
+ regexp: '^(.+)xfs(.+?),?barrier=1,?(.*?)$'
+    line: '\1xfs\2 \3'
+ backrefs: yes
+
+- name: Ensure the root filesystem has XFS group quota turned on
+ lineinfile:
+ path: /boot/grub2/grub.cfg
+ regexp: '^(.*)linux16 (.*)$'
+ line: '\1linux16 \2 rootflags=gquota'
+ backrefs: yes
+
+- name: Ensure the root partition grows on startup
+ copy: src=partition.conf dest=/etc/systemd/system/google-instance-setup.service.d/
diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
index 8d35db6b5..326176273 100644
--- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py
+++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
@@ -3,7 +3,10 @@ Ansible action plugin to execute health checks in OpenShift clusters.
"""
import sys
import os
+import base64
import traceback
+import errno
+import json
from collections import defaultdict
from ansible.plugins.action import ActionBase
@@ -38,8 +41,13 @@ class ActionModule(ActionBase):
# storing the information we need in the result.
result['playbook_context'] = task_vars.get('r_openshift_health_checker_playbook_context')
+ # if the user wants to write check results to files, they provide this directory:
+ output_dir = task_vars.get("openshift_checks_output_dir")
+ if output_dir:
+ output_dir = os.path.join(output_dir, task_vars["ansible_host"])
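+            # e.g. openshift_checks_output_dir=/tmp/checks (illustrative) yields per-host
+            # files like /tmp/checks/<host>/result.json and /tmp/checks/<host>/<check>.log.json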
+
try:
- known_checks = self.load_known_checks(tmp, task_vars)
+ known_checks = self.load_known_checks(tmp, task_vars, output_dir)
args = self._task.args
requested_checks = normalize(args.get('checks', []))
@@ -65,21 +73,20 @@ class ActionModule(ActionBase):
for name in resolved_checks:
display.banner("CHECK [{} : {}]".format(name, task_vars["ansible_host"]))
- check = known_checks[name]
- check_results[name] = run_check(name, check, user_disabled_checks)
- if check.changed:
- check_results[name]["changed"] = True
+ check_results[name] = run_check(name, known_checks[name], user_disabled_checks, output_dir)
result["changed"] = any(r.get("changed") for r in check_results.values())
if any(r.get("failed") for r in check_results.values()):
result["failed"] = True
result["msg"] = "One or more checks failed"
+ write_result_to_output_dir(output_dir, result)
return result
- def load_known_checks(self, tmp, task_vars):
+ def load_known_checks(self, tmp, task_vars, output_dir=None):
"""Find all existing checks and return a mapping of names to instances."""
load_checks()
+ want_full_results = bool(output_dir)
known_checks = {}
for cls in OpenShiftCheck.subclasses():
@@ -90,7 +97,12 @@ class ActionModule(ActionBase):
"duplicate check name '{}' in: '{}' and '{}'"
"".format(name, full_class_name(cls), full_class_name(other_cls))
)
- known_checks[name] = cls(execute_module=self._execute_module, tmp=tmp, task_vars=task_vars)
+ known_checks[name] = cls(
+ execute_module=self._execute_module,
+ tmp=tmp,
+ task_vars=task_vars,
+ want_full_results=want_full_results
+ )
return known_checks
@@ -185,9 +197,11 @@ def normalize(checks):
return [name.strip() for name in checks if name.strip()]
-def run_check(name, check, user_disabled_checks):
+def run_check(name, check, user_disabled_checks, output_dir=None):
"""Run a single check if enabled and return a result dict."""
- if name in user_disabled_checks:
+
+ # determine if we're going to run the check (not inactive or disabled)
+ if name in user_disabled_checks or '*' in user_disabled_checks:
return dict(skipped=True, skipped_reason="Disabled by user request")
# pylint: disable=broad-except; capturing exceptions broadly is intentional,
@@ -201,12 +215,134 @@ def run_check(name, check, user_disabled_checks):
if not is_active:
return dict(skipped=True, skipped_reason="Not active for this host")
+ # run the check
+ result = {}
try:
- return check.run()
+ result = check.run()
except OpenShiftCheckException as exc:
- return dict(failed=True, msg=str(exc))
+ check.register_failure(exc)
+ except Exception as exc:
+ check.register_failure("\n".join([str(exc), traceback.format_exc()]))
+
+ # process the check state; compose the result hash, write files as needed
+ if check.changed:
+ result["changed"] = True
+ if check.failures or result.get("failed"):
+ if "msg" in result: # failure result has msg; combine with any registered failures
+ check.register_failure(result.get("msg"))
+ result["failures"] = [(fail.name, str(fail)) for fail in check.failures]
+ result["failed"] = True
+ result["msg"] = "\n".join(str(fail) for fail in check.failures)
+ write_to_output_file(output_dir, name + ".failures.json", result["failures"])
+ if check.logs:
+ write_to_output_file(output_dir, name + ".log.json", check.logs)
+ if check.files_to_save:
+ write_files_to_save(output_dir, check)
+
+ return result
+
+
+def prepare_output_dir(dirname):
+ """Create the directory, including parents. Return bool for success/failure."""
+ try:
+ os.makedirs(dirname)
+ return True
+ except OSError as exc:
+        # creating a directory that already exists raises EEXIST, which is fine;
+        # for any other error, assume the directory is not usable
+ return exc.errno == errno.EEXIST
+
+
+def copy_remote_file_to_dir(check, file_to_save, output_dir, fname):
+ """Copy file from remote host to local file in output_dir, if given."""
+ if not output_dir or not prepare_output_dir(output_dir):
+ return
+ local_file = os.path.join(output_dir, fname)
+
+ # pylint: disable=broad-except; do not need to do anything about failure to write dir/file
+ # and do not want exceptions to break anything.
+ try:
+ # NOTE: it would have been nice to copy the file directly without loading it into
+ # memory, but there does not seem to be a good way to do this via ansible.
+ result = check.execute_module("slurp", dict(src=file_to_save), register=False)
+ if result.get("failed"):
+ display.warning("Could not retrieve file {}: {}".format(file_to_save, result.get("msg")))
+ return
+
+ content = result["content"]
+ if result.get("encoding") == "base64":
+ content = base64.b64decode(content)
+ with open(local_file, "wb") as outfile:
+ outfile.write(content)
+ except Exception as exc:
+ display.warning("Failed writing remote {} to local {}: {}".format(file_to_save, local_file, exc))
+ return
+
+
+def _no_fail(obj):
+ # pylint: disable=broad-except; do not want serialization to fail for any reason
+ try:
+ return str(obj)
+ except Exception:
+ return "[not serializable]"
+
+
+def write_to_output_file(output_dir, filename, data):
+ """If output_dir provided, write data to file. Serialize as JSON if data is not a string."""
+
+ if not output_dir or not prepare_output_dir(output_dir):
+ return
+ filename = os.path.join(output_dir, filename)
+ try:
+ with open(filename, 'w') as outfile:
+ if isinstance(data, string_types):
+ outfile.write(data)
+ else:
+ json.dump(data, outfile, sort_keys=True, indent=4, default=_no_fail)
+ # pylint: disable=broad-except; do not want serialization/write to break for any reason
+ except Exception as exc:
+ display.warning("Could not write output file {}: {}".format(filename, exc))
+
+
+def write_result_to_output_dir(output_dir, result):
+ """If output_dir provided, write the result as json to result.json.
+
+ Success/failure of the write is recorded as "output_files" in the result hash afterward.
+ Otherwise this is much like write_to_output_file.
+ """
+
+ if not output_dir:
+ return
+ if not prepare_output_dir(output_dir):
+ result["output_files"] = "Error creating output directory " + output_dir
+ return
+
+ filename = os.path.join(output_dir, "result.json")
+ try:
+ with open(filename, 'w') as outfile:
+ json.dump(result, outfile, sort_keys=True, indent=4, default=_no_fail)
+ result["output_files"] = "Check results for this host written to " + filename
+ # pylint: disable=broad-except; do not want serialization/write to break for any reason
except Exception as exc:
- return dict(failed=True, msg=str(exc), exception=traceback.format_exc())
+ result["output_files"] = "Error writing check results to {}:\n{}".format(filename, exc)
+
+
+def write_files_to_save(output_dir, check):
+ """Write files to check subdir in output dir."""
+ if not output_dir:
+ return
+ output_dir = os.path.join(output_dir, check.name)
+ seen_file = defaultdict(lambda: 0)
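+    # naming sketch: the first "foo.json" keeps its name; a duplicate is then
+    # written as "foo.json.2", the next as "foo.json.3", and so on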
+ for file_to_save in check.files_to_save:
+ fname = file_to_save.filename
+ while seen_file[fname]: # just to be sure we never re-write a file, append numbers as needed
+ seen_file[fname] += 1
+ fname = "{}.{}".format(fname, seen_file[fname])
+ seen_file[fname] += 1
+ if file_to_save.remote_filename:
+ copy_remote_file_to_dir(check, file_to_save.remote_filename, output_dir, fname)
+ else:
+ write_to_output_file(output_dir, fname, file_to_save.contents)
def full_class_name(cls):
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index 349655966..dcaf87eca 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -10,6 +10,7 @@ import traceback
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
from ansible.utils.color import stringc
+from ansible.module_utils.six import string_types
FAILED_NO_MSG = u'Failed without returning a message.'
@@ -140,11 +141,19 @@ def deduplicate_failures(failures):
Returns a new list of failures such that identical failures from different
hosts are grouped together in a single entry. The relative order of failures
is preserved.
+
+    If a failure contains unhashable values, the original list of failures is returned.
"""
groups = defaultdict(list)
for failure in failures:
group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))
- groups[group_key].append(failure)
+ try:
+ groups[group_key].append(failure)
+ except TypeError:
+            # abort and return the original list of failures when a failure
+            # contains an unhashable value.
+ return failures
+
result = []
for failure in failures:
group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))
@@ -159,7 +168,10 @@ def format_failure(failure):
"""Return a list of pretty-formatted text entries describing a failure, including
relevant information about it. Expect that the list of text entries will be joined
by a newline separator when output to the user."""
- host = u', '.join(failure['host'])
+ if isinstance(failure['host'], string_types):
+ host = failure['host']
+ else:
+ host = u', '.join(failure['host'])
play = failure['play']
task = failure['task']
msg = failure['msg']
diff --git a/roles/openshift_health_checker/library/aos_version.py b/roles/openshift_health_checker/library/aos_version.py
index c8769b511..db3c0b654 100644
--- a/roles/openshift_health_checker/library/aos_version.py
+++ b/roles/openshift_health_checker/library/aos_version.py
@@ -26,15 +26,13 @@ from ansible.module_utils.six import string_types
YUM_IMPORT_EXCEPTION = None
DNF_IMPORT_EXCEPTION = None
-PKG_MGR = None
try:
import yum # pylint: disable=import-error
- PKG_MGR = "yum"
except ImportError as err:
YUM_IMPORT_EXCEPTION = err
+
try:
import dnf # pylint: disable=import-error
- PKG_MGR = "dnf"
except ImportError as err:
DNF_IMPORT_EXCEPTION = err
@@ -51,14 +49,19 @@ def main():
module = AnsibleModule(
argument_spec=dict(
package_list=dict(type="list", required=True),
+ package_mgr=dict(type="str", required=True),
),
supports_check_mode=True
)
- if YUM_IMPORT_EXCEPTION and DNF_IMPORT_EXCEPTION:
+ # determine the package manager to use
+ package_mgr = module.params['package_mgr']
+ if package_mgr not in ('yum', 'dnf'):
+ module.fail_json(msg="package_mgr must be one of: yum, dnf")
+ pkg_mgr_exception = dict(yum=YUM_IMPORT_EXCEPTION, dnf=DNF_IMPORT_EXCEPTION)[package_mgr]
+ if pkg_mgr_exception:
module.fail_json(
- msg="aos_version module could not import yum or dnf: %s %s" %
- (YUM_IMPORT_EXCEPTION, DNF_IMPORT_EXCEPTION)
+ msg="aos_version module could not import {}: {}".format(package_mgr, pkg_mgr_exception)
)
# determine the packages we will look for
@@ -78,7 +81,7 @@ def main():
# get the list of packages available and complain if anything is wrong
try:
- pkgs = _retrieve_available_packages(expected_pkg_names)
+ pkgs = _retrieve_available_packages(package_mgr, expected_pkg_names)
if versioned_pkgs:
_check_precise_version_found(pkgs, _to_dict(versioned_pkgs))
_check_higher_version_found(pkgs, _to_dict(versioned_pkgs))
@@ -93,7 +96,7 @@ def _to_dict(pkg_list):
return {pkg["name"]: pkg for pkg in pkg_list}
-def _retrieve_available_packages(expected_pkgs):
+def _retrieve_available_packages(pkg_mgr, expected_pkgs):
# The openshift excluder prevents unintended updates to openshift
# packages by setting yum excludes on those packages. See:
# https://wiki.centos.org/SpecialInterestGroup/PaaS/OpenShift-Origin-Control-Updates
@@ -103,14 +106,15 @@ def _retrieve_available_packages(expected_pkgs):
# be excluded. So, for our purposes here, disable excludes to see
# what will really be available during an install or upgrade.
- if PKG_MGR == "yum":
+ if pkg_mgr == "yum":
# search for package versions available for openshift pkgs
yb = yum.YumBase() # pylint: disable=invalid-name
yb.conf.disable_excludes = ['all']
try:
- pkgs = yb.pkgSack.returnPackages(patterns=expected_pkgs)
+ pkgs = yb.rpmdb.returnPackages(patterns=expected_pkgs)
+ pkgs += yb.pkgSack.returnPackages(patterns=expected_pkgs)
except yum.Errors.PackageSackError as excinfo:
# you only hit this if *none* of the packages are available
raise AosVersionException('\n'.join([
@@ -118,7 +122,7 @@ def _retrieve_available_packages(expected_pkgs):
'Check your subscription and repo settings.',
str(excinfo),
]))
- elif PKG_MGR == "dnf":
+ elif pkg_mgr == "dnf":
        dbase = dnf.Base()  # pylint: disable=invalid-name
dbase.conf.disable_excludes = ['all']
@@ -127,8 +131,11 @@ def _retrieve_available_packages(expected_pkgs):
dquery = dbase.sack.query()
aquery = dquery.available()
+ iquery = dquery.installed()
- pkgs = list(aquery.filter(name=expected_pkgs))
+ available_pkgs = list(aquery.filter(name=expected_pkgs))
+ installed_pkgs = list(iquery.filter(name=expected_pkgs))
+ pkgs = available_pkgs + installed_pkgs
if not pkgs:
# pkgs list is empty, raise because no expected packages found
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index 02ee1d0f9..28cb53cc5 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -2,8 +2,11 @@
Health checks for OpenShift clusters.
"""
+import json
import operator
import os
+import time
+import collections
from abc import ABCMeta, abstractmethod, abstractproperty
from importlib import import_module
@@ -27,7 +30,7 @@ class OpenShiftCheckException(Exception):
class OpenShiftCheckExceptionList(OpenShiftCheckException):
- """A container for multiple logging errors that may be detected in one check."""
+ """A container for multiple errors that may be detected in one check."""
def __init__(self, errors):
self.errors = errors
super(OpenShiftCheckExceptionList, self).__init__(
@@ -40,26 +43,53 @@ class OpenShiftCheckExceptionList(OpenShiftCheckException):
return self.errors[index]
+FileToSave = collections.namedtuple("FileToSave", "filename contents remote_filename")
+
+
+# pylint: disable=too-many-instance-attributes; all represent significantly different state.
+# Arguably they could be separated into two hashes, one for storing parameters, and one for
+# storing result state; but that smells more like clutter than clarity.
@six.add_metaclass(ABCMeta)
class OpenShiftCheck(object):
- """
- A base class for defining checks for an OpenShift cluster environment.
+ """A base class for defining checks for an OpenShift cluster environment.
- Expect optional params: method execute_module, dict task_vars, and string tmp.
+ Optional init params: method execute_module, dict task_vars, and string tmp
execute_module is expected to have a signature compatible with _execute_module
from ansible plugins/action/__init__.py, e.g.:
def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *args):
This is stored so that it can be invoked in subclasses via check.execute_module("name", args)
which provides the check's stored task_vars and tmp.
+
+ Optional init param: want_full_results
+ If the check can gather logs, tarballs, etc., do so when True; but no need to spend
+ the time if they're not wanted (won't be written to output directory).
"""
- def __init__(self, execute_module=None, task_vars=None, tmp=None):
+ def __init__(self, execute_module=None, task_vars=None, tmp=None, want_full_results=False):
+ # store a method for executing ansible modules from the check
self._execute_module = execute_module
+ # the task variables and tmpdir passed into the health checker task
self.task_vars = task_vars or {}
self.tmp = tmp
+ # a boolean for disabling the gathering of results (files, computations) that won't
+ # actually be recorded/used
+ self.want_full_results = want_full_results
+
+ # mainly for testing purposes; see execute_module_with_retries
+ self._module_retries = 3
+ self._module_retry_interval = 5 # seconds
+ # state to be recorded for inspection after the check runs:
+ #
# set to True when the check changes the host, for accurate total "changed" count
self.changed = False
+ # list of OpenShiftCheckException for check to report (alternative to returning a failed result)
+ self.failures = []
+ # list of FileToSave - files the check specifies to be written locally if so configured
+ self.files_to_save = []
+ # log messages for the check - tuples of (description, msg) where msg is serializable.
+ # These are intended to be a sequential record of what the check observed and determined.
+ self.logs = []
@abstractproperty
def name(self):
@@ -82,7 +112,13 @@ class OpenShiftCheck(object):
@abstractmethod
def run(self):
- """Executes a check, normally implemented as a module."""
+ """Executes a check against a host and returns a result hash similar to Ansible modules.
+
+        The direction going forward is to record state in the check's attributes
+        rather than build a result hash: return an empty hash and let the action
+        plugin fill it in, or raise an OpenShiftCheckException. Returning a
+        populated hash may be deprecated if it proves unnecessary.
+ """
return {}
@classmethod
@@ -94,7 +130,43 @@ class OpenShiftCheck(object):
for subclass in subclass.subclasses():
yield subclass
- def execute_module(self, module_name=None, module_args=None):
+ def register_failure(self, error):
+ """Record in the check that a failure occurred.
+
+        Recorded failures are merged into the result hash for now. They are also
+        saved to <check>.failures.json in the output directory (if one is provided)
+        and recorded as log entries, which end up in <check>.log.json for context.
+ """
+ # It should be an exception; make it one if not
+ if not isinstance(error, OpenShiftCheckException):
+ error = OpenShiftCheckException(str(error))
+ self.failures.append(error)
+ # duplicate it in the logs so it can be seen in the context of any
+ # information that led to the failure
+ self.register_log("failure: " + error.name, str(error))
+
+ def register_log(self, context, msg):
+ """Record an entry for the check log.
+
+        Log entries are intended to provide context for the whole sequence of what
+        the check observed. They are saved as an ordered list in a local check log
+        file. They are not included in the result or in the ansible log; they exist
+        purely for the record.
+ """
+ self.logs.append([context, msg])
+
+ def register_file(self, filename, contents=None, remote_filename=""):
+ """Record a file that a check makes available to be saved individually to output directory.
+
+ Either file contents should be passed in, or a file to be copied from the remote host
+ should be specified. Contents that are not a string are to be serialized as JSON.
+
+ NOTE: When copying a file from remote host, it is slurped into memory as base64, meaning
+ you should avoid using this on huge files (more than say 10M).
+ """
+ if contents is None and not remote_filename:
+ raise OpenShiftCheckException("File data/source not specified; this is a bug in the check.")
+ self.files_to_save.append(FileToSave(filename, contents, remote_filename))
+
+ def execute_module(self, module_name=None, module_args=None, save_as_name=None, register=True):
"""Invoke an Ansible module from a check.
Invoke stored _execute_module, normally copied from the action
@@ -106,6 +178,12 @@ class OpenShiftCheck(object):
Ansible version).
So e.g. check.execute_module("foo", dict(arg1=...))
+
+ save_as_name specifies a file name for saving the result to an output directory,
+ if needed, and is intended to uniquely identify the result of invoking execute_module.
+ If not provided, the module name will be used.
+ If register is set False, then the result won't be registered in logs or files to save.
+
Return: result hash from module execution.
"""
if self._execute_module is None:
@@ -113,7 +191,33 @@ class OpenShiftCheck(object):
self.__class__.__name__ +
" invoked execute_module without providing the method at initialization."
)
- return self._execute_module(module_name, module_args, self.tmp, self.task_vars)
+ result = self._execute_module(module_name, module_args, self.tmp, self.task_vars)
+ if result.get("changed"):
+ self.changed = True
+ for output in ["result", "stdout"]:
+ # output is often JSON; attempt to decode
+ try:
+ result[output + "_json"] = json.loads(result[output])
+ except (KeyError, ValueError):
+ pass
+
+ if register:
+ self.register_log("execute_module: " + module_name, result)
+ self.register_file(save_as_name or module_name + ".json", result)
+ return result
+
+ def execute_module_with_retries(self, module_name, module_args):
+ """Run execute_module and retry on failure."""
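+        # with the defaults set in __init__ (3 retries, 5 seconds apart), a
+        # persistently failing module is attempted up to 5 times over ~20 seconds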
+ result = {}
+ tries = 0
+ while True:
+ res = self.execute_module(module_name, module_args)
+ if tries > self._module_retries or not res.get("failed"):
+ result.update(res)
+ return result
+ result["last_failed"] = res
+ tries += 1
+ time.sleep(self._module_retry_interval)
def get_var(self, *keys, **kwargs):
"""Get deeply nested values from task_vars.
@@ -171,8 +275,12 @@ class OpenShiftCheck(object):
'There is a bug in this check. While trying to convert variable \n'
' "{var}={value}"\n'
'the given converter cannot be used or failed unexpectedly:\n'
- '{error}'.format(var=".".join(keys), value=value, error=error)
- )
+ '{type}: {error}'.format(
+ var=".".join(keys),
+ value=value,
+ type=error.__class__.__name__,
+ error=error
+ ))
@staticmethod
def get_major_minor_version(openshift_image_tag):
@@ -214,7 +322,9 @@ class OpenShiftCheck(object):
mount_point = os.path.dirname(mount_point)
try:
- return mount_for_path[mount_point]
+ mount = mount_for_path[mount_point]
+ self.register_log("mount point for " + path, mount)
+ return mount
except KeyError:
known_mounts = ', '.join('"{}"'.format(mount) for mount in sorted(mount_for_path))
raise OpenShiftCheckException(
@@ -242,7 +352,7 @@ def load_checks(path=None, subpkg=""):
modules = modules + load_checks(os.path.join(path, name), subpkg + "." + name)
continue
- if name.endswith(".py") and not name.startswith(".") and name not in LOADER_EXCLUDES:
+ if name.endswith(".py") and name not in LOADER_EXCLUDES:
modules.append(import_module(__package__ + subpkg + "." + name[:-3]))
return modules
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
index f302fd14b..cdf56e959 100644
--- a/roles/openshift_health_checker/openshift_checks/disk_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -70,6 +70,10 @@ class DiskAvailability(OpenShiftCheck):
# If it is not a number, then it should be a nested dict.
pass
+ self.register_log("recommended thresholds", self.recommended_disk_space_bytes)
+ if user_config:
+ self.register_log("user-configured thresholds", user_config)
+
# TODO: as suggested in
# https://github.com/openshift/openshift-ansible/pull/4436#discussion_r122180021,
# maybe we could support checking disk availability in paths that are
@@ -113,10 +117,7 @@ class DiskAvailability(OpenShiftCheck):
'in your Ansible inventory, and lower the recommended disk space availability\n'
'if necessary for this upgrade.').format(config_bytes)
- return {
- 'failed': True,
- 'msg': msg,
- }
+ self.register_failure(msg)
return {}
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 857a80c74..9c35f0f92 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -32,6 +32,12 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# we use python-docker-py to check local docker for images, and skopeo
# to look for images available remotely without waiting to pull them.
dependencies = ["python-docker-py", "skopeo"]
+ skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false docker://{registry}/{image}"
+
+ def __init__(self, *args, **kwargs):
+ super(DockerImageAvailability, self).__init__(*args, **kwargs)
+ # record whether we could reach a registry or not (and remember results)
+ self.reachable_registries = {}
def is_active(self):
"""Skip hosts with unsupported deployment types."""
@@ -63,13 +69,21 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
unavailable_images = set(missing_images) - set(available_images)
if unavailable_images:
- return {
- "failed": True,
- "msg": (
- "One or more required Docker images are not available:\n {}\n"
- "Configured registries: {}"
- ).format(",\n ".join(sorted(unavailable_images)), ", ".join(registries)),
- }
+ registries = [
+ reg if self.reachable_registries.get(reg, True) else reg + " (unreachable)"
+ for reg in registries
+ ]
+ msg = (
+ "One or more required Docker images are not available:\n {}\n"
+ "Configured registries: {}\n"
+ "Checked by: {}"
+ ).format(
+ ",\n ".join(sorted(unavailable_images)),
+ ", ".join(registries),
+ self.skopeo_img_check_command
+ )
+
+ return dict(failed=True, msg=msg)
return {}
@@ -125,31 +139,31 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
def local_images(self, images):
"""Filter a list of images and return those available locally."""
- return [
- image for image in images
- if self.is_image_local(image)
- ]
+ registries = self.known_docker_registries()
+ found_images = []
+ for image in images:
+ # docker could have the image name as-is or prefixed with any registry
+ imglist = [image] + [reg + "/" + image for reg in registries]
+ if self.is_image_local(imglist):
+ found_images.append(image)
+ return found_images
def is_image_local(self, image):
"""Check if image is already in local docker index."""
result = self.execute_module("docker_image_facts", {"name": image})
- if result.get("failed", False):
- return False
-
- return bool(result.get("images", []))
+ return bool(result.get("images")) and not result.get("failed")
def known_docker_registries(self):
"""Build a list of docker registries available according to inventory vars."""
- docker_facts = self.get_var("openshift", "docker")
- regs = set(docker_facts["additional_registries"])
+        regs = list(self.get_var("openshift", "docker", "additional_registries", default=[]))
deployment_type = self.get_var("openshift_deployment_type")
- if deployment_type == "origin":
- regs.update(["docker.io"])
- elif "enterprise" in deployment_type:
- regs.update(["registry.access.redhat.com"])
+ if deployment_type == "origin" and "docker.io" not in regs:
+ regs.append("docker.io")
+ elif "enterprise" in deployment_type and "registry.access.redhat.com" not in regs:
+ regs.append("registry.access.redhat.com")
- return list(regs)
+ return regs
def available_images(self, images, default_registries):
"""Search remotely for images. Returns: list of images found."""
@@ -162,18 +176,35 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
"""Use Skopeo to determine if required image exists in known registry(s)."""
registries = default_registries
- # if image already includes a registry, only use that
+ # If image already includes a registry, only use that.
+ # NOTE: This logic would incorrectly identify images that do not use a namespace, e.g.
+ # registry.access.redhat.com/rhel7 as if the registry were a namespace.
+ # It's not clear that there's any way to distinguish them, but fortunately
+ # the current set of images all look like [registry/]namespace/name[:version].
if image.count("/") > 1:
registry, image = image.split("/", 1)
registries = [registry]
for registry in registries:
- args = {
- "_raw_params": "timeout 10 skopeo inspect --tls-verify=false "
- "docker://{}/{}".format(registry, image)
- }
- result = self.execute_module("command", args)
+ if registry not in self.reachable_registries:
+ self.reachable_registries[registry] = self.connect_to_registry(registry)
+ if not self.reachable_registries[registry]:
+ continue
+
+ args = {"_raw_params": self.skopeo_img_check_command.format(registry=registry, image=image)}
+ result = self.execute_module_with_retries("command", args)
if result.get("rc", 0) == 0 and not result.get("failed"):
return True
+ if result.get("rc") == 124: # RC 124 == timed out; mark unreachable
+ self.reachable_registries[registry] = False
return False
+
+ def connect_to_registry(self, registry):
+ """Use ansible wait_for module to test connectivity from host to registry. Returns bool."""
+ # test a simple TCP connection
+ host, _, port = registry.partition(":")
+ port = port or 443
+ args = dict(host=host, port=port, state="started", timeout=30)
+ result = self.execute_module("wait_for", args)
+ return result.get("rc", 0) == 0 and not result.get("failed")
diff --git a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
index 7fc843fd7..986a01f38 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
@@ -72,7 +72,7 @@ class Elasticsearch(LoggingCheck):
for pod_name in pods_by_name.keys():
# Compare what each ES node reports as master and compare for split brain
get_master_cmd = self._build_es_curl_cmd(pod_name, "https://localhost:9200/_cat/master")
- master_name_str = self.exec_oc(get_master_cmd, [])
+ master_name_str = self.exec_oc(get_master_cmd, [], save_as_name="get_master_names.json")
master_names = (master_name_str or '').split(' ')
if len(master_names) > 1:
es_master_names.add(master_names[1])
@@ -113,7 +113,7 @@ class Elasticsearch(LoggingCheck):
# get ES cluster nodes
node_cmd = self._build_es_curl_cmd(list(pods_by_name.keys())[0], 'https://localhost:9200/_nodes')
- cluster_node_data = self.exec_oc(node_cmd, [])
+ cluster_node_data = self.exec_oc(node_cmd, [], save_as_name="get_es_nodes.json")
try:
cluster_nodes = json.loads(cluster_node_data)['nodes']
except (ValueError, KeyError):
@@ -142,7 +142,7 @@ class Elasticsearch(LoggingCheck):
errors = []
for pod_name in pods_by_name.keys():
cluster_health_cmd = self._build_es_curl_cmd(pod_name, 'https://localhost:9200/_cluster/health?pretty=true')
- cluster_health_data = self.exec_oc(cluster_health_cmd, [])
+ cluster_health_data = self.exec_oc(cluster_health_cmd, [], save_as_name='get_es_health.json')
try:
health_res = json.loads(cluster_health_data)
if not health_res or not health_res.get('status'):
@@ -171,7 +171,7 @@ class Elasticsearch(LoggingCheck):
errors = []
for pod_name in pods_by_name.keys():
df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
- disk_output = self.exec_oc(df_cmd, [])
+ disk_output = self.exec_oc(df_cmd, [], save_as_name='get_pv_diskspace.json')
lines = disk_output.splitlines()
# expecting one header looking like 'IUse% Use%' and one body line
body_re = r'\s*(\d+)%?\s+(\d+)%?\s*$'
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging.py b/roles/openshift_health_checker/openshift_checks/logging/logging.py
index ecd8adb64..06bdfebf6 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/logging.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging.py
@@ -78,7 +78,7 @@ class LoggingCheck(OpenShiftCheck):
"""Returns the namespace in which logging is configured to deploy."""
return self.get_var("openshift_logging_namespace", default="logging")
- def exec_oc(self, cmd_str="", extra_args=None):
+ def exec_oc(self, cmd_str="", extra_args=None, save_as_name=None):
"""
Execute an 'oc' command in the remote host.
Returns: output of command and namespace,
@@ -92,7 +92,7 @@ class LoggingCheck(OpenShiftCheck):
"extra_args": list(extra_args) if extra_args else [],
}
- result = self.execute_module("ocutil", args)
+ result = self.execute_module("ocutil", args, save_as_name=save_as_name)
if result.get("failed"):
if result['result'] == '[Errno 2] No such file or directory':
raise CouldNotUseOc(
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
index d781db649..cacdf4213 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
@@ -104,7 +104,7 @@ class LoggingIndexTime(LoggingCheck):
"https://logging-es:9200/project.{namespace}*/_count?q=message:{uuid}"
)
exec_cmd = exec_cmd.format(pod_name=pod_name, namespace=self.logging_namespace(), uuid=uuid)
- result = self.exec_oc(exec_cmd, [])
+ result = self.exec_oc(exec_cmd, [], save_as_name="query_for_uuid.json")
try:
count = json.loads(result)["count"]
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index e9bae60a3..b90ebf6dd 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -36,7 +36,7 @@ class DockerHostMixin(object):
# NOTE: we would use the "package" module but it's actually an action plugin
# and it's not clear how to invoke one of those. This is about the same anyway:
- result = self.execute_module(
+ result = self.execute_module_with_retries(
self.get_var("ansible_pkg_mgr", default="yum"),
{"name": self.dependencies, "state": "present"},
)
@@ -49,5 +49,4 @@ class DockerHostMixin(object):
" {deps}\n{msg}"
).format(deps=',\n '.join(self.dependencies), msg=msg)
failed = result.get("failed", False) or result.get("rc", 0) != 0
- self.changed = result.get("changed", False)
return msg, failed
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index a86180b00..21355c2f0 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -26,7 +26,7 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
packages.update(self.node_packages(rpm_prefix))
args = {"packages": sorted(set(packages))}
- return self.execute_module("check_yum_update", args)
+ return self.execute_module_with_retries("check_yum_update", args)
@staticmethod
def master_packages(rpm_prefix):
diff --git a/roles/openshift_health_checker/openshift_checks/package_update.py b/roles/openshift_health_checker/openshift_checks/package_update.py
index 1e9aecbe0..8464e8a5e 100644
--- a/roles/openshift_health_checker/openshift_checks/package_update.py
+++ b/roles/openshift_health_checker/openshift_checks/package_update.py
@@ -11,4 +11,4 @@ class PackageUpdate(NotContainerizedMixin, OpenShiftCheck):
def run(self):
args = {"packages": []}
- return self.execute_module("check_yum_update", args)
+ return self.execute_module_with_retries("check_yum_update", args)
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 8b780114f..d4aec3ed8 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -46,6 +46,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
check_multi_minor_release = deployment_type in ['openshift-enterprise']
args = {
+ "package_mgr": self.get_var("ansible_pkg_mgr"),
"package_list": [
{
"name": "openvswitch",
@@ -75,7 +76,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
],
}
- return self.execute_module("aos_version", args)
+ return self.execute_module_with_retries("aos_version", args)
def get_required_ovs_version(self):
"""Return the correct Open vSwitch version(s) for the current OpenShift version."""
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
index c109ebd24..f14887303 100644
--- a/roles/openshift_health_checker/test/action_plugin_test.py
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -3,10 +3,12 @@ import pytest
from ansible.playbook.play_context import PlayContext
from openshift_health_check import ActionModule, resolve_checks
-from openshift_checks import OpenShiftCheckException
+from openshift_health_check import copy_remote_file_to_dir, write_result_to_output_dir, write_to_output_file
+from openshift_checks import OpenShiftCheckException, FileToSave
-def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None, changed=False):
+def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None,
+ run_logs=None, run_files=None, changed=False, get_var_return=None):
"""Returns a new class that is compatible with OpenShiftCheck for testing."""
_name, _tags = name, tags
@@ -14,12 +16,16 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
class FakeCheck(object):
name = _name
tags = _tags or []
- changed = False
- def __init__(self, execute_module=None, task_vars=None, tmp=None):
- pass
+ def __init__(self, **_):
+ self.changed = False
+ self.failures = []
+ self.logs = run_logs or []
+ self.files_to_save = run_files or []
def is_active(self):
+ if isinstance(is_active, Exception):
+ raise is_active
return is_active
def run(self):
@@ -28,6 +34,13 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
raise run_exception
return run_return
+ def get_var(*args, **_):
+ return get_var_return
+
+ def register_failure(self, exc):
+ self.failures.append(OpenShiftCheckException(str(exc)))
+ return
+
return FakeCheck
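For context on how the factory above is used: it returns a class, which each test instantiates; a short usage sketch based only on the fields defined above:

    check_class = fake_check(run_return={'ok': 'test'})
    check = check_class()  # __init__ accepts and ignores any keyword arguments
    assert check.run() == {'ok': 'test'}
    assert check.logs == [] and check.files_to_save == []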
@@ -98,23 +111,33 @@ def test_action_plugin_cannot_load_checks_with_the_same_name(plugin, task_vars,
assert failed(result, msg_has=['duplicate', 'duplicate_name', 'FakeCheck'])
-def test_action_plugin_skip_non_active_checks(plugin, task_vars, monkeypatch):
- checks = [fake_check(is_active=False)]
+@pytest.mark.parametrize('is_active, skipped_reason', [
+ (False, "Not active for this host"),
+ (Exception("borked"), "exception"),
+])
+def test_action_plugin_skip_non_active_checks(is_active, skipped_reason, plugin, task_vars, monkeypatch):
+ checks = [fake_check(is_active=is_active)]
monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
result = plugin.run(tmp=None, task_vars=task_vars)
- assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Not active for this host")
+ assert result['checks']['fake_check'].get('skipped')
+ assert skipped_reason in result['checks']['fake_check'].get('skipped_reason')
assert not failed(result)
assert not changed(result)
assert not skipped(result)
-def test_action_plugin_skip_disabled_checks(plugin, task_vars, monkeypatch):
+@pytest.mark.parametrize('to_disable', [
+ 'fake_check',
+ ['fake_check', 'spam'],
+ '*,spam,eggs',
+])
+def test_action_plugin_skip_disabled_checks(to_disable, plugin, task_vars, monkeypatch):
checks = [fake_check('fake_check', is_active=True)]
monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
- task_vars['openshift_disable_check'] = 'fake_check'
+ task_vars['openshift_disable_check'] = to_disable
result = plugin.run(tmp=None, task_vars=task_vars)
assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Disabled by user request")
@@ -123,10 +146,21 @@ def test_action_plugin_skip_disabled_checks(plugin, task_vars, monkeypatch):
assert not skipped(result)
+def test_action_plugin_run_list_checks(monkeypatch):
+ task = FakeTask('openshift_health_check', {'checks': []})
+ plugin = ActionModule(task, None, PlayContext(), None, None, None)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
+ result = plugin.run()
+
+ assert failed(result, msg_has="Available checks")
+ assert not changed(result)
+ assert not skipped(result)
+
+
def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
check_return_value = {'ok': 'test'}
- check_class = fake_check(run_return=check_return_value)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ check_class = fake_check(run_return=check_return_value, run_files=[None])
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -140,7 +174,7 @@ def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
check_return_value = {'ok': 'test'}
check_class = fake_check(run_return=check_return_value, changed=True)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -153,9 +187,9 @@ def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
- check_return_value = {'failed': True}
+ check_return_value = {'failed': True, 'msg': 'this is a failure'}
check_class = fake_check(run_return=check_return_value)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -166,24 +200,51 @@ def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
assert not skipped(result)
-def test_action_plugin_run_check_exception(plugin, task_vars, monkeypatch):
+@pytest.mark.parametrize('exc_class, expect_traceback', [
+ (OpenShiftCheckException, False),
+ (Exception, True),
+])
+def test_action_plugin_run_check_exception(plugin, task_vars, exc_class, expect_traceback, monkeypatch):
exception_msg = 'fake check has an exception'
- run_exception = OpenShiftCheckException(exception_msg)
+ run_exception = exc_class(exception_msg)
check_class = fake_check(run_exception=run_exception, changed=True)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
assert failed(result['checks']['fake_check'], msg_has=exception_msg)
+ assert expect_traceback == ("Traceback" in result['checks']['fake_check']['msg'])
assert failed(result, msg_has=['failed'])
assert changed(result['checks']['fake_check'])
assert changed(result)
assert not skipped(result)
+def test_action_plugin_run_check_output_dir(plugin, task_vars, tmpdir, monkeypatch):
+ check_class = fake_check(
+ run_return={},
+ run_logs=[('thing', 'note')],
+ run_files=[
+ FileToSave('save.file', 'contents', None),
+ FileToSave('save.file', 'duplicate', None),
+ FileToSave('copy.file', None, 'foo'), # note: copy runs execute_module => exception
+ ],
+ )
+ task_vars['openshift_checks_output_dir'] = str(tmpdir)
+ check_class.get_var = lambda self, name, **_: task_vars.get(name)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
+ monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
+
+ plugin.run(tmp=None, task_vars=task_vars)
+ assert any(path.basename == task_vars['ansible_host'] for path in tmpdir.listdir())
+ assert any(path.basename == 'fake_check.log.json' for path in tmpdir.visit())
+ assert any(path.basename == 'save.file' for path in tmpdir.visit())
+ assert any(path.basename == 'save.file.2' for path in tmpdir.visit())
+
+
def test_action_plugin_resolve_checks_exception(plugin, task_vars, monkeypatch):
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -249,3 +310,38 @@ def test_resolve_checks_failure(names, all_checks, words_in_exception):
resolve_checks(names, all_checks)
for word in words_in_exception:
assert word in str(excinfo.value)
+
+
+@pytest.mark.parametrize('give_output_dir, result, expect_file', [
+ (False, None, False),
+ (True, dict(content="c3BhbQo=", encoding="base64"), True),
+ (True, dict(content="encoding error", encoding="base64"), False),
+ (True, dict(content="spam", no_encoding=None), True),
+ (True, dict(failed=True, msg="could not slurp"), False),
+])
+def test_copy_remote_file_to_dir(give_output_dir, result, expect_file, tmpdir):
+ check = fake_check()()
+ check.execute_module = lambda *args, **_: result
+ copy_remote_file_to_dir(check, "remote_file", str(tmpdir) if give_output_dir else "", "local_file")
+ assert expect_file == any(path.basename == "local_file" for path in tmpdir.listdir())
+
+
+def test_write_to_output_exceptions(tmpdir, monkeypatch, capsys):
+
+ class Spam(object):
+ def __str__(self):
+ raise Exception("break str")
+
+ test = {1: object(), 2: Spam()}
+ test[3] = test
+ write_result_to_output_dir(str(tmpdir), test)
+ assert "Error writing" in test["output_files"]
+
+ output_dir = tmpdir.join("eggs")
+ output_dir.write("spam") # so now it's not a dir
+ write_to_output_file(str(output_dir), "somefile", "somedata")
+ assert "Could not write" in capsys.readouterr()[1]
+
+ monkeypatch.setattr("openshift_health_check.prepare_output_dir", lambda *_: False)
+ write_result_to_output_dir(str(tmpdir), test)
+ assert "Error creating" in test["output_files"]
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
index f4fd2dfed..9ae679b79 100644
--- a/roles/openshift_health_checker/test/disk_availability_test.py
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -183,11 +183,12 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a
ansible_mounts=ansible_mounts,
)
- result = DiskAvailability(fake_execute_module, task_vars).run()
+ check = DiskAvailability(fake_execute_module, task_vars)
+ check.run()
- assert result['failed']
+ assert check.failures
for chunk in 'below recommended'.split() + expect_chunks:
- assert chunk in result.get('msg', '')
+ assert chunk in str(check.failures[0])
@pytest.mark.parametrize('name,group_names,context,ansible_mounts,failed,extra_words', [
@@ -237,11 +238,11 @@ def test_min_required_space_changes_with_upgrade_context(name, group_names, cont
)
check = DiskAvailability(fake_execute_module, task_vars)
- result = check.run()
+ check.run()
- assert result.get("failed", False) == failed
+ assert bool(check.failures) == failed
for word in extra_words:
- assert word in result.get('msg', '')
+ assert word in str(check.failures[0])
def fake_execute_module(*args):
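The assertions in this file now inspect check.failures rather than a returned failed dict. A minimal sketch of the register_failure pattern this implies (the bodies are illustrative):

    class FailureListSketch(object):
        def __init__(self):
            self.failures = []

        def register_failure(self, exc):
            # accumulate problems; callers inspect check.failures afterwards
            self.failures.append(exc)

    check = FailureListSketch()
    check.register_failure(Exception("below recommended"))
    assert "below recommended" in str(check.failures[0])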
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index 8d0a53df9..6a7c16c7e 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -3,6 +3,23 @@ import pytest
from openshift_checks.docker_image_availability import DockerImageAvailability
+@pytest.fixture()
+def task_vars():
+ return dict(
+ openshift=dict(
+ common=dict(
+ service_type='origin',
+ is_containerized=False,
+ is_atomic=False,
+ ),
+ docker=dict(),
+ ),
+ openshift_deployment_type='origin',
+ openshift_image_tag='',
+ group_names=['nodes', 'masters'],
+ )
+
+
@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
("origin", True, [], True),
("openshift-enterprise", True, [], True),
@@ -15,12 +32,10 @@ from openshift_checks.docker_image_availability import DockerImageAvailability
("origin", False, ["nodes", "masters"], True),
("openshift-enterprise", False, ["etcd"], False),
])
-def test_is_active(deployment_type, is_containerized, group_names, expect_active):
- task_vars = dict(
- openshift=dict(common=dict(is_containerized=is_containerized)),
- openshift_deployment_type=deployment_type,
- group_names=group_names,
- )
+def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active):
+ task_vars['openshift_deployment_type'] = deployment_type
+ task_vars['openshift']['common']['is_containerized'] = is_containerized
+ task_vars['group_names'] = group_names
assert DockerImageAvailability(None, task_vars).is_active() == expect_active
@@ -30,10 +45,10 @@ def test_is_active(deployment_type, is_containerized, group_names, expect_active
(True, False),
(False, True),
])
-def test_all_images_available_locally(is_containerized, is_atomic):
+def test_all_images_available_locally(task_vars, is_containerized, is_atomic):
def execute_module(module_name, module_args, *_):
if module_name == "yum":
- return {"changed": True}
+ return {}
assert module_name == "docker_image_facts"
assert 'name' in module_args
@@ -42,19 +57,9 @@ def test_all_images_available_locally(is_containerized, is_atomic):
'images': [module_args['name']],
}
- result = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=is_containerized,
- is_atomic=is_atomic,
- ),
- docker=dict(additional_registries=["docker.io"]),
- ),
- openshift_deployment_type='origin',
- openshift_image_tag='3.4',
- group_names=['nodes', 'masters'],
- )).run()
+ task_vars['openshift']['common']['is_containerized'] = is_containerized
+ task_vars['openshift']['common']['is_atomic'] = is_atomic
+ result = DockerImageAvailability(execute_module, task_vars).run()
assert not result.get('failed', False)
@@ -63,53 +68,36 @@ def test_all_images_available_locally(is_containerized, is_atomic):
False,
True,
])
-def test_all_images_available_remotely(available_locally):
+def test_all_images_available_remotely(task_vars, available_locally):
def execute_module(module_name, *_):
if module_name == 'docker_image_facts':
return {'images': [], 'failed': available_locally}
- return {'changed': False}
+ return {}
- result = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=["docker.io", "registry.access.redhat.com"]),
- ),
- openshift_deployment_type='origin',
- openshift_image_tag='v3.4',
- group_names=['nodes', 'masters'],
- )).run()
+ task_vars['openshift']['docker']['additional_registries'] = ["docker.io", "registry.access.redhat.com"]
+ task_vars['openshift_image_tag'] = 'v3.4'
+ check = DockerImageAvailability(execute_module, task_vars)
+ check._module_retry_interval = 0
+ result = check.run()
assert not result.get('failed', False)
-def test_all_images_unavailable():
- def execute_module(module_name=None, *_):
- if module_name == "command":
- return {
- 'failed': True,
- }
+def test_all_images_unavailable(task_vars):
+ def execute_module(module_name=None, *args):
+ if module_name == "wait_for":
+ return {}
+ elif module_name == "command":
+ return {'failed': True}
- return {
- 'changed': False,
- }
+ return {} # docker_image_facts failure
- actual = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=["docker.io"]),
- ),
- openshift_deployment_type="openshift-enterprise",
- openshift_image_tag='latest',
- group_names=['nodes', 'masters'],
- )).run()
+ task_vars['openshift']['docker']['additional_registries'] = ["docker.io"]
+ task_vars['openshift_deployment_type'] = "openshift-enterprise"
+ task_vars['openshift_image_tag'] = 'latest'
+ check = DockerImageAvailability(execute_module, task_vars)
+ check._module_retry_interval = 0
+ actual = check.run()
assert actual['failed']
assert "required Docker images are not available" in actual['msg']
@@ -125,62 +113,63 @@ def test_all_images_unavailable():
["dependencies can be installed via `yum`"]
),
])
-def test_skopeo_update_failure(message, extra_words):
+def test_skopeo_update_failure(task_vars, message, extra_words):
def execute_module(module_name=None, *_):
if module_name == "yum":
return {
"failed": True,
"msg": message,
- "changed": False,
}
- return {'changed': False}
+ return {}
- actual = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=["unknown.io"]),
- ),
- openshift_deployment_type="openshift-enterprise",
- openshift_image_tag='',
- group_names=['nodes', 'masters'],
- )).run()
+ task_vars['openshift']['docker']['additional_registries'] = ["unknown.io"]
+ task_vars['openshift_deployment_type'] = "openshift-enterprise"
+ check = DockerImageAvailability(execute_module, task_vars)
+ check._module_retry_interval = 0
+ actual = check.run()
assert actual["failed"]
for word in extra_words:
assert word in actual["msg"]
-@pytest.mark.parametrize("deployment_type,registries", [
- ("origin", ["unknown.io"]),
- ("openshift-enterprise", ["registry.access.redhat.com"]),
- ("openshift-enterprise", []),
-])
-def test_registry_availability(deployment_type, registries):
+@pytest.mark.parametrize(
+ "image, registries, connection_test_failed, skopeo_failed, "
+ "expect_success, expect_registries_reached", [
+ (
+ "spam/eggs:v1", ["test.reg"],
+ True, True,
+ False,
+ {"test.reg": False},
+ ),
+ (
+ "spam/eggs:v1", ["test.reg"],
+ False, True,
+ False,
+ {"test.reg": True},
+ ),
+ (
+ "eggs.reg/spam/eggs:v1", ["test.reg"],
+ False, False,
+ True,
+ {"eggs.reg": True},
+ ),
+ ])
+def test_registry_availability(image, registries, connection_test_failed, skopeo_failed,
+ expect_success, expect_registries_reached):
def execute_module(module_name=None, *_):
- return {
- 'changed': False,
- }
+ if module_name == "wait_for":
+ return dict(msg="msg", failed=connection_test_failed)
+ elif module_name == "command":
+ return dict(msg="msg", failed=skopeo_failed)
- actual = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=registries),
- ),
- openshift_deployment_type=deployment_type,
- openshift_image_tag='',
- group_names=['nodes', 'masters'],
- )).run()
+ check = DockerImageAvailability(execute_module, task_vars())
+ check._module_retry_interval = 0
- assert not actual.get("failed", False)
+ available = check.is_available_skopeo_image(image, registries)
+ assert available == expect_success
+ assert expect_registries_reached == check.reachable_registries
@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [
@@ -257,7 +246,7 @@ def test_required_images(deployment_type, is_containerized, groups, oreg_url, ex
openshift_image_tag='vtest',
)
- assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()
+ assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
def test_containerized_etcd():
@@ -271,4 +260,4 @@ def test_containerized_etcd():
group_names=['etcd'],
)
expected = set(['registry.access.redhat.com/rhel7/etcd'])
- assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()
+ assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
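A hedged reconstruction of the probe that test_registry_availability drives: a wait_for connection test per registry, then a skopeo inspect via the command module. The port and command string are assumptions inferred from the test, not the check's verbatim code:

    def is_available_skopeo_image_sketch(execute_module, image, registries):
        # an explicit registry prefix on the image overrides the configured list
        if "." in image.split("/")[0]:
            registry, image = image.split("/", 1)
            registries = [registry]
        reachable = {}
        for registry in registries:
            conn = execute_module("wait_for", dict(host=registry, port=443))
            reachable[registry] = not conn.get("failed", False)
            if not reachable[registry]:
                continue  # unreachable: do not even attempt skopeo
            cmd = "timeout 10 skopeo inspect docker://%s/%s" % (registry, image)
            skopeo = execute_module("command", dict(_raw_params=cmd))
            if not skopeo.get("failed"):
                return True, reachable
        return False, reachable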
diff --git a/roles/openshift_health_checker/test/elasticsearch_test.py b/roles/openshift_health_checker/test/elasticsearch_test.py
index 09bacd9ac..3fa5e8929 100644
--- a/roles/openshift_health_checker/test/elasticsearch_test.py
+++ b/roles/openshift_health_checker/test/elasticsearch_test.py
@@ -72,7 +72,7 @@ def test_check_elasticsearch():
assert_error_in_list('NoRunningPods', excinfo.value)
# canned oc responses to match so all the checks pass
- def exec_oc(cmd, args):
+ def exec_oc(cmd, args, **_):
if '_cat/master' in cmd:
return 'name logging-es'
elif '/_nodes' in cmd:
@@ -97,7 +97,7 @@ def test_check_running_es_pods():
def test_check_elasticsearch_masters():
pods = [plain_es_pod]
- check = canned_elasticsearch(task_vars_config_base, lambda *_: plain_es_pod['_test_master_name_str'])
+ check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: plain_es_pod['_test_master_name_str'])
assert not check.check_elasticsearch_masters(pods_by_name(pods))
@@ -117,7 +117,7 @@ def test_check_elasticsearch_masters():
])
def test_check_elasticsearch_masters_error(pods, expect_error):
test_pods = list(pods)
- check = canned_elasticsearch(task_vars_config_base, lambda *_: test_pods.pop(0)['_test_master_name_str'])
+ check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: test_pods.pop(0)['_test_master_name_str'])
assert_error_in_list(expect_error, check.check_elasticsearch_masters(pods_by_name(pods)))
@@ -129,7 +129,7 @@ es_node_list = {
def test_check_elasticsearch_node_list():
- check = canned_elasticsearch(task_vars_config_base, lambda *_: json.dumps(es_node_list))
+ check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: json.dumps(es_node_list))
assert not check.check_elasticsearch_node_list(pods_by_name([plain_es_pod]))
@@ -151,13 +151,13 @@ def test_check_elasticsearch_node_list():
),
])
def test_check_elasticsearch_node_list_errors(pods, node_list, expect_error):
- check = canned_elasticsearch(task_vars_config_base, lambda cmd, args: json.dumps(node_list))
+ check = canned_elasticsearch(task_vars_config_base, lambda cmd, args, **_: json.dumps(node_list))
assert_error_in_list(expect_error, check.check_elasticsearch_node_list(pods_by_name(pods)))
def test_check_elasticsearch_cluster_health():
test_health_data = [{"status": "green"}]
- check = canned_elasticsearch(exec_oc=lambda *_: json.dumps(test_health_data.pop(0)))
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: json.dumps(test_health_data.pop(0)))
assert not check.check_es_cluster_health(pods_by_name([plain_es_pod]))
@@ -175,12 +175,12 @@ def test_check_elasticsearch_cluster_health():
])
def test_check_elasticsearch_cluster_health_errors(pods, health_data, expect_error):
test_health_data = list(health_data)
- check = canned_elasticsearch(exec_oc=lambda *_: json.dumps(test_health_data.pop(0)))
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: json.dumps(test_health_data.pop(0)))
assert_error_in_list(expect_error, check.check_es_cluster_health(pods_by_name(pods)))
def test_check_elasticsearch_diskspace():
- check = canned_elasticsearch(exec_oc=lambda *_: 'IUse% Use%\n 3% 4%\n')
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: 'IUse% Use%\n 3% 4%\n')
assert not check.check_elasticsearch_diskspace(pods_by_name([plain_es_pod]))
@@ -199,5 +199,5 @@ def test_check_elasticsearch_diskspace():
),
])
def test_check_elasticsearch_diskspace_errors(disk_data, expect_error):
- check = canned_elasticsearch(exec_oc=lambda *_: disk_data)
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: disk_data)
assert_error_in_list(expect_error, check.check_elasticsearch_diskspace(pods_by_name([plain_es_pod])))
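The recurring edit in this file only widens the stubs' call signatures: exec_oc now receives keyword arguments such as save_as_name, which a bare lambda *_ stub would reject. A tiny illustration:

    stub_old = lambda *_: "name logging-es"
    stub_new = lambda *args, **_: "name logging-es"

    # stub_old("cmd", [], save_as_name="x")  raises TypeError (unexpected keyword)
    assert stub_new("cmd", [], save_as_name="x") == "name logging-es"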
diff --git a/roles/openshift_health_checker/test/logging_index_time_test.py b/roles/openshift_health_checker/test/logging_index_time_test.py
index 22566b295..c48ade9b8 100644
--- a/roles/openshift_health_checker/test/logging_index_time_test.py
+++ b/roles/openshift_health_checker/test/logging_index_time_test.py
@@ -102,7 +102,7 @@ def test_with_running_pods():
),
], ids=lambda argval: argval[0])
def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout):
- check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response))
check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout)
@@ -131,7 +131,7 @@ def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout):
)
], ids=lambda argval: argval[0])
def test_wait_until_cmd_or_err(name, json_response, timeout, expect_error):
- check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response))
with pytest.raises(OpenShiftCheckException) as error:
check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, SAMPLE_UUID, timeout)
@@ -139,7 +139,7 @@ def test_wait_until_cmd_or_err(name, json_response, timeout, expect_error):
def test_curl_kibana_with_uuid():
- check = canned_loggingindextime(lambda *_: json.dumps({"statusCode": 404}))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps({"statusCode": 404}))
check.generate_uuid = lambda: SAMPLE_UUID
assert SAMPLE_UUID == check.curl_kibana_with_uuid(plain_running_kibana_pod)
@@ -161,7 +161,7 @@ def test_curl_kibana_with_uuid():
),
], ids=lambda argval: argval[0])
def test_failed_curl_kibana_with_uuid(name, json_response, expect_error):
- check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response))
check.generate_uuid = lambda: SAMPLE_UUID
with pytest.raises(OpenShiftCheckException) as error:
diff --git a/roles/openshift_health_checker/test/openshift_check_test.py b/roles/openshift_health_checker/test/openshift_check_test.py
index 789784c77..bc0c3b26c 100644
--- a/roles/openshift_health_checker/test/openshift_check_test.py
+++ b/roles/openshift_health_checker/test/openshift_check_test.py
@@ -106,13 +106,40 @@ def test_get_var_convert(task_vars, keys, convert, expected):
assert dummy_check(task_vars).get_var(*keys, convert=convert) == expected
-@pytest.mark.parametrize("keys, convert", [
- (("bar", "baz"), int),
- (("bar.baz"), float),
- (("foo"), "bogus"),
- (("foo"), lambda a, b: 1),
- (("foo"), lambda a: 1 / 0),
+def convert_oscexc(_):
+ raise OpenShiftCheckException("known failure")
+
+
+def convert_exc(_):
+ raise Exception("failure unknown")
+
+
+@pytest.mark.parametrize("keys, convert, expect_text", [
+ (("bar", "baz"), int, "Cannot convert"),
+ (("bar.baz",), float, "Cannot convert"),
+ (("foo",), "bogus", "TypeError"),
+ (("foo",), lambda a, b: 1, "TypeError"),
+ (("foo",), lambda a: 1 / 0, "ZeroDivisionError"),
+ (("foo",), convert_oscexc, "known failure"),
+ (("foo",), convert_exc, "failure unknown"),
])
-def test_get_var_convert_error(task_vars, keys, convert):
- with pytest.raises(OpenShiftCheckException):
+def test_get_var_convert_error(task_vars, keys, convert, expect_text):
+ with pytest.raises(OpenShiftCheckException) as excinfo:
dummy_check(task_vars).get_var(*keys, convert=convert)
+ assert expect_text in str(excinfo.value)
+
+
+def test_register(task_vars):
+ check = dummy_check(task_vars)
+
+ check.register_failure(OpenShiftCheckException("spam"))
+ assert "spam" in str(check.failures[0])
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.register_file("spam") # no file contents specified
+ assert "not specified" in str(excinfo.value)
+
+ # normally execute_module registers the result file; test disabling that
+ check._execute_module = lambda *args, **_: dict()
+ check.execute_module("eggs", module_args={}, register=False)
+ assert not check.files_to_save
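A small sketch of the register_file contract the last assertions rely on: a registration needs either literal contents or a remote filename, and execute_module skips auto-registration when register=False. Field order follows the FileToSave calls in these tests; the bodies are illustrative:

    from collections import namedtuple

    FileToSaveSketch = namedtuple("FileToSaveSketch", "filename contents remote_filename")

    class RegisterSketch(object):
        def __init__(self):
            self.files_to_save = []

        def register_file(self, filename, contents=None, remote_filename=None):
            # the real check raises OpenShiftCheckException("... not specified")
            if contents is None and remote_filename is None:
                raise ValueError("file contents or remote filename not specified")
            self.files_to_save.append(
                FileToSaveSketch(filename, contents, remote_filename))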
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
index e1bf29d2a..602f32989 100644
--- a/roles/openshift_health_checker/test/ovs_version_test.py
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -50,7 +50,7 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
)
- return_value = object()
+ return_value = {} # note: check.execute_module modifies return hash contents
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'rpm_version'
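The sentinel swap from object() to {} matters because execute_module now annotates the result dict before returning it, so identity assertions against an opaque object would break. A compact illustration (the annotation key here is hypothetical):

    def execute_module_sketch(result):
        result["module_name"] = "rpm_version"  # hypothetical annotation
        return result

    ret = execute_module_sketch({})    # fine: a dict accepts new keys
    # execute_module_sketch(object())  # TypeError: item assignment unsupported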
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index 1fe648b75..b34e8fbfc 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -49,14 +49,14 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
),
])
def test_package_availability(task_vars, must_have_packages, must_not_have_packages):
- return_value = object()
+ return_value = {}
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'check_yum_update'
assert 'packages' in module_args
assert set(module_args['packages']).issuperset(must_have_packages)
assert not set(module_args['packages']).intersection(must_not_have_packages)
- return return_value
+ return {'foo': return_value}
result = PackageAvailability(execute_module, task_vars).run()
- assert result is return_value
+ assert result['foo'] is return_value
diff --git a/roles/openshift_health_checker/test/package_update_test.py b/roles/openshift_health_checker/test/package_update_test.py
index 06489b0d7..85d3c9cab 100644
--- a/roles/openshift_health_checker/test/package_update_test.py
+++ b/roles/openshift_health_checker/test/package_update_test.py
@@ -2,14 +2,14 @@ from openshift_checks.package_update import PackageUpdate
def test_package_update():
- return_value = object()
+ return_value = {}
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'check_yum_update'
assert 'packages' in module_args
# empty list of packages means "generic check if 'yum update' will work"
assert module_args['packages'] == []
- return return_value
+ return {'foo': return_value}
result = PackageUpdate(execute_module).run()
- assert result is return_value
+ assert result['foo'] is return_value
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index 6054d3f3e..8564cd4db 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -5,6 +5,7 @@ from openshift_checks.package_version import PackageVersion, OpenShiftCheckExcep
def task_vars_for(openshift_release, deployment_type):
return dict(
+ ansible_pkg_mgr='yum',
openshift=dict(common=dict(service_type=deployment_type)),
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
@@ -27,6 +28,7 @@ def test_openshift_version_not_supported():
def test_invalid_openshift_release_format():
task_vars = dict(
+ ansible_pkg_mgr='yum',
openshift=dict(common=dict(service_type='origin')),
openshift_image_tag='v0',
openshift_deployment_type='origin',
@@ -50,7 +52,7 @@ def test_invalid_openshift_release_format():
])
def test_package_version(openshift_release):
- return_value = object()
+ return_value = {"foo": object()}
def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *_):
assert module_name == 'aos_version'
@@ -64,7 +66,7 @@ def test_package_version(openshift_release):
check = PackageVersion(execute_module, task_vars_for(openshift_release, 'origin'))
result = check.run()
- assert result is return_value
+ assert result == return_value
@pytest.mark.parametrize('deployment_type,openshift_release,expected_docker_version', [
@@ -77,7 +79,7 @@ def test_package_version(openshift_release):
])
def test_docker_package_version(deployment_type, openshift_release, expected_docker_version):
- return_value = object()
+ return_value = {"foo": object()}
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'aos_version'
@@ -91,7 +93,7 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc
check = PackageVersion(execute_module, task_vars_for(openshift_release, deployment_type))
result = check.run()
- assert result is return_value
+ assert result == return_value
@pytest.mark.parametrize('group_names,is_containerized,is_active', [
diff --git a/roles/openshift_health_checker/test/zz_failure_summary_test.py b/roles/openshift_health_checker/test/zz_failure_summary_test.py
index 0fc258133..69f27653c 100644
--- a/roles/openshift_health_checker/test/zz_failure_summary_test.py
+++ b/roles/openshift_health_checker/test/zz_failure_summary_test.py
@@ -65,6 +65,21 @@ import pytest
},
],
),
+    # if a failure contains an unhashable value, it will not be deduplicated
+ (
+ [
+ {
+ 'host': 'master1',
+ 'msg': {'unhashable': 'value'},
+ },
+ ],
+ [
+ {
+ 'host': 'master1',
+ 'msg': {'unhashable': 'value'},
+ },
+ ],
+ ),
])
def test_deduplicate_failures(failures, deduplicated):
assert deduplicate_failures(failures) == deduplicated
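A hedged sketch of how deduplicate_failures can tolerate the unhashable msg in the new case: hashing is attempted per failure, and anything unhashable simply passes through undeduplicated (the real function's details may differ):

    def deduplicate_failures_sketch(failures):
        seen = set()
        deduplicated = []
        for failure in failures:
            try:
                key = tuple(sorted(failure.items()))
                if key in seen:
                    continue
                seen.add(key)
            except TypeError:
                pass  # unhashable value (e.g. a dict msg): keep it as-is
            deduplicated.append(failure)
        return deduplicated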
diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md
index 3e5d7f860..29ae58556 100644
--- a/roles/openshift_hosted/README.md
+++ b/roles/openshift_hosted/README.md
@@ -39,7 +39,6 @@ variables also control configuration behavior:
Dependencies
------------
-* openshift_common
* openshift_hosted_facts
Example Playbook
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index c26df3afa..712a2a591 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -5,8 +5,8 @@ r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default
r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
-openshift_hosted_router_wait: "{{ not openshift_master_bootstrap_enabled | default(True) }}"
-openshift_hosted_registry_wait: "{{ not openshift_master_bootstrap_enabled | default(True) }}"
+openshift_hosted_router_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
+openshift_hosted_registry_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
registry_volume_claim: 'registry-claim'
@@ -47,3 +47,9 @@ r_openshift_hosted_registry_os_firewall_allow:
- service: Docker Registry Port
port: 5000/tcp
cond: "{{ r_openshift_hosted_use_calico }}"
+
+# NOTE
+# r_openshift_hosted_use_calico_default may be defined externally to this role.
+# openshift_use_calico, if defined, may affect other roles or play behavior.
+r_openshift_hosted_use_calico_default: "{{ openshift_use_calico | default(False) }}"
+r_openshift_hosted_use_calico: "{{ r_openshift_hosted_use_calico_default }}"
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index 3e424da12..48f53aef8 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -61,6 +61,14 @@
openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine({'OPENSHIFT_DEFAULT_REGISTRY':'docker-registry.default.svc:5000'}) }}"
when: openshift_push_via_dns | default(false) | bool
+- name: Update registry proxy settings for dc/docker-registry
+ set_fact:
+ openshift_hosted_registry_env_vars: "{{ {'HTTPS_PROXY': (openshift.common.https_proxy | default('')),
+ 'HTTP_PROXY': (openshift.common.http_proxy | default('')),
+ 'NO_PROXY': (openshift.common.no_proxy | default(''))}
+ | combine(openshift_hosted_registry_env_vars) }}"
+ when: (openshift.common.https_proxy | default(False)) or (openshift.common.http_proxy | default('')) != ''
+
- name: Create the registry service account
oc_serviceaccount:
name: "{{ openshift_hosted_registry_serviceaccount }}"
@@ -129,7 +137,7 @@
edits: "{{ openshift_hosted_registry_edits }}"
force: "{{ True|bool in openshift_hosted_registry_force }}"
-- when: openshift_hosted_registry_wait
+- when: openshift_hosted_registry_wait | bool
block:
- name: Ensure OpenShift registry correctly rolls out (best-effort today)
command: |
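The combine() ordering in the proxy task above is deliberate: the proxy values form the base dict and the pre-existing registry env vars are merged on top, so explicit user settings win. The same precedence expressed in plain Python (the names and values are placeholders):

    proxy_defaults = {"HTTPS_PROXY": "", "HTTP_PROXY": "http://proxy:3128", "NO_PROXY": ""}
    existing_env = {"HTTP_PROXY": "http://other:8080"}

    merged = dict(proxy_defaults)
    merged.update(existing_env)  # like base | combine(existing_env)
    assert merged["HTTP_PROXY"] == "http://other:8080"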
diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/registry/secure.yml
index a8a6f6fc8..434b679df 100644
--- a/roles/openshift_hosted/tasks/registry/secure.yml
+++ b/roles/openshift_hosted/tasks/registry/secure.yml
@@ -1,7 +1,7 @@
---
- name: Configure facts for docker-registry
set_fact:
- openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routecertificates, {}) }}"
+ openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift_hosted_registry_routecertificates, {}) }}"
openshift_hosted_registry_routehost: "{{ ('routehost' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routehost, False) }}"
openshift_hosted_registry_routetermination: "{{ ('routetermination' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routetermination, 'passthrough') }}"
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml
index e57ed733e..2a42b5a7c 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router/router.yml
@@ -18,6 +18,15 @@
openshift_hosted_router_selector: "{{ openshift.hosted.router.selector | default(None) }}"
openshift_hosted_router_image: "{{ openshift.hosted.router.registryurl }}"
+- name: Get the certificate contents for router
+ copy:
+ backup: True
+ dest: "/etc/origin/master/{{ item | basename }}"
+ src: "{{ item }}"
+ with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') |
+ oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}"
+ when: ( not openshift_hosted_router_create_certificate | bool ) or openshift_hosted_router_certificate != {}
+
# This is for when we desire a cluster signed cert
# The certificate is generated and placed in master_config_dir/
- block:
@@ -43,15 +52,6 @@
# End Block
when: ( openshift_hosted_router_create_certificate | bool ) and openshift_hosted_router_certificate == {}
-- name: Get the certificate contents for router
- copy:
- backup: True
- dest: "/etc/origin/master/{{ item | basename }}"
- src: "{{ item }}"
- with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') |
- oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}"
- when: not openshift_hosted_router_create_certificate | bool
-
- name: Create the router service account(s)
oc_serviceaccount:
name: "{{ item.serviceaccount }}"
@@ -94,7 +94,7 @@
stats_port: "{{ item.stats_port }}"
with_items: "{{ openshift_hosted_routers }}"
-- when: openshift_hosted_router_wait
+- when: openshift_hosted_router_wait | bool
block:
- name: Ensure OpenShift router correctly rolls out (best-effort today)
command: |
diff --git a/roles/openshift_hosted_logging/meta/main.yaml b/roles/openshift_hosted_logging/meta/main.yaml
index 044c8043c..ab07a77c1 100644
--- a/roles/openshift_hosted_logging/meta/main.yaml
+++ b/roles/openshift_hosted_logging/meta/main.yaml
@@ -1,4 +1,3 @@
---
dependencies:
- - { role: openshift_common }
- { role: openshift_master_facts }
diff --git a/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml
index 11478263c..72754df2e 100644
--- a/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_NAME}
+ name: ${IMAGE_NAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml
index 0e3d006a7..298f8039e 100644
--- a/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_NAME}
+ name: ${IMAGE_NAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml
index 28feac4e6..dace26793 100644
--- a/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_NAME}
+ name: ${IMAGE_NAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
index 8bf98ba41..f821efd6b 100644
--- a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_NAME}
+ name: ${IMAGE_NAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
index bbaf76c17..019d836fe 100644
--- a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_NAME}
+ name: ${IMAGE_NAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/meta/main.yml b/roles/openshift_hosted_templates/meta/main.yml
index 9c12865bf..4027f524b 100644
--- a/roles/openshift_hosted_templates/meta/main.yml
+++ b/roles/openshift_hosted_templates/meta/main.yml
@@ -11,5 +11,4 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies:
-- role: openshift_common
+dependencies: []
diff --git a/roles/openshift_loadbalancer/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml
index 41a2b12a2..239b16427 100644
--- a/roles/openshift_loadbalancer/defaults/main.yml
+++ b/roles/openshift_loadbalancer/defaults/main.yml
@@ -24,4 +24,10 @@ r_openshift_loadbalancer_os_firewall_allow:
port: "{{ openshift_master_api_port | default(8443) }}/tcp"
- service: nuage mon
port: "{{ nuage_mon_rest_server_port | default(9443) }}/tcp"
- cond: "{{ openshift_use_nuage | default(false) | bool }}"
+ cond: "{{ r_openshift_lb_use_nuage | bool }}"
+
+# NOTE
+# r_openshift_lb_use_nuage_default may be defined externally to this role.
+# openshift_use_nuage, if defined, may affect other roles or play behavior.
+r_openshift_lb_use_nuage_default: "{{ openshift_use_nuage | default(False) }}"
+r_openshift_lb_use_nuage: "{{ r_openshift_lb_use_nuage_default }}"
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index a77df9986..de5e25061 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -134,6 +134,7 @@
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}"
@@ -165,6 +166,7 @@
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}"
diff --git a/roles/openshift_logging_curator/templates/curator.j2 b/roles/openshift_logging_curator/templates/curator.j2
index 6431f86d9..e74918a40 100644
--- a/roles/openshift_logging_curator/templates/curator.j2
+++ b/roles/openshift_logging_curator/templates/curator.j2
@@ -44,6 +44,8 @@ spec:
cpu: "{{curator_cpu_limit}}"
{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
memory: "{{curator_memory_limit}}"
+ requests:
+ memory: "{{curator_memory_limit}}"
{% endif %}
env:
-
diff --git a/roles/openshift_logging_curator/vars/main.yml b/roles/openshift_logging_curator/vars/main.yml
index 97525479e..95bf462d1 100644
--- a/roles/openshift_logging_curator/vars/main.yml
+++ b/roles/openshift_logging_curator/vars/main.yml
@@ -1,3 +1,3 @@
---
-__latest_curator_version: "3_5"
-__allowed_curator_versions: ["3_5", "3_6"]
+__latest_curator_version: "3_6"
+__allowed_curator_versions: ["3_5", "3_6", "3_7"]
diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
index 0c06a7677..65b08d970 100644
--- a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
+++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
@@ -24,7 +24,8 @@ network:
cloud:
kubernetes:
- service: ${SERVICE_DNS}
+ pod_label: ${POD_LABEL}
+ pod_port: 9300
namespace: ${NAMESPACE}
discovery:
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index cbe6b89f2..3c8f390c4 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -48,7 +48,7 @@ spec:
cpu: "{{es_cpu_limit}}"
{% endif %}
requests:
- memory: "512Mi"
+ memory: "{{es_memory_limit}}"
ports:
-
containerPort: 9200
@@ -90,6 +90,12 @@ spec:
name: "RECOVER_AFTER_TIME"
value: "{{openshift_logging_elasticsearch_recover_after_time}}"
-
+ name: "READINESS_PROBE_TIMEOUT"
+ value: "30"
+ -
+ name: "POD_LABEL"
+ value: "component={{component}}"
+ -
name: "IS_MASTER"
value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}"
@@ -106,6 +112,13 @@ spec:
readOnly: true
- name: elasticsearch-storage
mountPath: /elasticsearch/persistent
+ readinessProbe:
+ exec:
+ command:
+ - "/usr/share/java/elasticsearch/probe/readiness.sh"
+ initialDelaySeconds: 10
+ timeoutSeconds: 30
+ periodSeconds: 5
volumes:
- name: elasticsearch
secret:
diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml
index 20fa63543..09e2ee4d0 100644
--- a/roles/openshift_logging_elasticsearch/vars/main.yml
+++ b/roles/openshift_logging_elasticsearch/vars/main.yml
@@ -1,6 +1,6 @@
---
-__latest_es_version: "3_5"
-__allowed_es_versions: ["3_5", "3_6"]
+__latest_es_version: "3_6"
+__allowed_es_versions: ["3_5", "3_6", "3_7"]
__allowed_es_types: ["data-master", "data-client", "master", "client"]
__es_log_appenders: ['file', 'console']
__kibana_index_modes: ["unique", "shared_ops"]
diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2
index 88e039e3f..a4afb6618 100644
--- a/roles/openshift_logging_fluentd/templates/fluentd.j2
+++ b/roles/openshift_logging_fluentd/templates/fluentd.j2
@@ -36,6 +36,8 @@ spec:
limits:
cpu: {{ openshift_logging_fluentd_cpu_limit }}
memory: {{ openshift_logging_fluentd_memory_limit }}
+ requests:
+ memory: {{ openshift_logging_fluentd_memory_limit }}
volumeMounts:
- name: runlogjournal
mountPath: /run/log/journal
diff --git a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml
index ec8e565c3..92a426952 100644
--- a/roles/openshift_logging_fluentd/vars/main.yml
+++ b/roles/openshift_logging_fluentd/vars/main.yml
@@ -1,5 +1,5 @@
---
-__latest_fluentd_version: "3_5"
-__allowed_fluentd_versions: ["3_5", "3_6"]
+__latest_fluentd_version: "3_6"
+__allowed_fluentd_versions: ["3_5", "3_6", "3_7"]
__allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"]
__allowed_mux_client_modes: ["minimal", "maximal"]
diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2
index 512d99d06..da1386d3e 100644
--- a/roles/openshift_logging_kibana/templates/kibana.j2
+++ b/roles/openshift_logging_kibana/templates/kibana.j2
@@ -46,6 +46,8 @@ spec:
{% endif %}
{% if kibana_memory_limit is not none and kibana_memory_limit != "" %}
memory: "{{ kibana_memory_limit }}"
+ requests:
+ memory: "{{ kibana_memory_limit }}"
{% endif %}
{% endif %}
env:
@@ -82,6 +84,8 @@ spec:
{% endif %}
{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %}
memory: "{{ kibana_proxy_memory_limit }}"
+ requests:
+ memory: "{{ kibana_proxy_memory_limit }}"
{% endif %}
{% endif %}
ports:
diff --git a/roles/openshift_logging_kibana/vars/main.yml b/roles/openshift_logging_kibana/vars/main.yml
index 87b281c4b..241877a02 100644
--- a/roles/openshift_logging_kibana/vars/main.yml
+++ b/roles/openshift_logging_kibana/vars/main.yml
@@ -1,3 +1,3 @@
---
-__latest_kibana_version: "3_5"
-__allowed_kibana_versions: ["3_5", "3_6"]
+__latest_kibana_version: "3_6"
+__allowed_kibana_versions: ["3_5", "3_6", "3_7"]
diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
index 70afe5cee..ff18d3270 100644
--- a/roles/openshift_logging_mux/templates/mux.j2
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -45,6 +45,8 @@ spec:
{% endif %}
{% if mux_memory_limit is not none %}
memory: "{{mux_memory_limit}}"
+ requests:
+ memory: "{{mux_memory_limit}}"
{% endif %}
{% endif %}
ports:
diff --git a/roles/openshift_logging_mux/vars/main.yml b/roles/openshift_logging_mux/vars/main.yml
index 4234b74e2..e7b57f4b5 100644
--- a/roles/openshift_logging_mux/vars/main.yml
+++ b/roles/openshift_logging_mux/vars/main.yml
@@ -1,3 +1,3 @@
---
-__latest_mux_version: "3_5"
-__allowed_mux_versions: ["3_5", "3_6"]
+__latest_mux_version: "3_6"
+__allowed_mux_versions: ["3_5", "3_6", "3_7"]
diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml
index 7ccc2fc3b..f142f89f0 100644
--- a/roles/openshift_manageiq/vars/main.yml
+++ b/roles/openshift_manageiq/vars/main.yml
@@ -3,6 +3,9 @@ manage_iq_tasks:
- resource_kind: role
resource_name: admin
user: management-admin
+- resource_kind: role
+ resource_name: admin
+ user: system:serviceaccount:management-infra:management-admin
- resource_kind: cluster-role
resource_name: management-infra-admin
user: system:serviceaccount:management-infra:management-admin
diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md
index fbf69c270..86fa57b50 100644
--- a/roles/openshift_master/README.md
+++ b/roles/openshift_master/README.md
@@ -17,7 +17,6 @@ From this role:
| Name | Default value | |
|---------------------------------------------------|-----------------------|-------------------------------------------------------------------------------|
-| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for master |
| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when master starts up |
| oreg_url | UNDEF | Default docker registry to use |
| oreg_url_master | UNDEF | Default docker registry to use, specifically on the master |
@@ -29,18 +28,10 @@ From this role:
| openshift_master_public_console_url | UNDEF | |
| openshift_master_saconfig_limit_secret_references | false | |
-From openshift_common:
-
-| Name | Default Value | |
-|-------------------------------|----------------|----------------------------------------|
-| openshift_debug_level | 2 | Global openshift debug log verbosity |
-| openshift_public_ip | UNDEF | Public IP address to use for this host |
-| openshift_hostname | UNDEF | hostname to use for this instance |
Dependencies
------------
-openshift_common
Example Playbook
----------------
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index d70106276..4c8d6fdad 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -22,5 +22,24 @@ r_openshift_master_os_firewall_allow:
oreg_url: ''
oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
-oreg_auth_credentials_path: "{{ openshift.common.data_dir }}/.docker"
+oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
oreg_auth_credentials_replace: False
+l_bind_docker_reg_auth: False
+
+# NOTE
+# r_openshift_master_*_default may be defined externally to this role.
+# openshift_use_*, if defined, may affect other roles or play behavior.
+r_openshift_master_use_openshift_sdn_default: "{{ openshift_use_openshift_sdn | default(True) }}"
+r_openshift_master_use_openshift_sdn: "{{ r_openshift_master_use_openshift_sdn_default }}"
+
+r_openshift_master_use_nuage_default: "{{ openshift_use_nuage | default(False) }}"
+r_openshift_master_use_nuage: "{{ r_openshift_master_use_nuage_default }}"
+
+r_openshift_master_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"
+r_openshift_master_use_contiv: "{{ r_openshift_master_use_contiv_default }}"
+
+r_openshift_master_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+r_openshift_master_data_dir: "{{ r_openshift_master_data_dir_default }}"
+
+r_openshift_master_sdn_network_plugin_name_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}"
+r_openshift_master_sdn_network_plugin_name: "{{ r_openshift_master_sdn_network_plugin_name_default }}"
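The `*_default` indirection above lets an inventory or an external play steer the role without editing role internals: each `r_openshift_master_*` variable resolves through its `_default` twin, which in turn falls back to the cluster-wide `openshift_use_*` / `openshift_data_dir` settings. A minimal inventory sketch (values illustrative, not part of this diff):

```
# group_vars for masters (hypothetical); picked up through the
# r_openshift_master_*_default chain defined in defaults/main.yml above
openshift_use_openshift_sdn: False
os_sdn_network_plugin_name: cni
openshift_data_dir: /srv/origin
```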
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index bd2383f61..a657668a9 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -14,19 +14,3 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_os_firewall
-- role: openshift_master_facts
-- role: openshift_hosted_facts
-- role: openshift_master_certificates
-- role: openshift_etcd_client_certificates
- etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
- etcd_cert_prefix: "master.etcd-"
- when: groups.oo_etcd_to_config | default([]) | length != 0
-- role: openshift_clock
-- role: openshift_cloud_provider
-- role: openshift_builddefaults
-- role: openshift_buildoverrides
-- role: nickhammond.logrotate
-- role: contiv
- contiv_role: netmaster
- when: openshift.common.use_contiv | bool
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index a06defdb9..6203bfc7b 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -47,9 +47,9 @@
when:
- not openshift.common.is_containerized | bool
-- name: Create openshift.common.data_dir
+- name: Create r_openshift_master_data_dir
file:
- path: "{{ openshift.common.data_dir }}"
+ path: "{{ r_openshift_master_data_dir }}"
state: directory
mode: 0755
owner: root
@@ -169,7 +169,7 @@
register: l_already_set
- set_fact:
- openshift_push_via_dns: "{{ (openshift_use_dnsmasq | default(true) and openshift.common.version_gte_3_6) or (l_already_set.stdout is defined and l_already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"
+ openshift_push_via_dns: "{{ openshift.common.version_gte_3_6 or (l_already_set.stdout is defined and l_already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"
- name: Set fact of all etcd host IPs
openshift_facts:
@@ -177,9 +177,33 @@
local_facts:
no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}"
+- include: registry_auth.yml
+
- name: Install the systemd units
include: systemd_units.yml
+- name: Checking for journald.conf
+ stat: path=/etc/systemd/journald.conf
+ register: journald_conf_file
+
+- name: Update journald setup
+ replace:
+ dest: /etc/systemd/journald.conf
+ regexp: '^(\#| )?{{ item.var }}=\s*.*?$'
+ replace: ' {{ item.var }}={{ item.val }}'
+ backup: yes
+ with_items: "{{ journald_vars_to_replace | default([]) }}"
+ when: journald_conf_file.stat.exists
+ register: journald_update
+
+# Journald needs to be restarted immediately, otherwise it gets in the
+# way of further steps in Ansible.
+- name: Restart journald
+ systemd:
+ name: systemd-journald
+ state: restarted
+ when: journald_update | changed
+
- name: Install Master system container
include: system_container.yml
when:
@@ -229,22 +253,6 @@
- restart master controllers
when: openshift_master_bootstrap_enabled | default(False)
-- name: Check for credentials file for registry auth
- stat:
- path: "{{oreg_auth_credentials_path }}"
- when:
- - oreg_auth_user is defined
- register: master_oreg_auth_credentials_stat
-
-- name: Create credentials for registry auth
- command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
- when:
- - oreg_auth_user is defined
- - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- notify:
- - restart master api
- - restart master controllers
-
- include: set_loopback_context.yml
when:
- openshift.common.version_gte_3_2_or_1_2
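For reference, the `replace` regexp in the journald task above matches a setting whether it is commented out or already managed, so repeated runs converge on the same line; a sketch of the rewrite for one `journald_vars_to_replace` entry (input lines illustrative):

```
entry:  { var: Storage, val: persistent }
regexp: '^(\#| )?Storage=\s*.*?$'

"#Storage=auto"      becomes  " Storage=persistent"
" Storage=volatile"  becomes  " Storage=persistent"
```

The leading space in the replacement comes from the task's `replace: ' {{ item.var }}={{ item.val }}'` string, and is what lets a previously managed line match again on the next run.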
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
new file mode 100644
index 000000000..96b6c614e
--- /dev/null
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -0,0 +1,27 @@
+---
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{ oreg_auth_credentials_path }}"
+ when: oreg_auth_user is defined
+ register: master_oreg_auth_credentials_stat
+
+# Container images may need the registry credentials
+- name: Setup ro mount of /root/.docker for containerized hosts
+ set_fact:
+ l_bind_docker_reg_auth: True
+ when:
+ - openshift.common.is_containerized | bool
+ - oreg_auth_user is defined
+ - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ notify:
+ - restart master api
+ - restart master controllers
+
+- name: Create credentials for registry auth
+ command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ when:
+ - oreg_auth_user is defined
+ - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ notify:
+ - restart master api
+ - restart master controllers
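The new registry_auth.yml is driven entirely by the `oreg_*` variables and is skipped unless `oreg_auth_user` is defined; a minimal inventory sketch (registry host and credentials illustrative):

```
oreg_url: registry.example.com/openshift3/ose-${component}:${version}
oreg_auth_user: pull-user
oreg_auth_password: pull-password
# set to True to re-run `docker login` even if a credentials file exists
oreg_auth_credentials_replace: True
```

`oreg_host` is derived from the first path segment of `oreg_url` (it must contain a dot to be treated as a registry host), and `{{ oreg_auth_credentials_path }}` is bind-mounted read-only into the master containers when `l_bind_docker_reg_auth` is set.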
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 782a35abe..7a918c57e 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -7,9 +7,16 @@
# openshift_master_config_dir is set.
- name: Set openshift_master_config_dir if unset
set_fact:
- openshift_master_config_dir: '/var/lib/origin'
+ openshift_master_config_dir: '/etc/origin/master'
when: openshift_master_config_dir is not defined
+# This play may be consumed outside the role, so we need to ensure that
+# r_openshift_master_data_dir is set.
+- name: Set r_openshift_master_data_dir if unset
+ set_fact:
+ r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+ when: r_openshift_master_data_dir is not defined
+
- name: Remove the legacy master service if it exists
include: clean_systemd_units.yml
diff --git a/roles/openshift_master/tasks/update_etcd_client_urls.yml b/roles/openshift_master/tasks/update_etcd_client_urls.yml
new file mode 100644
index 000000000..1ab105808
--- /dev/null
+++ b/roles/openshift_master/tasks/update_etcd_client_urls.yml
@@ -0,0 +1,8 @@
+---
+- yedit:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ key: 'etcdClientInfo.urls'
+ value: "{{ openshift.master.etcd_urls }}"
+ notify:
+ - restart master api
+ - restart master controllers
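`yedit` (from lib_utils) patches a single key in place rather than re-templating the whole file; after this task runs, the `etcdClientInfo` stanza of master-config.yaml would look roughly like this (URLs and certificate names illustrative):

```
etcdClientInfo:
  ca: master.etcd-ca.crt
  certFile: master.etcd-client.crt
  keyFile: master.etcd-client.key
  urls:
  - https://etcd0.example.com:2379
  - https://etcd1.example.com:2379
```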
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index e8f7c47b0..a184a59f6 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -12,12 +12,22 @@ Requires={{ openshift.docker.service_name }}.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host \
+ --name {{ openshift.common.service_type }}-master-api \
+ --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api \
+ -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \
+ -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock \
+ -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
+ {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
+ -v /etc/pki:/etc/pki:ro \
+ {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+ {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api \
+ --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api
LimitNOFILE=131072
LimitCORE=infinity
-WorkingDirectory={{ openshift.common.data_dir }}
+WorkingDirectory={{ r_openshift_master_data_dir }}
SyslogIdentifier={{ openshift.common.service_type }}-master-api
Restart=always
RestartSec=5s
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index 69db62f16..2ded05f53 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -11,12 +11,22 @@ PartOf={{ openshift.docker.service_name }}.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host \
+ --name {{ openshift.common.service_type }}-master-controllers \
+ --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers \
+ -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \
+ -v /var/run/docker.sock:/var/run/docker.sock \
+ -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
+ {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
+ -v /etc/pki:/etc/pki:ro \
+ {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+ {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers \
+ --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers
LimitNOFILE=131072
LimitCORE=infinity
-WorkingDirectory={{ openshift.common.data_dir }}
+WorkingDirectory={{ r_openshift_master_data_dir }}
SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
Restart=always
RestartSec=5s
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index c14579435..d045b402b 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -106,7 +106,7 @@ etcdConfig:
clientCA: ca.crt
{% endif %}
keyFile: etcd.server.key
- storageDirectory: {{ openshift.common.data_dir }}/openshift.local.etcd
+ storageDirectory: {{ r_openshift_master_data_dir }}/openshift.local.etcd
{% endif %}
etcdStorageConfig:
kubernetesStoragePrefix: kubernetes.io
@@ -179,8 +179,8 @@ masterPublicURL: {{ openshift.master.public_api_url }}
networkConfig:
clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
-{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage or openshift.common.use_contiv or openshift.common.sdn_network_plugin_name == 'cni' %}
- networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
+{% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_sdn_network_plugin_name == 'cni' %}
+  networkPluginName: {{ r_openshift_master_sdn_network_plugin_name }}
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
serviceNetworkCIDR: {{ openshift.common.portal_net }}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
index 0e78d2d23..02bfd6f62 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
@@ -13,7 +13,7 @@ Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/openshift start master api --config=${CONFIG_FILE} $OPTIONS
LimitNOFILE=131072
LimitCORE=infinity
-WorkingDirectory={{ openshift.common.data_dir }}
+WorkingDirectory={{ r_openshift_master_data_dir }}
SyslogIdentifier=atomic-openshift-master-api
Restart=always
RestartSec=5s
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
index 94928f88c..e284413f7 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
@@ -17,7 +17,7 @@ Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS
LimitNOFILE=131072
LimitCORE=infinity
-WorkingDirectory={{ openshift.common.data_dir }}
+WorkingDirectory={{ r_openshift_master_data_dir }}
SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
Restart=always
RestartSec=5s
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index cf39b73f6..0c681c764 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -20,3 +20,22 @@ openshift_master_valid_grant_methods:
- deny
openshift_master_is_scaleup_host: False
+
+# These defaults force journald persistence, fsync to disk once a
+# second, rate-limiting to 10,000 logs a second, no forwarding to
+# syslog or wall, a maximum of 8GB of disk space, 10MB journal files,
+# at most a day's worth of logs per journal file, and retention of
+# journal files for no longer than a month.
+journald_vars_to_replace:
+- { var: Storage, val: persistent }
+- { var: Compress, val: yes }
+- { var: SyncIntervalSec, val: 1s }
+- { var: RateLimitInterval, val: 1s }
+- { var: RateLimitBurst, val: 10000 }
+- { var: SystemMaxUse, val: 8G }
+- { var: SystemKeepFree, val: 20% }
+- { var: SystemMaxFileSize, val: 10M }
+- { var: MaxRetentionSec, val: 1month }
+- { var: MaxFileSec, val: 1day }
+- { var: ForwardToSyslog, val: no }
+- { var: ForwardToWall, val: no }
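Run through the `replace` task in tasks/main.yml, these values leave /etc/systemd/journald.conf with managed lines like the following. The leading space and the `True`/`False` spellings fall out of the replace template and Jinja's rendering of YAML booleans; systemd parses boolean values case-insensitively, so they are accepted:

```
 Storage=persistent
 Compress=True
 SyncIntervalSec=1s
 RateLimitInterval=1s
 RateLimitBurst=10000
 SystemMaxUse=8G
 SystemKeepFree=20%
 SystemMaxFileSize=10M
 MaxRetentionSec=1month
 MaxFileSec=1day
 ForwardToSyslog=False
 ForwardToWall=False
```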
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index e767772ce..56c864ec7 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -380,11 +380,6 @@ class OpenIDIdentityProvider(IdentityProviderOauthBase):
if 'extra_authorize_parameters' in self._idp:
self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters')
- if 'extraAuthorizeParameters' in self._idp:
- if 'include_granted_scopes' in self._idp['extraAuthorizeParameters']:
- val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes'))
- self._idp['extraAuthorizeParameters']['include_granted_scopes'] = val
-
def validate(self):
''' validate this idp instance '''
IdentityProviderOauthBase.validate(self)
diff --git a/roles/openshift_metrics/tasks/pre_install.yaml b/roles/openshift_metrics/tasks/pre_install.yaml
index 2e2013d40..d6756f9b9 100644
--- a/roles/openshift_metrics/tasks/pre_install.yaml
+++ b/roles/openshift_metrics/tasks/pre_install.yaml
@@ -10,7 +10,7 @@
is invalid, must be one of: emptydir, pv, dynamic
when:
- openshift_metrics_cassandra_storage_type not in openshift_metrics_cassandra_storage_types
- - "not {{ openshift_metrics_heapster_standalone | bool }}"
+ - not (openshift_metrics_heapster_standalone | bool)
- name: list existing secrets
command: >
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index fb0b494da..32670b18e 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -17,22 +17,12 @@ From this role:
| Name | Default value | |
|----------------------------|-----------------------|----------------------------------------------------------|
-| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for node |
| oreg_url | UNDEF (Optional) | Default docker registry to use |
| oreg_url_node | UNDEF (Optional) | Default docker registry to use, specifically on the node |
-From openshift_common:
-
-| Name | Default Value | |
-|-------------------------------|---------------------|---------------------|
-| openshift_debug_level | 2 | Global openshift debug log verbosity |
-| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
-| openshift_hostname | UNDEF (Required) | hostname to use for this instance |
-
Dependencies
------------
-openshift_common
Example Playbook
----------------
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index a7dad5b1f..433e92201 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -60,7 +60,7 @@ openshift_deployment_type: origin
openshift_node_bootstrap: False
r_openshift_node_os_firewall_deny: []
-r_openshift_node_os_firewall_allow:
+default_r_openshift_node_os_firewall_allow:
- service: Kubernetes kubelet
port: 10250/tcp
- service: http
@@ -69,18 +69,42 @@ r_openshift_node_os_firewall_allow:
port: 443/tcp
- service: OpenShift OVS sdn
port: 4789/udp
- cond: openshift.common.use_openshift_sdn | default(true) | bool
+  cond: openshift_node_use_openshift_sdn | bool
- service: Calico BGP Port
port: 179/tcp
- cond: "{{ openshift.common.use_calico | bool }}"
+ cond: "{{ openshift_node_use_calico }}"
- service: Kubernetes service NodePort TCP
port: "{{ openshift_node_port_range | default('') }}/tcp"
cond: "{{ openshift_node_port_range is defined }}"
- service: Kubernetes service NodePort UDP
port: "{{ openshift_node_port_range | default('') }}/udp"
cond: "{{ openshift_node_port_range is defined }}"
+# Allow multiple port ranges to be added to the role
+r_openshift_node_os_firewall_allow: "{{ default_r_openshift_node_os_firewall_allow | union(openshift_node_open_ports | default([])) }}"
oreg_url: ''
oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
-oreg_auth_credentials_path: "{{ openshift.common.data_dir }}/.docker"
+oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
oreg_auth_credentials_replace: False
+l_bind_docker_reg_auth: False
+
+# NOTE
+# openshift_node_*_default may be defined externally to this role.
+# openshift_use_*, if defined, may affect other roles or play behavior.
+openshift_node_use_openshift_sdn_default: "{{ openshift_use_openshift_sdn | default(True) }}"
+openshift_node_use_openshift_sdn: "{{ openshift_node_use_openshift_sdn_default }}"
+
+openshift_node_sdn_network_plugin_name_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}"
+openshift_node_sdn_network_plugin_name: "{{ openshift_node_sdn_network_plugin_name_default }}"
+
+openshift_node_use_calico_default: "{{ openshift_use_calico | default(False) }}"
+openshift_node_use_calico: "{{ openshift_node_use_calico_default }}"
+
+openshift_node_use_nuage_default: "{{ openshift_use_nuage | default(False) }}"
+openshift_node_use_nuage: "{{ openshift_node_use_nuage_default }}"
+
+openshift_node_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"
+openshift_node_use_contiv: "{{ openshift_node_use_contiv_default }}"
+
+openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
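Because the allow list is now built with `union`, extra entries can come straight from inventory via `openshift_node_open_ports` without redefining the defaults; entries use the same `service`/`port` shape as the built-in list (values illustrative):

```
openshift_node_open_ports:
- service: node-exporter metrics
  port: 9100/tcp
- service: custom UDP range
  port: 32000-32050/udp
```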
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 14ba48aba..25a6fc721 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -3,7 +3,7 @@
systemd:
name: openvswitch
state: restarted
- when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | default(true) | bool
+ when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift_node_use_openshift_sdn | bool
register: l_openshift_node_stop_openvswitch_result
until: not l_openshift_node_stop_openvswitch_result | failed
retries: 3
@@ -29,8 +29,5 @@
- not (node_service_status_changed | default(false) | bool)
- not openshift_node_bootstrap
-- name: reload sysctl.conf
- command: /sbin/sysctl -p
-
- name: reload systemd units
command: systemctl daemon-reload
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 3db980514..ce5ecb9d0 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -15,11 +15,9 @@ dependencies:
- role: openshift_node_facts
- role: lib_openshift
- role: lib_os_firewall
-- role: openshift_common
- role: openshift_clock
- role: openshift_docker
- role: openshift_node_certificates
when: not openshift_node_bootstrap
- role: openshift_cloud_provider
- role: openshift_node_dnsmasq
- when: openshift.common.use_dnsmasq | bool
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
index cb1440283..b83b2c452 100644
--- a/roles/openshift_node/tasks/bootstrap.yml
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -42,14 +42,25 @@
path: /etc/origin/.config_managed
register: rpmgenerated_config
-- name: Remove RPM generated config files if present
- file:
- path: "/etc/origin/{{ item }}"
- state: absent
- when:
- - rpmgenerated_config.stat.exists
- - openshift_deployment_type in ['openshift-enterprise', 'atomic-enterprise']
- with_items:
- - master
- - node
- - .config_managed
+- when: rpmgenerated_config.stat.exists
+ block:
+ - name: Remove RPM generated config files if present
+ file:
+ path: "/etc/origin/{{ item }}"
+ state: absent
+ with_items:
+ - master
+
+ # with_fileglob doesn't work correctly due to a few issues.
+ # Could change this to fileglob when it gets fixed.
+ - name: find all files in /etc/origin/node so we can remove them
+ find:
+ path: /etc/origin/node/
+ register: find_results
+
+  - name: Remove everything except the resolv.conf files required by the node
+    file:
+      path: "{{ item.path }}"
+      state: absent
+    when: "'resolv.conf' not in item.path and 'node-dnsmasq.conf' not in item.path"
+ with_items: "{{ find_results.files }}"
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index 8210fd881..2759188f3 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -2,18 +2,6 @@
- name: Install the systemd units
include: systemd_units.yml
-- name: Check for tuned package
- command: rpm -q tuned
- args:
- warn: no
- register: tuned_installed
- changed_when: false
- failed_when: false
-
-- name: Set atomic-guest tuned profile
- command: "tuned-adm profile atomic-guest"
- when: tuned_installed.rc == 0 and openshift.common.is_atomic | bool
-
- name: Start and enable openvswitch service
systemd:
name: openvswitch.service
@@ -22,7 +10,7 @@
daemon_reload: yes
when:
- openshift.common.is_containerized | bool
- - openshift.common.use_openshift_sdn | default(true) | bool
+ - openshift_node_use_openshift_sdn | default(true) | bool
register: ovs_start_result
until: not ovs_start_result | failed
retries: 3
@@ -107,5 +95,9 @@
msg: Node failed to start please inspect the logs and try again
when: node_start_result | failed
+- name: Setup tuned
+ include: tuned.yml
+ static: yes
+
- set_fact:
node_service_status_changed: "{{ node_start_result | changed }}"
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 9bf4ed879..265bf2c46 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -1,11 +1,9 @@
---
-# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
-# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
- when: not openshift.common.is_containerized | bool
block:
- name: Install Node package
package:
- name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
- name: Install sdn-ovs package
@@ -13,7 +11,7 @@
name: "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when:
- - openshift.common.use_openshift_sdn | default(true) | bool
+ - openshift_node_use_openshift_sdn | bool
- name: Install conntrack-tools package
package:
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 60a25dcc6..ef79b6ac0 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -49,41 +49,32 @@
state: restarted
when: openshift_use_crio | default(false)
+- name: restart NetworkManager to ensure resolv.conf is present
+ systemd:
+ name: NetworkManager
+ enabled: yes
+ state: restarted
+ when: openshift_node_bootstrap | bool
+
# The atomic-openshift-node service will set this parameter on
# startup, but if the network service is restarted this setting is
# lost. Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1372388
-#
-# Use lineinfile w/ a handler for this task until
-# https://github.com/ansible/ansible/pull/24277 is included in an
-# ansible release and we can use the sysctl module.
-- name: Persist net.ipv4.ip_forward sysctl entry
- lineinfile: dest=/etc/sysctl.conf regexp='^net.ipv4.ip_forward' line='net.ipv4.ip_forward=1'
- notify:
- - reload sysctl.conf
+- sysctl:
+ name: net.ipv4.ip_forward
+ value: 1
+ sysctl_file: "/etc/sysctl.d/99-openshift.conf"
+ reload: yes
- name: include bootstrap node config
include: bootstrap.yml
when: openshift_node_bootstrap
+- include: registry_auth.yml
+
- name: include standard node config
include: config.yml
when: not openshift_node_bootstrap
-- name: Check for credentials file for registry auth
- stat:
- path: "{{oreg_auth_credentials_path }}"
- when:
- - oreg_auth_user is defined
- register: node_oreg_auth_credentials_stat
-
-- name: Create credentials for registry auth
- command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
- when:
- - oreg_auth_user is defined
- - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- notify:
- - restart node
-
- name: Configure AWS Cloud Provider Settings
lineinfile:
dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
@@ -121,4 +112,4 @@
##### END Storage #####
- include: config/workaround-bz1331590-ovs-oom-fix.yml
- when: openshift.common.use_openshift_sdn | default(true) | bool
+ when: openshift_node_use_openshift_sdn | default(true) | bool
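Switching from `lineinfile` plus a reload handler to the `sysctl` module persists and applies the setting in one step, which is why the `reload sysctl.conf` handler could be dropped from handlers/main.yml above; the resulting drop-in is simply:

```
# /etc/sysctl.d/99-openshift.conf (written by the sysctl module)
net.ipv4.ip_forward=1
```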
diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml
new file mode 100644
index 000000000..f370bb260
--- /dev/null
+++ b/roles/openshift_node/tasks/registry_auth.yml
@@ -0,0 +1,25 @@
+---
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{ oreg_auth_credentials_path }}"
+ when: oreg_auth_user is defined
+ register: node_oreg_auth_credentials_stat
+
+# Container images may need the registry credentials
+- name: Setup ro mount of /root/.docker for containerized hosts
+ set_fact:
+ l_bind_docker_reg_auth: True
+ when:
+ - openshift.common.is_containerized | bool
+ - oreg_auth_user is defined
+ - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ notify:
+ - restart node
+
+- name: Create credentials for registry auth
+ command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ when:
+ - oreg_auth_user is defined
+ - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ notify:
+ - restart node
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 4687400cd..6b4490f61 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -26,7 +26,7 @@
- name: Install OpenvSwitch system containers
include: openvswitch_system_container.yml
when:
- - openshift.common.use_openshift_sdn | default(true) | bool
+ - openshift_node_use_openshift_sdn | bool
- openshift.common.is_openvswitch_system_container | bool
- block:
@@ -39,7 +39,7 @@
- include: config/install-ovs-docker-service-file.yml
when:
- openshift.common.is_containerized | bool
- - openshift.common.use_openshift_sdn | default(true) | bool
+ - openshift_node_use_openshift_sdn | bool
- not openshift.common.is_openvswitch_system_container | bool
- include: config/configure-node-settings.yml
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 711afcadb..7049f7189 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -21,8 +21,6 @@ kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yam
- remote
container-runtime-endpoint:
- /var/run/crio.sock
- experimental-cri:
- - 'true'
image-service-endpoint:
- /var/run/crio.sock
node-labels:
@@ -39,15 +37,15 @@ masterClientConnectionOverrides:
qps: 100
{% endif %}
masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig
-{% if openshift.common.use_openshift_sdn | bool %}
-networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
+{% if openshift_node_use_openshift_sdn | bool %}
+networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
{% endif %}
# networkConfig struct introduced in origin 1.0.6 and OSE 3.0.2 which
# deprecates networkPluginName above. The two should match.
networkConfig:
mtu: {{ openshift.node.sdn_mtu }}
-{% if openshift.common.use_openshift_sdn | bool or openshift.common.use_nuage | bool or openshift.common.use_contiv | bool or openshift.common.sdn_network_plugin_name == 'cni' %}
- networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
+{% if openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_sdn_network_plugin_name == 'cni' %}
+ networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
{% endif %}
{% if openshift.node.set_node_ip | bool %}
nodeIP: {{ openshift.common.ip }}
@@ -68,7 +66,7 @@ servingInfo:
- {{ cipher_suite }}
{% endfor %}
{% endif %}
-volumeDirectory: {{ openshift.common.data_dir }}/openshift.local.volumes
+volumeDirectory: {{ openshift_node_data_dir }}/openshift.local.volumes
proxyArguments:
proxy-mode:
- {{ openshift.node.proxy_mode }}
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 639b6f6c8..4ab10b95f 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -4,7 +4,7 @@ After={{ openshift.docker.service_name }}.service
After=openvswitch.service
PartOf={{ openshift.docker.service_name }}.service
Requires={{ openshift.docker.service_name }}.service
-{% if openshift.common.use_openshift_sdn %}
+{% if openshift_node_use_openshift_sdn %}
Wants=openvswitch.service
After=ovsdb-server.service
After=ovs-vswitchd.service
@@ -21,7 +21,22 @@ EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
+ --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \
+ -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \
+ -e HOST=/rootfs -e HOST_ETC=/host-etc \
+ -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} \
+ -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node \
+ {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
+ -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro \
+ -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw \
+ -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker \
+ -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch \
+ -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni \
+ -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log \
+ -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \
+ {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+ {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
diff --git a/roles/openshift_node_dnsmasq/meta/main.yml b/roles/openshift_node_dnsmasq/meta/main.yml
index 84035b88c..d80ed1b72 100644
--- a/roles/openshift_node_dnsmasq/meta/main.yml
+++ b/roles/openshift_node_dnsmasq/meta/main.yml
@@ -12,5 +12,4 @@ galaxy_info:
categories:
- cloud
dependencies:
-- role: openshift_common
- role: openshift_node_facts
diff --git a/roles/openshift_node_upgrade/README.md b/roles/openshift_node_upgrade/README.md
index 4e6229bfb..5ad994df9 100644
--- a/roles/openshift_node_upgrade/README.md
+++ b/roles/openshift_node_upgrade/README.md
@@ -32,14 +32,12 @@ From openshift.common:
| Name | Default Value | |
|------------------------------------|---------------------|---------------------|
| openshift.common.config_base |---------------------|---------------------|
-| openshift.common.data_dir |---------------------|---------------------|
| openshift.common.hostname |---------------------|---------------------|
| openshift.common.http_proxy |---------------------|---------------------|
| openshift.common.is_atomic |---------------------|---------------------|
| openshift.common.is_containerized |---------------------|---------------------|
| openshift.common.portal_net |---------------------|---------------------|
| openshift.common.service_type |---------------------|---------------------|
-| openshift.common.use_openshift_sdn |---------------------|---------------------|
From openshift.master:
@@ -58,7 +56,7 @@ From openshift.node:
Dependencies
------------
-openshift_common
+
TODO
diff --git a/roles/openshift_node_upgrade/defaults/main.yml b/roles/openshift_node_upgrade/defaults/main.yml
index ed97d539c..3d8704308 100644
--- a/roles/openshift_node_upgrade/defaults/main.yml
+++ b/roles/openshift_node_upgrade/defaults/main.yml
@@ -1 +1,6 @@
---
+openshift_use_openshift_sdn: True
+os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
+
+openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
diff --git a/roles/openshift_node_upgrade/handlers/main.yml b/roles/openshift_node_upgrade/handlers/main.yml
index d31b899cf..90d80855e 100644
--- a/roles/openshift_node_upgrade/handlers/main.yml
+++ b/roles/openshift_node_upgrade/handlers/main.yml
@@ -6,7 +6,7 @@
when:
- not skip_node_svc_handlers | default(False) | bool
- not (ovs_service_status_changed | default(false) | bool)
- - openshift.common.use_openshift_sdn | default(true) | bool
+ - openshift_use_openshift_sdn | bool
register: l_openshift_node_upgrade_stop_openvswitch_result
until: not l_openshift_node_upgrade_stop_openvswitch_result | failed
retries: 3
diff --git a/roles/openshift_node_upgrade/meta/main.yml b/roles/openshift_node_upgrade/meta/main.yml
index 2a36d8945..a810b01dc 100644
--- a/roles/openshift_node_upgrade/meta/main.yml
+++ b/roles/openshift_node_upgrade/meta/main.yml
@@ -11,4 +11,3 @@ galaxy_info:
- 7
dependencies:
- role: lib_utils
-- role: openshift_common
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index bc092c26c..e34319186 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -44,7 +44,7 @@
changed_when: "'Downloaded newer image' in pull_result.stdout"
when:
- openshift.common.is_containerized | bool
- - openshift.common.use_openshift_sdn | default(true) | bool
+ - openshift_use_openshift_sdn | bool
- include: docker/upgrade.yml
vars:
@@ -142,7 +142,7 @@
# End Disable Swap Block
- name: Reset selinux context
- command: restorecon -RF {{ openshift.common.data_dir }}/openshift.local.volumes
+ command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes
when:
- ansible_selinux is defined
- ansible_selinux.status == 'enabled'
diff --git a/roles/openshift_node_upgrade/tasks/systemd_units.yml b/roles/openshift_node_upgrade/tasks/systemd_units.yml
index 4e9550150..afff2f8ba 100644
--- a/roles/openshift_node_upgrade/tasks/systemd_units.yml
+++ b/roles/openshift_node_upgrade/tasks/systemd_units.yml
@@ -4,7 +4,7 @@
# - openshift_image_tag
# - openshift.common.is_containerized
# - openshift.node.ovs_image
-# - openshift.common.use_openshift_sdn
+# - openshift_use_openshift_sdn
# - openshift.common.service_type
# - openshift.node.debug_level
# - openshift.common.config_base
@@ -28,10 +28,10 @@
when: openshift.common.is_containerized | bool
- include: config/workaround-bz1331590-ovs-oom-fix.yml
- when: openshift.common.use_openshift_sdn | default(true) | bool
+ when: openshift_use_openshift_sdn | bool
- include: config/install-ovs-docker-service-file.yml
- when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | default(true) | bool
+ when: openshift.common.is_containerized | bool and openshift_use_openshift_sdn | bool
- include: config/configure-node-settings.yml
- include: config/configure-proxy-settings.yml
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
index 639b6f6c8..451412ab0 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
@@ -4,7 +4,7 @@ After={{ openshift.docker.service_name }}.service
After=openvswitch.service
PartOf={{ openshift.docker.service_name }}.service
Requires={{ openshift.docker.service_name }}.service
-{% if openshift.common.use_openshift_sdn %}
+{% if openshift_use_openshift_sdn %}
Wants=openvswitch.service
After=ovsdb-server.service
After=ovs-vswitchd.service
@@ -21,7 +21,7 @@ EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
diff --git a/roles/openshift_persistent_volumes/README.md b/roles/openshift_persistent_volumes/README.md
index 1489cb0bd..0407d6ef1 100644
--- a/roles/openshift_persistent_volumes/README.md
+++ b/roles/openshift_persistent_volumes/README.md
@@ -17,13 +17,6 @@ From this role:
| persistent_volume_claims | [] | List of persistent volume claim dictionaries, keys: name, capacity, access_modes |
-From openshift_common:
-
-| Name | Default Value | |
-|-------------------------------|----------------|----------------------------------------|
-| openshift_debug_level | 2 | Global openshift debug log verbosity |
-
-
Dependencies
------------
diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml
index 25e5a38dd..8d3d010e4 100644
--- a/roles/openshift_persistent_volumes/meta/main.yml
+++ b/roles/openshift_persistent_volumes/meta/main.yml
@@ -10,5 +10,4 @@ galaxy_info:
versions:
- 7
dependencies:
-- role: openshift_common
- role: openshift_hosted_facts
diff --git a/roles/openshift_prometheus/README.md b/roles/openshift_prometheus/README.md
new file mode 100644
index 000000000..c5a44bffb
--- /dev/null
+++ b/roles/openshift_prometheus/README.md
@@ -0,0 +1,95 @@
+OpenShift Prometheus
+====================
+
+OpenShift Prometheus Installation
+
+Requirements
+------------
+
+
+Role Variables
+--------------
+
+For default values, see [`defaults/main.yaml`](defaults/main.yaml).
+
+- `openshift_prometheus_state`: `present` installs/updates; `absent` uninstalls.
+
+- `openshift_prometheus_namespace`: project (i.e. namespace) where the components will be
+ deployed.
+
+- `openshift_prometheus_replicas`: The number of replicas for the prometheus deployment.
+
+- `openshift_prometheus_node_selector`: Selector for the nodes prometheus will be deployed on.
+
+- `openshift_prometheus_image_<COMPONENT>`: specify the image for the given component
+
+## Storage related variables
+Each prometheus component (prometheus, alertmanager, alert-buffer, oauth-proxy) can have its PV claim configured by setting the corresponding role variables:
+```
+openshift_prometheus_<COMPONENT>_storage_type: <VALUE>
+openshift_prometheus_<COMPONENT>_pvc_(name|size|access_modes|pv_selector): <VALUE>
+```
+e.g.
+```
+openshift_prometheus_storage_type: pvc
+openshift_prometheus_alertmanager_pvc_name: alertmanager
+openshift_prometheus_alertbuffer_pvc_size: 10G
+openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
+```
+
+## Additional Alert Rules file variable
+An external file with alert rules can be added by setting the path in the additional rules variable:
+```
+openshift_prometheus_additional_rules_file: <PATH>
+```
+
+The file content should be in prometheus alert rules format.
+The following example sets a rule that fires an alert when one of the cluster nodes is down:
+
+```
+groups:
+- name: example-rules
+ interval: 30s # defaults to global interval
+ rules:
+ - alert: Node Down
+ expr: up{job="kubernetes-nodes"} == 0
+ annotations:
+ miqTarget: "ContainerNode"
+ severity: "HIGH"
+ message: "{{ '{{' }}{{ '$labels.instance' }}{{ '}}' }} is down"
+```
+
+
+## Additional variables to control resource limits
+Each prometheus component (prometheus, alertmanager, alert-buffer, oauth-proxy) can have CPU and memory limits and requests specified by
+setting the corresponding role variables:
+```
+openshift_prometheus_<COMPONENT>_(cpu|memory)_(limit|requests): <VALUE>
+```
+e.g.
+```
+openshift_prometheus_alertmanager_memory_limit: 1Gi
+openshift_prometheus_oauth_proxy_cpu_requests: 100m
+```
+
+Dependencies
+------------
+
+openshift_facts
+
+
+Example Playbook
+----------------
+
+```
+- name: Configure openshift-prometheus
+ hosts: oo_first_master
+ roles:
+ - role: openshift_prometheus
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
new file mode 100644
index 000000000..5aa8aecec
--- /dev/null
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -0,0 +1,74 @@
+---
+# defaults file for openshift_prometheus
+openshift_prometheus_state: present
+
+openshift_prometheus_namespace: prometheus
+
+openshift_prometheus_replicas: 1
+openshift_prometheus_node_selector: {"region":"infra"}
+
+# images
+openshift_prometheus_image_proxy: "openshift/oauth-proxy:v1.0.0"
+openshift_prometheus_image_prometheus: "openshift/prometheus:v2.0.0-dev"
+openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:dev"
+openshift_prometheus_image_alertbuffer: "openshift/prometheus-alert-buffer:v0.0.1"
+
+# additional prometheus rules file
+openshift_prometheus_additional_rules_file: null
+
+# All the required exports
+openshift_prometheus_pv_exports:
+ - prometheus
+ - prometheus-alertmanager
+ - prometheus-alertbuffer
+# PV template files and their created object names
+openshift_prometheus_pv_data:
+ - pv_name: prometheus
+ pv_template: prom-pv-server.yml
+ pv_label: Prometheus Server PV
+ - pv_name: prometheus-alertmanager
+ pv_template: prom-pv-alertmanager.yml
+ pv_label: Prometheus Alertmanager PV
+ - pv_name: prometheus-alertbuffer
+ pv_template: prom-pv-alertbuffer.yml
+ pv_label: Prometheus Alert Buffer PV
+
+# Hostname/IP of the NFS server. Currently defaults to the first host in the nfs group
+openshift_prometheus_nfs_server: "{{ groups.nfs.0 }}"
+
+# storage
+openshift_prometheus_storage_type: pvc
+openshift_prometheus_pvc_name: prometheus
+openshift_prometheus_pvc_size: 10G
+openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
+openshift_prometheus_pvc_pv_selector: {}
+
+openshift_prometheus_alertmanager_storage_type: pvc
+openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager
+openshift_prometheus_alertmanager_pvc_size: 10G
+openshift_prometheus_alertmanager_pvc_access_modes: [ReadWriteOnce]
+openshift_prometheus_alertmanager_pvc_pv_selector: {}
+
+openshift_prometheus_alertbuffer_storage_type: pvc
+openshift_prometheus_alertbuffer_pvc_name: prometheus-alertbuffer
+openshift_prometheus_alertbuffer_pvc_size: 10G
+openshift_prometheus_alertbuffer_pvc_access_modes: [ReadWriteOnce]
+openshift_prometheus_alertbuffer_pvc_pv_selector: {}
+
+# container resources
+openshift_prometheus_cpu_limit: null
+openshift_prometheus_memory_limit: null
+openshift_prometheus_cpu_requests: null
+openshift_prometheus_memory_requests: null
+openshift_prometheus_alertmanager_cpu_limit: null
+openshift_prometheus_alertmanager_memory_limit: null
+openshift_prometheus_alertmanager_cpu_requests: null
+openshift_prometheus_alertmanager_memory_requests: null
+openshift_prometheus_alertbuffer_cpu_limit: null
+openshift_prometheus_alertbuffer_memory_limit: null
+openshift_prometheus_alertbuffer_cpu_requests: null
+openshift_prometheus_alertbuffer_memory_requests: null
+openshift_prometheus_oauth_proxy_cpu_limit: null
+openshift_prometheus_oauth_proxy_memory_limit: null
+openshift_prometheus_oauth_proxy_cpu_requests: null
+openshift_prometheus_oauth_proxy_memory_requests: null
diff --git a/roles/openshift_prometheus/files/openshift_prometheus.exports b/roles/openshift_prometheus/files/openshift_prometheus.exports
new file mode 100644
index 000000000..3ccedb1fd
--- /dev/null
+++ b/roles/openshift_prometheus/files/openshift_prometheus.exports
@@ -0,0 +1,3 @@
+/exports/prometheus *(rw,no_root_squash,no_wdelay)
+/exports/prometheus-alertmanager *(rw,no_root_squash,no_wdelay)
+/exports/prometheus-alertbuffer *(rw,no_root_squash,no_wdelay)
diff --git a/roles/openshift_prometheus/meta/main.yaml b/roles/openshift_prometheus/meta/main.yaml
new file mode 100644
index 000000000..33188bb7e
--- /dev/null
+++ b/roles/openshift_prometheus/meta/main.yaml
@@ -0,0 +1,19 @@
+---
+galaxy_info:
+ author: OpenShift Development <dev@lists.openshift.redhat.com>
+ description: Deploy OpenShift prometheus integration for the cluster
+ company: Red Hat, Inc.
+ license: license (Apache)
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ - name: Fedora
+ versions:
+ - all
+ categories:
+ - openshift
+dependencies:
+- { role: lib_openshift }
+- { role: openshift_facts }
diff --git a/roles/openshift_prometheus/tasks/create_pvs.yaml b/roles/openshift_prometheus/tasks/create_pvs.yaml
new file mode 100644
index 000000000..4e79da05f
--- /dev/null
+++ b/roles/openshift_prometheus/tasks/create_pvs.yaml
@@ -0,0 +1,36 @@
+---
+# Check for existence and then conditionally:
+# - evaluate templates
+# - create PVs
+#
+# These tasks idempotently create required Prometheus PV objects. Do not
+# call this file directly. This file is intended to be run as an
+# include that has a 'with_items' attached to it. Hence the use below
+# of variables like "{{ item.pv_label }}"
+
+- name: "Check if the {{ item.pv_label }} template has been created already"
+ oc_obj:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ state: list
+ kind: pv
+ name: "{{ item.pv_name }}"
+ register: prom_pv_check
+
+# Skip all of this if the PV already exists
+- block:
+ - name: "Ensure the {{ item.pv_label }} template is evaluated"
+ template:
+ src: "{{ item.pv_template }}.j2"
+ dest: "{{ tempdir }}/templates/{{ item.pv_template }}"
+
+ - name: "Ensure {{ item.pv_label }} is created"
+ oc_obj:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ kind: pv
+ name: "{{ item.pv_name }}"
+ state: present
+ delete_after: True
+ files:
+ - "{{ tempdir }}/templates/{{ item.pv_template }}"
+ when:
+ - not prom_pv_check.results.results.0
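create_pvs.yaml therefore expects to be included with a `with_items` loop; a plausible call site, iterating over `openshift_prometheus_pv_data` from defaults (the actual include lives elsewhere in the role and is not part of this diff):

```
- name: Create Prometheus PV objects
  include: create_pvs.yaml
  with_items: "{{ openshift_prometheus_pv_data }}"
```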
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
new file mode 100644
index 000000000..a9bce2fb1
--- /dev/null
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -0,0 +1,244 @@
+---
+
+# namespace
+- name: Add prometheus project
+ oc_project:
+ state: "{{ state }}"
+ name: "{{ openshift_prometheus_namespace }}"
+ node_selector: "{{ openshift_prometheus_node_selector | oo_selector_to_string_list() }}"
+ description: Prometheus
+
+# secrets
+- name: Set alert and prometheus secrets
+ oc_secret:
+ state: "{{ state }}"
+ name: "{{ item }}-proxy"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ contents:
+ - path: session_secret
+ data: "{{ 43 | oo_random_word }}="
+ with_items:
+ - prometheus
+ - alerts
+
+# serviceaccount
+- name: create prometheus serviceaccount
+ oc_serviceaccount:
+ state: "{{ state }}"
+ name: prometheus
+ namespace: "{{ openshift_prometheus_namespace }}"
+  # TODO add annotations when supported
+ # annotations:
+ # serviceaccounts.openshift.io/oauth-redirectreference.prom: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}'
+ # serviceaccounts.openshift.io/oauth-redirectreference.alerts: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}'
+
+ secrets:
+ - prometheus-secrets
+ changed_when: no
+
+# TODO remove this when annotations are supported by oc_serviceaccount
+- name: annotate serviceaccount
+ command: >
+ {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+ serviceaccount prometheus
+ serviceaccounts.openshift.io/oauth-redirectreference.prom='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}'
+ serviceaccounts.openshift.io/oauth-redirectreference.alerts='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}'
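+# The resulting annotations can be verified with, e.g.:
+#   oc get serviceaccount prometheus -n <namespace> -o yaml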
+
+
+# create clusterrolebinding for prometheus serviceaccount
+- name: Set cluster-reader permissions for prometheus
+ oc_adm_policy_user:
+ state: "{{ state }}"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ resource_kind: cluster-role
+ resource_name: cluster-reader
+ user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:prometheus"
+
+
+######################################################################
+# NFS
+# In the case that we are not running on a cloud provider, volumes must be statically provisioned
+
+- include: nfs.yaml
+ when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
+
+
+# create prometheus and alerts services
+# TODO join into 1 task with loop
+- name: Create prometheus service
+ oc_service:
+ state: "{{ state }}"
+ name: "{{ item.name }}"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ selector:
+ app: prometheus
+ labels:
+ name: "{{ item.name }}"
+ # TODO add annotations when supported
+ # annotations:
+ # service.alpha.openshift.io/serving-cert-secret-name: "{{item.name}}-tls"
+ ports:
+ - port: 443
+ targetPort: 8443
+ with_items:
+ - name: prometheus
+
+- name: Create alerts service
+ oc_service:
+ state: "{{ state }}"
+ name: "{{ item.name }}"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ selector:
+ app: prometheus
+ labels:
+ name: "{{ item.name }}"
+ # TODO add annotations when supported
+ # annotations:
+ # service.alpha.openshift.io/serving-cert-secret-name: "{{item.name}}-tls"
+ ports:
+ - port: 443
+ targetPort: 9443
+ with_items:
+ - name: alerts
+
+
+# Annotate services with secret name
+# TODO remove this when annotations are supported by oc_service
+- name: annotate prometheus service
+ command: >
+ {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+ service prometheus
+ prometheus.io/scrape='true'
+ prometheus.io/scheme=https
+ service.alpha.openshift.io/serving-cert-secret-name=prometheus-tls
+
+- name: annotate alerts service
+ command: >
+ {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+ service alerts 'service.alpha.openshift.io/serving-cert-secret-name=prometheus-alerts-tls'
+
+# create prometheus and alerts routes
+- name: create prometheus and alerts routes
+ oc_route:
+ state: "{{ state }}"
+ name: "{{ item.name }}"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ service_name: "{{ item.name }}"
+ tls_termination: reencrypt
+ with_items:
+ - name: prometheus
+ - name: alerts
+
+# Storage
+- name: create prometheus pvc
+ oc_pvc:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ name: "{{ openshift_prometheus_pvc_name }}"
+ access_modes: "{{ openshift_prometheus_pvc_access_modes }}"
+ volume_capacity: "{{ openshift_prometheus_pvc_size }}"
+ selector: "{{ openshift_prometheus_pvc_pv_selector }}"
+
+- name: create alertmanager pvc
+ oc_pvc:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ name: "{{ openshift_prometheus_alertmanager_pvc_name }}"
+ access_modes: "{{ openshift_prometheus_alertmanager_pvc_access_modes }}"
+ volume_capacity: "{{ openshift_prometheus_alertmanager_pvc_size }}"
+ selector: "{{ openshift_prometheus_alertmanager_pvc_pv_selector }}"
+
+- name: create alertbuffer pvc
+ oc_pvc:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ name: "{{ openshift_prometheus_alertbuffer_pvc_name }}"
+ access_modes: "{{ openshift_prometheus_alertbuffer_pvc_access_modes }}"
+ volume_capacity: "{{ openshift_prometheus_alertbuffer_pvc_size }}"
+ selector: "{{ openshift_prometheus_alertbuffer_pvc_pv_selector }}"
+
+# create prometheus deployment
+- name: Set prometheus deployment template
+ template:
+ src: prometheus_deployment.j2
+ dest: "{{ tempdir }}/templates/prometheus.yaml"
+ vars:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ prom_replicas: "{{ openshift_prometheus_replicas }}"
+
+- name: Set prometheus deployment
+ oc_obj:
+ state: "{{ state }}"
+ name: "prometheus"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ kind: deployment
+ files:
+ - "{{ tempdir }}/templates/prometheus.yaml"
+ delete_after: true
+
+# prometheus configmap
+# Copy the additional rules file if it is defined
+- name: Copy additional rules file to host
+ copy:
+ src: "{{ openshift_prometheus_additional_rules_file }}"
+ dest: "{{ tempdir }}/prometheus.additional.rules"
+ when:
+ - openshift_prometheus_additional_rules_file is defined
+ - openshift_prometheus_additional_rules_file is not none
+ - openshift_prometheus_additional_rules_file | trim | length > 0
+
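+# Illustrative inventory setting (hypothetical path; the file lives on the
+# Ansible control host and follows the same format as prometheus.rules.j2):
+#
+#   openshift_prometheus_additional_rules_file: /srv/ansible/prometheus-extra.rules
+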
+- stat:
+ path: "{{ tempdir }}/prometheus.additional.rules"
+ register: additional_rules_stat
+
+# The Kubernetes version impacts the Prometheus scraping endpoint,
+# so gather it before constructing the configmap
+- name: get oc version
+ oc_version:
+ register: oc_version
+
+- set_fact:
+ kubernetes_version: "{{ oc_version.results.kubernetes_short | float }}"
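+
+# Illustrative sketch of the assumed result shape: kubernetes_short is a
+# string such as "1.6" or "1.7", which prometheus.yml.j2 compares as a float:
+#   - debug:
+#       msg: "cAdvisor path: {{ '/metrics/cadvisor' if kubernetes_version | float >= 1.7 else '/metrics' }}"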
+
+- template:
+ src: prometheus.yml.j2
+ dest: "{{ tempdir }}/prometheus.yml"
+ changed_when: no
+
+- template:
+ src: prometheus.rules.j2
+ dest: "{{ tempdir }}/prometheus.rules"
+ changed_when: no
+
+# In prometheus configmap create "additional.rules" section if file exists
+- name: Set prometheus configmap
+ oc_configmap:
+ state: "{{ state }}"
+ name: "prometheus"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ from_file:
+ prometheus.rules: "{{ tempdir }}/prometheus.rules"
+ prometheus.additional.rules: "{{ tempdir }}/prometheus.additional.rules"
+ prometheus.yml: "{{ tempdir }}/prometheus.yml"
+ when: additional_rules_stat.stat.exists
+
+- name: Set prometheus configmap
+ oc_configmap:
+ state: "{{ state }}"
+ name: "prometheus"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ from_file:
+ prometheus.rules: "{{ tempdir }}/prometheus.rules"
+ prometheus.yml: "{{ tempdir }}/prometheus.yml"
+ when: not additional_rules_stat.stat.exists
+
+# alertmanager configmap
+- template:
+ src: alertmanager.yml.j2
+ dest: "{{ tempdir }}/alertmanager.yml"
+ changed_when: no
+
+- name: Set alertmanager configmap
+ oc_configmap:
+ state: "{{ state }}"
+ name: "prometheus-alerts"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ from_file:
+ alertmanager.yml: "{{ tempdir }}/alertmanager.yml"
diff --git a/roles/openshift_prometheus/tasks/main.yaml b/roles/openshift_prometheus/tasks/main.yaml
new file mode 100644
index 000000000..523a64334
--- /dev/null
+++ b/roles/openshift_prometheus/tasks/main.yaml
@@ -0,0 +1,26 @@
+---
+
+- name: Create a temp directory to work in on the target host
+ command: mktemp -td openshift-prometheus-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+- include: install_prometheus.yaml
+ vars:
+ state: "{{ openshift_prometheus_state }}"
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
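+
+# Minimal usage sketch (see also tests/test.yaml). Setting the state variable
+# to 'absent' is assumed to remove the created objects via the oc_* modules:
+#
+#   - hosts: masters[0]
+#     roles:
+#     - role: openshift_prometheus
+#       openshift_prometheus_state: present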
diff --git a/roles/openshift_prometheus/tasks/nfs.yaml b/roles/openshift_prometheus/tasks/nfs.yaml
new file mode 100644
index 000000000..0b45f2cee
--- /dev/null
+++ b/roles/openshift_prometheus/tasks/nfs.yaml
@@ -0,0 +1,44 @@
+---
+# Tasks to statically provision NFS volumes
+# Include if not using dynamic volume provisioning
+- name: Ensure the /exports/ directory exists
+ file:
+ path: /exports/
+ state: directory
+ mode: 0755
+ owner: root
+ group: root
+
+- name: Ensure the prom-pv0X export directories exist
+ file:
+ path: "/exports/{{ item }}"
+ state: directory
+ mode: 0777
+ owner: nfsnobody
+ group: nfsnobody
+ with_items: "{{ openshift_prometheus_pv_exports }}"
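+
+# openshift_prometheus_pv_exports is assumed to list one directory name per
+# PV, matching the paths used in the PV templates, e.g.:
+#
+#   openshift_prometheus_pv_exports:
+#   - prometheus
+#   - prometheus-alertmanager
+#   - prometheus-alertbuffer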
+
+- name: Ensure the NFS exports for Prometheus PVs exist
+ copy:
+ src: openshift_prometheus.exports
+ dest: /etc/exports.d/openshift_prometheus.exports
+ register: nfs_exports_updated
+
+- name: Ensure the NFS export table is refreshed if exports were added
+ command: exportfs -ar
+ when:
+ - nfs_exports_updated.changed
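+
+# The copied exports file is assumed to contain one NFS export per directory
+# created above, along the lines of:
+#
+#   "/exports/prometheus" *(rw,root_squash)
+#   "/exports/prometheus-alertmanager" *(rw,root_squash)
+#   "/exports/prometheus-alertbuffer" *(rw,root_squash)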
+
+
+######################################################################
+# Create the required Prometheus PVs. Check out these online docs if you
+# need a refresher on looping over includes with with_items:
+# * http://docs.ansible.com/ansible/playbooks_loops.html#loops-and-includes-in-2-0
+# * http://stackoverflow.com/a/35128533
+#
+# TODO: Handle the case where a PV template is updated in
+# openshift-ansible and the change needs to be landed on the managed
+# cluster.
+
+- include: create_pvs.yaml
+ with_items: "{{ openshift_prometheus_pv_data }}"
diff --git a/roles/openshift_prometheus/templates/alertmanager.yml.j2 b/roles/openshift_prometheus/templates/alertmanager.yml.j2
new file mode 100644
index 000000000..6c432a3d0
--- /dev/null
+++ b/roles/openshift_prometheus/templates/alertmanager.yml.j2
@@ -0,0 +1,20 @@
+global:
+
+# The root route on which each incoming alert enters.
+route:
+ # default route if none match
+ receiver: alert-buffer-wh
+
+ # The labels by which incoming alerts are grouped together. For example,
+ # multiple alerts coming in for cluster=A and alertname=LatencyHigh would
+ # be batched into a single group.
+ # TODO:
+ group_by: []
+
+ # All the above attributes are inherited by all child routes and can be
+ # overwritten on each.
+
+receivers:
+- name: alert-buffer-wh
+ webhook_configs:
+ - url: http://localhost:9099/topics/alerts
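+
+# Additional receivers could be appended below; an illustrative sketch only,
+# following the Alertmanager configuration format:
+#
+# - name: email-admin
+#   email_configs:
+#   - to: admin@example.com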
diff --git a/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2
new file mode 100644
index 000000000..55a5e19c3
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: prometheus-alertbuffer
+ labels:
+ storage: prometheus-alertbuffer
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/prometheus-alertbuffer
+ server: {{ openshift_prometheus_nfs_server }}
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2
new file mode 100644
index 000000000..4ee518735
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: prometheus-alertmanager
+ labels:
+ storage: prometheus-alertmanager
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/prometheus-alertmanager
+ server: {{ openshift_prometheus_nfs_server }}
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prom-pv-server.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-server.yml.j2
new file mode 100644
index 000000000..933bf0f60
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prom-pv-server.yml.j2
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: prometheus
+ labels:
+ storage: prometheus
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/prometheus
+ server: {{ openshift_prometheus_nfs_server }}
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prometheus.rules.j2 b/roles/openshift_prometheus/templates/prometheus.rules.j2
new file mode 100644
index 000000000..e861dc127
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prometheus.rules.j2
@@ -0,0 +1,4 @@
+groups:
+- name: example-rules
+ interval: 30s # defaults to global interval
+ rules:
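+# Illustrative alerting rule (commented out; the format follows Prometheus 2.x
+# rule files, adapt the expression to your environment):
+#  - alert: TargetDown
+#    expr: up == 0
+#    for: 5m
+#    annotations:
+#      summary: "A scrape target has been unreachable for 5 minutes"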
diff --git a/roles/openshift_prometheus/templates/prometheus.yml.j2 b/roles/openshift_prometheus/templates/prometheus.yml.j2
new file mode 100644
index 000000000..63430f834
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prometheus.yml.j2
@@ -0,0 +1,174 @@
+rule_files:
+ - 'prometheus.rules'
+{% if openshift_prometheus_additional_rules_file is defined and openshift_prometheus_additional_rules_file is not none %}
+ - 'prometheus.additional.rules'
+{% endif %}
+
+
+
+# A scrape configuration for running Prometheus on a Kubernetes cluster.
+# This uses separate scrape configs for cluster components (i.e. API server, node)
+# and services to allow each to use different authentication configs.
+#
+# Kubernetes labels will be added as Prometheus labels on metrics via the
+# `labelmap` relabeling action.
+
+# Scrape config for API servers.
+#
+# Kubernetes exposes API servers as endpoints to the default/kubernetes
+# service so this uses `endpoints` role and uses relabelling to only keep
+# the endpoints associated with the default/kubernetes service using the
+# default named port `https`. This works for single API server deployments as
+# well as HA API server deployments.
+scrape_configs:
+- job_name: 'kubernetes-apiservers'
+
+ kubernetes_sd_configs:
+ - role: endpoints
+
+ scheme: https
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+ # Keep only the default/kubernetes service endpoints for the https port. This
+ # adds a target for each API server for which Kubernetes has added an endpoint
+ # to the default/kubernetes service.
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+ action: keep
+ regex: default;kubernetes;https
+
+# Scrape config for nodes.
+#
+# Each node exposes a /metrics endpoint that contains operational metrics for
+# the Kubelet and other components.
+- job_name: 'kubernetes-nodes'
+
+ scheme: https
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+ kubernetes_sd_configs:
+ - role: node
+
+ relabel_configs:
+ - action: labelmap
+ regex: __meta_kubernetes_node_label_(.+)
+
+# Scrape config for controllers.
+#
+# Each master node exposes a /metrics endpoint on :8444 that contains operational metrics for
+# the controllers.
+#
+# TODO: move this to a pure endpoints based metrics gatherer when controllers are exposed via
+# endpoints.
+- job_name: 'kubernetes-controllers'
+
+ scheme: https
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+ kubernetes_sd_configs:
+ - role: endpoints
+
+ # Keep only the default/kubernetes service endpoints for the https port, and then
+ # set the port to 8444. This is the default configuration for the controllers on OpenShift
+ # masters.
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+ action: keep
+ regex: default;kubernetes;https
+ - source_labels: [__address__]
+ action: replace
+ target_label: __address__
+ regex: (.+)(?::\d+)
+ replacement: $1:8444
+
+# Scrape config for cAdvisor.
+#
+# Beginning in Kube 1.7, each node exposes a /metrics/cadvisor endpoint that
+# reports container metrics for each running pod. Scrape those by default.
+- job_name: 'kubernetes-cadvisor'
+
+ scheme: https
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+{% if kubernetes_version | float >= 1.7 %}
+ metrics_path: /metrics/cadvisor
+{% else %}
+ metrics_path: /metrics
+{% endif %}
+
+ kubernetes_sd_configs:
+ - role: node
+
+ relabel_configs:
+ - action: labelmap
+ regex: __meta_kubernetes_node_label_(.+)
+
+# Scrape config for service endpoints.
+#
+# The relabeling allows the actual service scrape endpoint to be configured
+# via the following annotations:
+#
+# * `prometheus.io/scrape`: Only scrape services that have a value of `true`
+# * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
+# to set this to `https` and most likely set the `tls_config` of the scrape config.
+# * `prometheus.io/path`: If the metrics path is not `/metrics`, override this.
+# * `prometheus.io/port`: If the metrics are exposed on a different port than the
+# service's port, set this appropriately.
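+#
+# For example, a service exposing metrics on port 8080 over plain HTTP could
+# be annotated like this (illustrative snippet of a Service manifest):
+#
+#   metadata:
+#     annotations:
+#       prometheus.io/scrape: "true"
+#       prometheus.io/port: "8080"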
+- job_name: 'kubernetes-service-endpoints'
+
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ # TODO: this should be per target
+ insecure_skip_verify: true
+
+ kubernetes_sd_configs:
+ - role: endpoints
+
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
+ action: keep
+ regex: true
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
+ action: replace
+ target_label: __scheme__
+ regex: (https?)
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
+ action: replace
+ target_label: __metrics_path__
+ regex: (.+)
+ - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
+ action: replace
+ target_label: __address__
+ regex: (.+)(?::\d+);(\d+)
+ replacement: $1:$2
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_username]
+ action: replace
+ target_label: __basic_auth_username__
+ regex: (.+)
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_password]
+ action: replace
+ target_label: __basic_auth_password__
+ regex: (.+)
+ - action: labelmap
+ regex: __meta_kubernetes_service_label_(.+)
+ - source_labels: [__meta_kubernetes_namespace]
+ action: replace
+ target_label: kubernetes_namespace
+ - source_labels: [__meta_kubernetes_service_name]
+ action: replace
+ target_label: kubernetes_name
+
+alerting:
+ alertmanagers:
+ - scheme: http
+ static_configs:
+ - targets:
+ - "localhost:9093"
diff --git a/roles/openshift_prometheus/templates/prometheus_deployment.j2 b/roles/openshift_prometheus/templates/prometheus_deployment.j2
new file mode 100644
index 000000000..98c117f19
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prometheus_deployment.j2
@@ -0,0 +1,240 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: prometheus
+ namespace: {{ namespace }}
+ labels:
+ app: prometheus
+spec:
+ replicas: {{ prom_replicas|default(1) }}
+ selector:
+ provider: openshift
+ matchLabels:
+ app: prometheus
+ template:
+ metadata:
+ name: prometheus
+ labels:
+ app: prometheus
+ spec:
+ serviceAccountName: prometheus
+{% if openshift_prometheus_node_selector is iterable and openshift_prometheus_node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in openshift_prometheus_node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+ containers:
+ # Deploy Prometheus behind an oauth proxy
+ - name: prom-proxy
+ image: "{{ openshift_prometheus_image_proxy }}"
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+{% if openshift_prometheus_oauth_proxy_memory_requests is defined and openshift_prometheus_oauth_proxy_memory_requests is not none %}
+ memory: "{{openshift_prometheus_oauth_proxy_memory_requests}}"
+{% endif %}
+{% if openshift_prometheus_oauth_proxy_cpu_requests is defined and openshift_prometheus_oauth_proxy_cpu_requests is not none %}
+ cpu: "{{openshift_prometheus_oauth_proxy_cpu_requests}}"
+{% endif %}
+ limits:
+{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %}
+ memory: "{{openshift_prometheus_oauth_proxy_memory_limit}}"
+{% endif %}
+{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %}
+ cpu: "{{openshift_prometheus_oauth_proxy_cpu_limit}}"
+{% endif %}
+ ports:
+ - containerPort: 8443
+ name: web
+ args:
+ - -provider=openshift
+ - -https-address=:8443
+ - -http-address=
+ - -email-domain=*
+ - -upstream=http://localhost:9090
+ - -client-id=system:serviceaccount:{{ namespace }}:prometheus
+ - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}'
+ - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}'
+ - -tls-cert=/etc/tls/private/tls.crt
+ - -tls-key=/etc/tls/private/tls.key
+ - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
+ - -cookie-secret-file=/etc/proxy/secrets/session_secret
+ - -skip-auth-regex=^/metrics
+ volumeMounts:
+ - mountPath: /etc/tls/private
+ name: prometheus-tls
+ - mountPath: /etc/proxy/secrets
+ name: prometheus-secrets
+ - mountPath: /prometheus
+ name: prometheus-data
+
+ - name: prometheus
+ args:
+ - --storage.tsdb.retention=6h
+ - --config.file=/etc/prometheus/prometheus.yml
+ - --web.listen-address=localhost:9090
+ image: "{{ openshift_prometheus_image_prometheus }}"
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+{% if openshift_prometheus_memory_requests is defined and openshift_prometheus_memory_requests is not none %}
+ memory: "{{openshift_prometheus_memory_requests}}"
+{% endif %}
+{% if openshift_prometheus_cpu_requests is defined and openshift_prometheus_cpu_requests is not none %}
+ cpu: "{{openshift_prometheus_cpu_requests}}"
+{% endif %}
+ limits:
+{% if openshift_prometheus_memory_limit is defined and openshift_prometheus_memory_limit is not none %}
+ memory: "{{ openshift_prometheus_memory_limit }}"
+{% endif %}
+{% if openshift_prometheus_cpu_limit is defined and openshift_prometheus_cpu_limit is not none %}
+ cpu: "{{openshift_prometheus_cpu_limit}}"
+{% endif %}
+
+ volumeMounts:
+ - mountPath: /etc/prometheus
+ name: prometheus-config
+ - mountPath: /prometheus
+ name: prometheus-data
+
+ # Deploy alertmanager behind prometheus-alert-buffer behind an oauth proxy
+ - name: alerts-proxy
+ image: "{{ openshift_prometheus_image_proxy }}"
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+{% if openshift_prometheus_oauth_proxy_memory_requests is defined and openshift_prometheus_oauth_proxy_memory_requests is not none %}
+ memory: "{{openshift_prometheus_oauth_proxy_memory_requests}}"
+{% endif %}
+{% if openshift_prometheus_oauth_proxy_cpu_requests is defined and openshift_prometheus_oauth_proxy_cpu_requests is not none %}
+ cpu: "{{openshift_prometheus_oauth_proxy_cpu_requests}}"
+{% endif %}
+ limits:
+{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %}
+ memory: "{{openshift_prometheus_oauth_proxy_memory_limit}}"
+{% endif %}
+{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %}
+ cpu: "{{openshift_prometheus_oauth_proxy_cpu_limit}}"
+{% endif %}
+ ports:
+ - containerPort: 9443
+ name: web
+ args:
+ - -provider=openshift
+ - -https-address=:9443
+ - -http-address=
+ - -email-domain=*
+ - -upstream=http://localhost:9099
+ - -client-id=system:serviceaccount:{{ namespace }}:prometheus
+ - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}'
+ - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}'
+ - -tls-cert=/etc/tls/private/tls.crt
+ - -tls-key=/etc/tls/private/tls.key
+ - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
+ - -cookie-secret-file=/etc/proxy/secrets/session_secret
+ volumeMounts:
+ - mountPath: /etc/tls/private
+ name: alerts-tls
+ - mountPath: /etc/proxy/secrets
+ name: alerts-secrets
+
+ - name: alert-buffer
+ args:
+ - --storage-path=/alert-buffer/messages.db
+ image: "{{ openshift_prometheus_image_alertbuffer }}"
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+{% if openshift_prometheus_alertbuffer_memory_requests is defined and openshift_prometheus_alertbuffer_memory_requests is not none %}
+ memory: "{{openshift_prometheus_alertbuffer_memory_requests}}"
+{% endif %}
+{% if openshift_prometheus_alertbuffer_cpu_requests is defined and openshift_prometheus_alertbuffer_cpu_requests is not none %}
+ cpu: "{{openshift_prometheus_alertbuffer_cpu_requests}}"
+{% endif %}
+ limits:
+{% if openshift_prometheus_alertbuffer_memory_limit is defined and openshift_prometheus_alertbuffer_memory_limit is not none %}
+ memory: "{{openshift_prometheus_alertbuffer_memory_limit}}"
+{% endif %}
+{% if openshift_prometheus_alertbuffer_cpu_limit is defined and openshift_prometheus_alertbuffer_cpu_limit is not none %}
+ cpu: "{{openshift_prometheus_alertbuffer_cpu_limit}}"
+{% endif %}
+ volumeMounts:
+ - mountPath: /alert-buffer
+ name: alert-buffer-data
+ ports:
+ - containerPort: 9099
+ name: alert-buf
+
+ - name: alertmanager
+ args:
+ - -config.file=/etc/alertmanager/alertmanager.yml
+ image: "{{ openshift_prometheus_image_alertmanager }}"
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+{% if openshift_prometheus_alertmanager_memory_requests is defined and openshift_prometheus_alertmanager_memory_requests is not none %}
+ memory: "{{openshift_prometheus_alertmanager_memory_requests}}"
+{% endif %}
+{% if openshift_prometheus_alertmanager_cpu_requests is defined and openshift_prometheus_alertmanager_cpu_requests is not none %}
+ cpu: "{{openshift_prometheus_alertmanager_cpu_requests}}"
+{% endif %}
+ limits:
+{% if openshift_prometheus_alertmanager_memory_limit is defined and openshift_prometheus_alertmanager_memory_limit is not none %}
+ memory: "{{openshift_prometheus_alertmanager_memory_limit}}"
+{% endif %}
+{% if openshift_prometheus_alertmanager_cpu_limit is defined and openshift_prometheus_alertmanager_cpu_limit is not none %}
+ cpu: "{{openshift_prometheus_alertmanager_cpu_limit}}"
+{% endif %}
+ ports:
+ - containerPort: 9093
+ name: web
+ volumeMounts:
+ - mountPath: /etc/alertmanager
+ name: alertmanager-config
+ - mountPath: /alertmanager
+ name: alertmanager-data
+
+ restartPolicy: Always
+ volumes:
+ - name: prometheus-config
+ configMap:
+ defaultMode: 420
+ name: prometheus
+ - name: prometheus-secrets
+ secret:
+ secretName: prometheus-proxy
+ - name: prometheus-tls
+ secret:
+ secretName: prometheus-tls
+ - name: prometheus-data
+{% if openshift_prometheus_storage_type == 'pvc' %}
+ persistentVolumeClaim:
+ claimName: {{ openshift_prometheus_pvc_name }}
+{% else %}
+ emptyDir: {}
+{% endif %}
+ - name: alertmanager-config
+ configMap:
+ defaultMode: 420
+ name: prometheus-alerts
+ - name: alerts-secrets
+ secret:
+ secretName: alerts-proxy
+ - name: alerts-tls
+ secret:
+ secretName: prometheus-alerts-tls
+ - name: alertmanager-data
+{% if openshift_prometheus_alertmanager_storage_type == 'pvc' %}
+ persistentVolumeClaim:
+ claimName: {{ openshift_prometheus_alertmanager_pvc_name }}
+{% else %}
+ emptyDir: {}
+{% endif %}
+ - name: alert-buffer-data
+{% if openshift_prometheus_alertbuffer_storage_type == 'pvc' %}
+ persistentVolumeClaim:
+ claimName: {{ openshift_prometheus_alertbuffer_pvc_name }}
+{% else %}
+ emptyDir: {}
+{% endif %}
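+{#
+  Volume selection above is driven per component by the *_storage_type
+  variables; an assumed inventory setting such as
+  openshift_prometheus_storage_type: pvc mounts the PVC created in
+  install_prometheus.yaml, while any other value falls back to emptyDir.
+#}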
diff --git a/roles/openshift_prometheus/tests/inventory b/roles/openshift_prometheus/tests/inventory
new file mode 100644
index 000000000..878877b07
--- /dev/null
+++ b/roles/openshift_prometheus/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/roles/openshift_prometheus/tests/test.yaml b/roles/openshift_prometheus/tests/test.yaml
new file mode 100644
index 000000000..37baf573c
--- /dev/null
+++ b/roles/openshift_prometheus/tests/test.yaml
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+ remote_user: root
+ roles:
+ - openshift_prometheus
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index 59ce505d3..47d7be05a 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -12,6 +12,27 @@
deployment_type is deprecated in favor of openshift_deployment_type.
Please specify only openshift_deployment_type, or make both the same.
+# osm_cluster_network_cidr, osm_host_subnet_length and openshift_portal_net are
+# now required, to avoid picking up defaults that may change between releases
+#
+# Note: We will skip these checks for test runs that don't
+# actually do any installing/upgrading/scaling/etc.
+# Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1451023
+- when:
+ - not testing_skip_some_requirements|default(False)|bool
+ assert:
+ that:
+ - "osm_cluster_network_cidr is defined"
+ - "osm_host_subnet_length is defined"
+ - "openshift_portal_net is defined"
+ msg: >
+ osm_cluster_network_cidr, osm_host_subnet_length, and openshift_portal_net are required inventory
+ variables. If you are upgrading or scaling up, these variables should match what is currently used
+ in the cluster. If you don't remember what these values are, you can find them in
+ /etc/origin/master/master-config.yaml on a master with the names clusterNetworkCIDR
+ (osm_cluster_network_cidr), hostSubnetLength (osm_host_subnet_length),
+ and serviceNetworkCIDR (openshift_portal_net).
+
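+# For reference, typical values on a default installation look like the
+# following (always verify against your own master-config.yaml):
+#
+#   osm_cluster_network_cidr: 10.128.0.0/14
+#   osm_host_subnet_length: 9
+#   openshift_portal_net: 172.30.0.0/16
+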
- name: Standardize on latest variable names
set_fact:
# goal is to deprecate deployment_type in favor of openshift_deployment_type.
diff --git a/roles/openshift_service_catalog/defaults/main.yml b/roles/openshift_service_catalog/defaults/main.yml
index 01ee2544d..7c848cb12 100644
--- a/roles/openshift_service_catalog/defaults/main.yml
+++ b/roles/openshift_service_catalog/defaults/main.yml
@@ -1,3 +1,7 @@
---
openshift_service_catalog_remove: false
openshift_service_catalog_nodeselector: {"openshift-infra": "apiserver"}
+
+openshift_use_openshift_sdn: True
+# os_sdn_network_plugin_name: "{% if openshift_use_openshift_sdn %}redhat/openshift-ovs-subnet{% else %}{% endif %}"
+os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
diff --git a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
index 1f25cc39f..16a307c06 100644
--- a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
+++ b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
@@ -1,2 +1 @@
window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.service_catalog_landing_page = true;
-window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.pod_presets = true;
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index 64f94347b..746c73eaf 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -28,7 +28,7 @@
- name: Make kube-service-catalog project network global
command: >
oc adm pod-network make-projects-global kube-service-catalog
- when: os_sdn_network_plugin_name | default('') == 'redhat/openshift-ovs-multitenant'
+ when: os_sdn_network_plugin_name == 'redhat/openshift-ovs-multitenant'
- include: generate_certs.yml
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index a059745a6..d0bc0e028 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -76,10 +76,11 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| Name | Default value | Description |
|--------------------------------------------------|-------------------------|-----------------------------------------|
| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready
-| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace in which to create GlusterFS resources
+| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace/project in which to create GlusterFS resources
| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized
| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names
| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
+| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels.
| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
@@ -91,7 +92,7 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin
| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes
| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
-| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the full URL to the heketi service.
+| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address of the heketi service.
| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode
| openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes
| openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index 0b3d3aef1..148549887 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -3,6 +3,7 @@ openshift_storage_glusterfs_timeout: 300
openshift_storage_glusterfs_is_native: True
openshift_storage_glusterfs_name: 'storage'
openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host"
+openshift_storage_glusterfs_use_default_selector: False
openshift_storage_glusterfs_storageclass: True
openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
openshift_storage_glusterfs_version: 'latest'
@@ -31,6 +32,7 @@ openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.na
openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
openshift_storage_glusterfs_registry_name: 'registry'
openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host"
+openshift_storage_glusterfs_registry_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
openshift_storage_glusterfs_registry_storageclass: False
openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
@@ -58,9 +60,9 @@ r_openshift_storage_glusterfs_os_firewall_deny: []
r_openshift_storage_glusterfs_os_firewall_allow:
- service: glusterfs_sshd
port: "2222/tcp"
-- service: glusterfs_daemon
- port: "24007/tcp"
- service: glusterfs_management
+ port: "24007/tcp"
+- service: glusterfs_rdma
port: "24008/tcp"
- service: glusterfs_bricks
port: "49152-49251/tcp"
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
new file mode 100644
index 000000000..9ebb0d5ec
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
@@ -0,0 +1,143 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: heketi-template
+ deploy-heketi: support
+ annotations:
+ description: Bootstrap Heketi installation
+ tags: glusterfs,heketi,installation
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-service
+ deploy-heketi: support
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: deploy-heketi-${CLUSTER_NAME}
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-route
+ deploy-heketi: support
+ spec:
+ to:
+ kind: Service
+ name: deploy-heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
+ deploy-heketi: support
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ deploy-heketi: support
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: topology
+ mountPath: ${TOPOLOGY_PATH}
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ - name: topology
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-topology-secret
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
+- name: TOPOLOGY_PATH
+ displayName: heketi topology file location
+ required: True
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml
new file mode 100644
index 000000000..8c5e1ded3
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml
@@ -0,0 +1,136 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterfs
+ labels:
+ glusterfs: template
+ annotations:
+ description: GlusterFS DaemonSet template
+ tags: glusterfs
+objects:
+- kind: DaemonSet
+ apiVersion: extensions/v1beta1
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-daemonset
+ annotations:
+ description: GlusterFS DaemonSet
+ tags: glusterfs
+ spec:
+ selector:
+ matchLabels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ template:
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ glusterfs-node: pod
+ spec:
+ nodeSelector: "${{NODE_LABELS}}"
+ hostNetwork: true
+ containers:
+ - name: glusterfs
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: glusterfs-heketi
+ mountPath: "/var/lib/heketi"
+ - name: glusterfs-run
+ mountPath: "/run"
+ - name: glusterfs-lvm
+ mountPath: "/run/lvm"
+ - name: glusterfs-etc
+ mountPath: "/etc/glusterfs"
+ - name: glusterfs-logs
+ mountPath: "/var/log/glusterfs"
+ - name: glusterfs-config
+ mountPath: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ mountPath: "/dev"
+ - name: glusterfs-misc
+ mountPath: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ mountPath: "/sys/fs/cgroup"
+ readOnly: true
+ - name: glusterfs-ssl
+ mountPath: "/etc/ssl"
+ readOnly: true
+ securityContext:
+ capabilities: {}
+ privileged: true
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ resources: {}
+ terminationMessagePath: "/dev/termination-log"
+ volumes:
+ - name: glusterfs-heketi
+ hostPath:
+ path: "/var/lib/heketi"
+ - name: glusterfs-run
+ emptyDir: {}
+ - name: glusterfs-lvm
+ hostPath:
+ path: "/run/lvm"
+ - name: glusterfs-etc
+ hostPath:
+ path: "/etc/glusterfs"
+ - name: glusterfs-logs
+ hostPath:
+ path: "/var/log/glusterfs"
+ - name: glusterfs-config
+ hostPath:
+ path: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ hostPath:
+ path: "/dev"
+ - name: glusterfs-misc
+ hostPath:
+ path: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: glusterfs-ssl
+ hostPath:
+ path: "/etc/ssl"
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+parameters:
+- name: NODE_LABELS
+ displayName: Daemonset Node Labels
+ description: Labels which define the daemonset node selector. Must contain at least one label of the format 'glusterfs=<CLUSTER_NAME>-host'
+ value: '{ "glusterfs": "storage-host" }'
+- name: IMAGE_NAME
+ displayName: GlusterFS container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: GlusterFS container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml
new file mode 100644
index 000000000..61b6a8c13
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml
@@ -0,0 +1,134 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-template
+ annotations:
+ description: Heketi service deployment template
+ tags: glusterfs,heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-service
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-route
+ spec:
+ to:
+ kind: Service
+ name: heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-dc
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ glusterfs:
+ endpoints: heketi-db-${CLUSTER_NAME}-endpoints
+ path: heketidbstorage
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index a31c5bd5e..bc0dde17d 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -15,6 +15,7 @@
oc_project:
state: present
name: "{{ glusterfs_namespace }}"
+ node_selector: "{% if glusterfs_use_default_selector %}{{ omit }}{% endif %}"
when: glusterfs_is_native or glusterfs_heketi_is_native or glusterfs_storageclass
- name: Delete pre-existing heketi resources
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 7a2987883..012c722ff 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -5,6 +5,7 @@
glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native | bool }}"
glusterfs_name: "{{ openshift_storage_glusterfs_name }}"
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}"
+ glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 17f87578d..1bcab8e49 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -5,6 +5,7 @@
glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native | bool }}"
glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}"
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}"
+ glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_registry_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..11c9195bb
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..3f869d2b7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..095fb780f
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}
+provisioner: kubernetes.io/glusterfs
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+{% if glusterfs_heketi_admin_key is defined %}
+ secretNamespace: "{{ glusterfs_namespace }}"
+ secretName: "heketi-{{ glusterfs_name }}-admin-secret"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
new file mode 100644
index 000000000..579b11bb7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
@@ -0,0 +1,36 @@
+{
+ "_port_comment": "Heketi Server Port Number",
+ "port" : "8080",
+
+ "_use_auth": "Enable JWT authorization. Please enable for deployment",
+ "use_auth" : false,
+
+ "_jwt" : "Private keys for access",
+ "jwt" : {
+ "_admin" : "Admin has access to all APIs",
+ "admin" : {
+ "key" : "My Secret"
+ },
+ "_user" : "User only has access to /volumes endpoint",
+ "user" : {
+ "key" : "My Secret"
+ }
+ },
+
+ "_glusterfs_comment": "GlusterFS Configuration",
+ "glusterfs" : {
+
+ "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
+ "executor" : "{{ glusterfs_heketi_executor }}",
+
+ "_db_comment": "Database file name",
+ "db" : "/var/lib/heketi/heketi.db",
+
+ "sshexec" : {
+ "keyfile" : "/etc/heketi/private_key",
+ "port" : "{{ glusterfs_heketi_ssh_port }}",
+ "user" : "{{ glusterfs_heketi_ssh_user }}",
+ "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
+ }
+ }
+}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2
new file mode 100644
index 000000000..d6c28f6dd
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2
@@ -0,0 +1,49 @@
+{
+ "clusters": [
+{%- set clusters = {} -%}
+{%- for node in glusterfs_nodes -%}
+ {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+ {%- if cluster in clusters -%}
+ {%- set _dummy = clusters[cluster].append(node) -%}
+ {%- else -%}
+ {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+ {
+ "nodes": [
+{%- for node in clusters[cluster] -%}
+ {
+ "node": {
+ "hostnames": {
+ "manage": [
+{%- if 'glusterfs_hostname' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_hostname }}"
+{%- elif 'openshift' in hostvars[node] -%}
+ "{{ hostvars[node].openshift.node.nodename }}"
+{%- else -%}
+ "{{ node }}"
+{%- endif -%}
+ ],
+ "storage": [
+{%- if 'glusterfs_ip' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_ip }}"
+{%- else -%}
+ "{{ hostvars[node].openshift.common.ip }}"
+{%- endif -%}
+ ]
+ },
+ "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+ },
+ "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+ "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+}
diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml
index 01a1a7472..53d10f1f8 100644
--- a/roles/openshift_version/defaults/main.yml
+++ b/roles/openshift_version/defaults/main.yml
@@ -1,2 +1,3 @@
---
openshift_protect_installed_version: True
+version_install_base_package: False
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index c0ea00f34..1ff99adf8 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -5,6 +5,16 @@
is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}"
is_atomic: "{{ openshift.common.is_atomic | default(False) | bool }}"
+# This is only needed on masters and nodes; version_install_base_package
+# should be set by a play externally.
+- name: Install the base package for versioning
+ package:
+ name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ state: present
+ when:
+ - not is_containerized | bool
+ - version_install_base_package | bool
+
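+# A play that needs the base package can enable this, e.g. (illustrative):
+#
+#   vars:
+#     version_install_base_package: True
+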
# Block attempts to install origin without specifying some kind of version information.
# This is because the latest tags for origin are usually alpha builds, which should not
# be used by default. Users must indicate what they want.
diff --git a/roles/os_firewall/tasks/iptables.yml b/roles/os_firewall/tasks/iptables.yml
index 0af5abf38..2d74f2e48 100644
--- a/roles/os_firewall/tasks/iptables.yml
+++ b/roles/os_firewall/tasks/iptables.yml
@@ -33,7 +33,7 @@
register: result
delegate_to: "{{item}}"
run_once: true
- with_items: "{{ ansible_play_hosts }}"
+ with_items: "{{ ansible_play_batch }}"
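+  # ansible_play_batch contains only the hosts of the current serial batch,
+  # whereas ansible_play_hosts lists every host in the play, so delegation
+  # stays within the batch currently being processed.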
- name: need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail
pause:
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index 453044a6e..2a2cf40f3 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -41,15 +41,19 @@
redhat_subscription:
username: "{{ rhel_subscription_user }}"
password: "{{ rhel_subscription_pass }}"
+ register: rh_subscription
+ until: rh_subscription | succeeded
- name: Retrieve the OpenShift Pool ID
command: subscription-manager list --available --matches="{{ rhel_subscription_pool }}" --pool-only
register: openshift_pool_id
+ until: openshift_pool_id | succeeded
changed_when: False
- name: Determine if OpenShift Pool Already Attached
command: subscription-manager list --consumed --matches="{{ rhel_subscription_pool }}" --pool-only
register: openshift_pool_attached
+ until: openshift_pool_attached | succeeded
changed_when: False
when: openshift_pool_id.stdout == ''
@@ -59,6 +63,8 @@
- name: Attach to OpenShift Pool
command: subscription-manager subscribe --pool {{ openshift_pool_id.stdout_lines[0] }}
+ register: subscribe_pool
+ until: subscribe_pool | succeeded
when: openshift_pool_id.stdout != ''
- include: enterprise.yml