-rw-r--r--  .papr.inventory | 3
-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  callback_plugins/default.py | 70
-rw-r--r--  docs/proposals/playbook_consolidation.md | 178
-rw-r--r--  files/origin-components/apiserver-config.yaml | 4
-rw-r--r--  files/origin-components/apiserver-template.yaml | 122
-rw-r--r--  files/origin-components/rbac-template.yaml | 92
-rw-r--r--  filter_plugins/oo_filters.py | 320
-rw-r--r--  filter_plugins/openshift_version.py | 4
-rw-r--r--  images/installer/Dockerfile.rhel7 | 2
-rw-r--r--  images/installer/root/exports/config.json.template | 2
-rwxr-xr-x  images/installer/root/usr/local/bin/run | 2
-rw-r--r--  inventory/byo/hosts.origin.example | 100
-rw-r--r--  inventory/byo/hosts.ose.example | 100
-rw-r--r--  openshift-ansible.spec | 92
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-certificates.yml | 6
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml | 6
-rw-r--r--  playbooks/byo/openshift-loadbalancer/config.yml | 6
-rw-r--r--  playbooks/byo/openshift-nfs/config.yml | 6
-rw-r--r--  playbooks/byo/rhel_subscribe.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 19
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 11
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 28
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml | 41
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml | 20
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml | 63
-rw-r--r--  playbooks/common/openshift-cluster/service_catalog.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml | 19
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml | 4
-rw-r--r--  playbooks/common/openshift-etcd/migrate.yml | 67
-rw-r--r--  playbooks/common/openshift-etcd/restart.yml | 18
-rw-r--r--  playbooks/common/openshift-etcd/scaleup.yml | 21
-rw-r--r--  playbooks/common/openshift-master/additional_config.yml | 7
-rw-r--r--  playbooks/common/openshift-master/config.yml | 46
-rw-r--r--  playbooks/common/openshift-master/scaleup.yml | 2
-rw-r--r--  playbooks/common/openshift-master/set_network_facts.yml | 28
-rw-r--r--  playbooks/common/openshift-nfs/config.yml | 2
-rw-r--r--  playbooks/common/openshift-node/config.yml | 10
-rw-r--r--  playbooks/gcp/openshift-cluster/provision.yml | 19
-rw-r--r--  roles/ansible_service_broker/defaults/main.yml | 1
-rw-r--r--  roles/ansible_service_broker/tasks/main.yml | 4
-rw-r--r--  roles/calico/tasks/main.yml | 22
-rw-r--r--  roles/docker/defaults/main.yml | 18
-rw-r--r--  roles/docker/tasks/main.yml | 1
-rw-r--r--  roles/docker/tasks/package_docker.yml | 20
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml | 28
-rw-r--r--  roles/docker/templates/registries.conf | 46
-rw-r--r--  roles/etcd/meta/main.yml | 2
-rw-r--r--  roles/etcd/tasks/auxiliary/clean_data.yml (renamed from roles/etcd_migrate/tasks/clean_data.yml) | 0
-rw-r--r--  roles/etcd/tasks/ca.yml | 2
-rw-r--r--  roles/etcd/tasks/ca/deploy.yml (renamed from roles/etcd_ca/tasks/main.yml) | 4
-rw-r--r--  roles/etcd/tasks/clean_data.yml | 2
-rw-r--r--  roles/etcd/tasks/client_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/client_certificates/fetch_from_ca.yml (renamed from roles/etcd_client_certificates/tasks/main.yml) | 2
-rw-r--r--  roles/etcd/tasks/main.yml | 2
-rw-r--r--  roles/etcd/tasks/migrate.add_ttls.yml | 2
-rw-r--r--  roles/etcd/tasks/migrate.configure_master.yml | 2
-rw-r--r--  roles/etcd/tasks/migrate.pre_check.yml | 2
-rw-r--r--  roles/etcd/tasks/migrate.yml | 2
-rw-r--r--  roles/etcd/tasks/migration/add_ttls.yml (renamed from roles/etcd_migrate/tasks/add_ttls.yml) | 1
-rw-r--r--  roles/etcd/tasks/migration/check.yml (renamed from roles/etcd_migrate/tasks/check.yml) | 0
-rw-r--r--  roles/etcd/tasks/migration/check_cluster_health.yml (renamed from roles/etcd_migrate/tasks/check_cluster_health.yml) | 0
-rw-r--r--  roles/etcd/tasks/migration/check_cluster_status.yml (renamed from roles/etcd_migrate/tasks/check_cluster_status.yml) | 0
-rw-r--r--  roles/etcd/tasks/migration/configure_master.yml (renamed from roles/etcd_migrate/tasks/configure.yml) | 0
-rw-r--r--  roles/etcd/tasks/migration/migrate.yml (renamed from roles/etcd_migrate/tasks/migrate.yml) | 0
-rw-r--r--  roles/etcd/tasks/server_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/server_certificates/fetch_from_ca.yml (renamed from roles/etcd_server_certificates/tasks/main.yml) | 8
-rw-r--r--  roles/etcd/tasks/upgrade/upgrade_image.yml (renamed from roles/etcd_upgrade/tasks/upgrade_image.yml) | 9
-rw-r--r--  roles/etcd/tasks/upgrade/upgrade_rpm.yml (renamed from roles/etcd_upgrade/tasks/upgrade_rpm.yml) | 0
-rw-r--r--  roles/etcd/tasks/upgrade_image.yml | 2
-rw-r--r--  roles/etcd/tasks/upgrade_rpm.yml | 2
-rw-r--r--  roles/etcd/templates/openssl_append.j2 (renamed from roles/etcd_ca/templates/openssl_append.j2) | 0
-rw-r--r--  roles/etcd_ca/README.md | 34
-rw-r--r--  roles/etcd_client_certificates/README.md | 34
-rw-r--r--  roles/etcd_client_certificates/meta/main.yml | 16
-rw-r--r--  roles/etcd_common/defaults/main.yml | 3
-rw-r--r--  roles/etcd_common/tasks/backup.yml | 2
-rw-r--r--  roles/etcd_migrate/README.md | 53
-rw-r--r--  roles/etcd_migrate/defaults/main.yml | 3
-rw-r--r--  roles/etcd_migrate/meta/main.yml | 17
-rw-r--r--  roles/etcd_migrate/tasks/main.yml | 25
-rw-r--r--  roles/etcd_server_certificates/README.md | 34
-rw-r--r--  roles/etcd_server_certificates/meta/main.yml | 17
-rw-r--r--  roles/etcd_upgrade/defaults/main.yml | 3
-rw-r--r--  roles/etcd_upgrade/meta/main.yml | 17
-rw-r--r--  roles/etcd_upgrade/tasks/main.yml | 14
-rw-r--r--  roles/etcd_upgrade/tasks/upgrade.yml | 11
-rw-r--r--  roles/etcd_upgrade/vars/main.yml | 3
-rw-r--r--  roles/flannel/README.md | 2
-rw-r--r--  roles/flannel/meta/main.yml | 5
-rw-r--r--  roles/flannel_register/defaults/main.yaml | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_ca_server_cert.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_csr.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_manage_node.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_group.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_user.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_registry.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_router.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_clusterrole.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_configmap.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_edit.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_env.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_group.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_image.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_label.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_obj.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_objectvalidator.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_process.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_project.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_pvc.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_route.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_scale.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_secret.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_service.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount_secret.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_storageclass.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_user.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_version.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_volume.py | 2
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_configmap.yml | 4
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_configmap.py | 6
-rw-r--r--  roles/lib_utils/library/repoquery.py | 18
-rw-r--r--  roles/lib_utils/library/yedit.py | 3
-rw-r--r--  roles/lib_utils/src/ansible/repoquery.py | 17
-rw-r--r--  roles/lib_utils/src/class/yedit.py | 2
-rw-r--r--  roles/lib_utils/src/lib/import.py | 1
-rw-r--r--  roles/nuage_master/meta/main.yml | 3
-rw-r--r--  roles/openshift_etcd_ca/meta/main.yml | 18
-rw-r--r--  roles/openshift_etcd_client_certificates/meta/main.yml | 4
-rw-r--r--  roles/openshift_etcd_client_certificates/tasks/main.yml | 4
-rw-r--r--  roles/openshift_etcd_server_certificates/meta/main.yml | 16
-rw-r--r--  roles/openshift_examples/README.md | 14
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 177
-rw-r--r--  roles/openshift_gcp/tasks/main.yaml | 43
-rw-r--r--  roles/openshift_gcp/templates/dns.j2.sh | 13
-rw-r--r--  roles/openshift_gcp/templates/provision.j2.sh | 318
-rw-r--r--  roles/openshift_gcp/templates/remove.j2.sh | 156
-rw-r--r--  roles/openshift_gcp_image_prep/files/partition.conf | 3
-rw-r--r--  roles/openshift_gcp_image_prep/tasks/main.yaml | 18
-rw-r--r--  roles/openshift_health_checker/action_plugins/openshift_health_check.py | 158
-rw-r--r--  roles/openshift_health_checker/library/ocutil.py | 11
-rw-r--r--  roles/openshift_health_checker/openshift_checks/__init__.py | 134
-rw-r--r--  roles/openshift_health_checker/openshift_checks/diagnostics.py | 62
-rw-r--r--  roles/openshift_health_checker/openshift_checks/disk_availability.py | 9
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 4
-rw-r--r--  roles/openshift_health_checker/openshift_checks/etcd_volume.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py | 8
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/logging.py | 12
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/mixins.py | 1
-rw-r--r--  roles/openshift_health_checker/test/action_plugin_test.py | 125
-rw-r--r--  roles/openshift_health_checker/test/diagnostics_test.py | 50
-rw-r--r--  roles/openshift_health_checker/test/disk_availability_test.py | 13
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py | 35
-rw-r--r--  roles/openshift_health_checker/test/elasticsearch_test.py | 18
-rw-r--r--  roles/openshift_health_checker/test/logging_check_test.py | 8
-rw-r--r--  roles/openshift_health_checker/test/logging_index_time_test.py | 8
-rw-r--r--  roles/openshift_health_checker/test/openshift_check_test.py | 43
-rw-r--r--  roles/openshift_health_checker/test/ovs_version_test.py | 2
-rw-r--r--  roles/openshift_health_checker/test/package_availability_test.py | 2
-rw-r--r--  roles/openshift_health_checker/test/package_update_test.py | 2
-rw-r--r--  roles/openshift_hosted/tasks/registry/secure.yml | 2
-rw-r--r--  roles/openshift_hosted_facts/tasks/main.yml | 3
-rw-r--r--  roles/openshift_hosted_logging/README.md | 40
-rw-r--r--  roles/openshift_hosted_logging/defaults/main.yml | 2
-rw-r--r--  roles/openshift_hosted_logging/handlers/main.yml | 21
-rw-r--r--  roles/openshift_hosted_logging/meta/main.yaml | 3
-rw-r--r--  roles/openshift_hosted_logging/tasks/cleanup_logging.yaml | 59
-rw-r--r--  roles/openshift_hosted_logging/tasks/deploy_logging.yaml | 177
-rw-r--r--  roles/openshift_hosted_logging/tasks/main.yaml | 8
-rw-r--r--  roles/openshift_hosted_logging/tasks/update_master_config.yaml | 7
-rw-r--r--  roles/openshift_hosted_logging/vars/main.yaml | 32
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 62
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/main.yaml | 9
-rw-r--r--  roles/openshift_manageiq/vars/main.yml | 3
-rw-r--r--  roles/openshift_master/defaults/main.yml | 4
-rw-r--r--  roles/openshift_master/meta/main.yml | 16
-rw-r--r--  roles/openshift_master/tasks/main.yml | 26
-rw-r--r--  roles/openshift_master/tasks/registry_auth.yml | 30
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 2
-rw-r--r--  roles/openshift_master/tasks/update_etcd_client_urls.yml | 8
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 | 2
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 | 2
-rw-r--r--  roles/openshift_master/vars/main.yml | 19
-rw-r--r--  roles/openshift_master_facts/filter_plugins/openshift_master.py | 30
-rw-r--r--  roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py | 17
-rw-r--r--  roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py | 16
-rw-r--r--  roles/openshift_metrics/README.md | 2
-rw-r--r--  roles/openshift_metrics/defaults/main.yaml | 14
-rw-r--r--  roles/openshift_metrics/tasks/install_hawkular.yaml | 1
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 8
-rw-r--r--  roles/openshift_metrics/templates/route.j2 | 3
-rw-r--r--  roles/openshift_metrics/vars/default_images.yml | 4
-rw-r--r--  roles/openshift_metrics/vars/openshift-enterprise.yml | 4
-rw-r--r--  roles/openshift_node/defaults/main.yml | 8
-rw-r--r--  roles/openshift_node/handlers/main.yml | 3
-rw-r--r--  roles/openshift_node/tasks/config.yml | 8
-rw-r--r--  roles/openshift_node/tasks/main.yml | 20
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 2
-rw-r--r--  roles/openshift_node/tasks/registry_auth.yml | 19
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.dep.service | 2
-rwxr-xr-x  roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh | 2
-rw-r--r--  roles/openshift_node_facts/filter_plugins/filters.py (renamed from filter_plugins/openshift_node.py) | 6
-rw-r--r--  roles/openshift_node_facts/tasks/main.yml | 2
-rw-r--r--  roles/openshift_node_upgrade/defaults/main.yml | 6
-rw-r--r--  roles/openshift_node_upgrade/tasks/main.yml | 2
-rw-r--r--  roles/openshift_node_upgrade/tasks/registry_auth.yml | 24
-rw-r--r--  roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service | 2
-rw-r--r--  roles/openshift_node_upgrade/templates/openshift.docker.node.service | 17
-rw-r--r--  roles/openshift_persistent_volumes/meta/main.yml | 3
-rw-r--r--  roles/openshift_prometheus/defaults/main.yaml | 2
-rw-r--r--  roles/openshift_prometheus/tasks/install_prometheus.yaml | 5
-rw-r--r--  roles/openshift_repos/README.md | 10
-rw-r--r--  roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py | 25
-rw-r--r--  roles/openshift_sanitize_inventory/library/conditional_set_fact.py | 68
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml | 48
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml | 17
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/deprecations.yml | 21
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/main.yml | 4
-rw-r--r--  roles/openshift_sanitize_inventory/vars/main.yml | 81
-rw-r--r--  roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js | 3
-rw-r--r--  roles/openshift_service_catalog/tasks/install.yml | 11
-rw-r--r--  roles/openshift_service_catalog/tasks/wire_aggregator.yml | 5
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml | 8
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 50
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml | 27
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml | 2
-rw-r--r--  roles/openshift_storage_nfs/tasks/main.yml | 6
-rw-r--r--  roles/openshift_storage_nfs/templates/exports.j2 | 6
-rw-r--r--  roles/openshift_version/defaults/main.yml | 1
-rw-r--r--  roles/openshift_version/tasks/main.yml | 15
-rw-r--r--  roles/os_firewall/tasks/iptables.yml | 2
-rw-r--r--  roles/rhel_subscribe/tasks/enterprise.yml | 11
-rw-r--r--  roles/rhel_subscribe/tasks/main.yml | 10
-rw-r--r--  roles/template_service_broker/defaults/main.yml | 4
-rw-r--r--  roles/template_service_broker/files/openshift-ansible-catalog-console.js | 1
-rw-r--r--  roles/template_service_broker/files/remove-openshift-ansible-catalog-console.js | 2
-rw-r--r--  roles/template_service_broker/meta/main.yml (renamed from roles/etcd_ca/meta/main.yml) | 7
-rw-r--r--  roles/template_service_broker/tasks/install.yml | 47
-rw-r--r--  roles/template_service_broker/tasks/main.yml | 8
-rw-r--r--  roles/template_service_broker/tasks/remove.yml | 28
-rw-r--r--  roles/template_service_broker/vars/default_images.yml | 2
-rw-r--r--  roles/template_service_broker/vars/main.yml | 6
-rw-r--r--  roles/template_service_broker/vars/openshift-enterprise.yml | 2
-rw-r--r--  test/integration/openshift_health_checker/common.go | 2
-rw-r--r--  test/openshift_version_tests.py | 30
-rw-r--r--  utils/docs/config.md | 1
266 files changed, 3446 insertions(+), 1686 deletions(-)
diff --git a/.papr.inventory b/.papr.inventory
index 878d434e2..aa4324c21 100644
--- a/.papr.inventory
+++ b/.papr.inventory
@@ -11,6 +11,9 @@ openshift_image_tag="{{ lookup('env', 'OPENSHIFT_IMAGE_TAG') }}"
openshift_master_default_subdomain="{{ lookup('env', 'RHCI_ocp_node1_IP') }}.xip.io"
openshift_check_min_host_disk_gb=1.5
openshift_check_min_host_memory_gb=1.9
+osm_cluster_network_cidr=10.128.0.0/14
+openshift_portal_net=172.30.0.0/16
+osm_host_subnet_length=9
[masters]
ocp-master
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 9a5acc500..b2155c30f 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.7.0-0.126.0 ./
+3.7.0-0.127.0 ./
diff --git a/callback_plugins/default.py b/callback_plugins/default.py
deleted file mode 100644
index 97ad77724..000000000
--- a/callback_plugins/default.py
+++ /dev/null
@@ -1,70 +0,0 @@
-'''Plugin to override the default output logic.'''
-
-# upstream: https://gist.github.com/cliffano/9868180
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-# For some reason this has to be done
-import imp
-import os
-
-ANSIBLE_PATH = imp.find_module('ansible')[1]
-DEFAULT_PATH = os.path.join(ANSIBLE_PATH, 'plugins/callback/default.py')
-DEFAULT_MODULE = imp.load_source(
- 'ansible.plugins.callback.default',
- DEFAULT_PATH
-)
-
-try:
- from ansible.plugins.callback import CallbackBase
- BASECLASS = CallbackBase
-except ImportError: # < ansible 2.1
- BASECLASS = DEFAULT_MODULE.CallbackModule
-
-
-class CallbackModule(DEFAULT_MODULE.CallbackModule): # pylint: disable=too-few-public-methods,no-init
- '''
- Override for the default callback module.
-
- Render std err/out outside of the rest of the result which it prints with
- indentation.
- '''
- CALLBACK_VERSION = 2.0
- CALLBACK_TYPE = 'stdout'
- CALLBACK_NAME = 'default'
-
- def __init__(self, *args, **kwargs):
- # pylint: disable=non-parent-init-called
- BASECLASS.__init__(self, *args, **kwargs)
-
- def _dump_results(self, result):
- '''Return the text to output for a result.'''
- result['_ansible_verbose_always'] = True
-
- save = {}
- for key in ['stdout', 'stdout_lines', 'stderr', 'stderr_lines', 'msg']:
- if key in result:
- save[key] = result.pop(key)
-
- output = BASECLASS._dump_results(self, result) # pylint: disable=protected-access
-
- for key in ['stdout', 'stderr', 'msg']:
- if key in save and save[key]:
- output += '\n\n%s:\n\n%s\n' % (key.upper(), save[key])
-
- for key, value in save.items():
- result[key] = value
-
- return output
diff --git a/docs/proposals/playbook_consolidation.md b/docs/proposals/playbook_consolidation.md
new file mode 100644
index 000000000..98aedb021
--- /dev/null
+++ b/docs/proposals/playbook_consolidation.md
@@ -0,0 +1,178 @@
+# OpenShift-Ansible Playbook Consolidation
+
+## Description
+The `byo` designation is no longer meaningful because the playbooks in the
+`byo` directory can deploy on physical hardware or cloud resources alike.
+Consolidating these directories will make the code base easier to maintain
+and provide a more straightforward project for users and developers.
+
+The main points of this proposal are:
+* Consolidate initialization playbooks into one set of playbooks in
+ `playbooks/init`.
+* Collapse the `playbooks/byo` and `playbooks/common` into one set of
+ directories at `playbooks/openshift-*`.
+
+This consolidation effort may be more appropriate when the project moves to
+using a container as the default installation method.
+
+## Design
+
+### Initialization Playbook Consolidation
+Currently there are two separate sets of initialization playbooks:
+* `playbooks/byo/openshift-cluster/initialize_groups.yml`
+* `playbooks/common/openshift-cluster/std_include.yml`
+
+Although these playbooks are located in the `openshift-cluster` directory, they
+are shared by all of the `openshift-*` areas. These playbooks would be better
+organized in a `playbooks/init` directory, colocated with all their related
+playbooks.
+
+In the example below, the following changes have been made:
+* `playbooks/byo/openshift-cluster/initialize_groups.yml` renamed to
+ `playbooks/init/initialize_host_groups.yml`
+* `playbooks/common/openshift-cluster/std_include.yml` renamed to
+ `playbooks/init/main.yml`
+* `- include: playbooks/init/initialize_host_groups.yml` has been added to the
+ top of `playbooks/init/main.yml`
+* All other related files for initialization have been moved to `playbooks/init`
+
+The `initialize_host_groups.yml` playbook is a single play with one task that
+imports variables for inventory group conversions. This task could be further
+consolidated into the play in `evaluate_groups.yml`.
+
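+For reference, a minimal sketch of what such a playbook amounts to (the play
+below, including the variable file path, is illustrative rather than copied
+from the repository):
+
+```yaml
+# playbooks/init/initialize_host_groups.yml (illustrative sketch)
+---
+- name: Initialize host group variables
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - include_vars: vars/cluster_hosts.yml
+```
+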
+The new standard initialization playbook would be
+`playbooks/init/main.yml`.
+
+
+```
+> $ tree openshift-ansible/playbooks/init
+.
+├── evaluate_groups.yml
+├── initialize_facts.yml
+├── initialize_host_groups.yml
+├── initialize_openshift_repos.yml
+├── initialize_openshift_version.yml
+├── main.yml
+├── roles -> ../../roles
+├── validate_hostnames.yml
+└── vars
+ └── cluster_hosts.yml
+```
+
+```yaml
+# openshift-ansible/playbooks/init/main.yml
+---
+- include: initialize_host_groups.yml
+
+- include: evaluate_groups.yml
+
+- include: initialize_facts.yml
+
+- include: validate_hostnames.yml
+
+- include: initialize_openshift_repos.yml
+
+- include: initialize_openshift_version.yml
+```
+
+### `byo` and `common` Playbook Consolidation
+Historically, the `byo` directory coexisted with other platform directories
+which contained playbooks that then called into `common` playbooks to perform
+common installation steps for all platforms. Since the other platform
+directories have been removed, this separation is no longer necessary.
+
+In the example below, the following changes have been made:
+* `playbooks/byo/openshift-master` renamed to
+ `playbooks/openshift-master`
+* `playbooks/common/openshift-master` renamed to
+ `playbooks/openshift-master/private`
+* Original `byo` entry point playbooks have been updated to include their
+ respective playbooks from `private/`.
+* Symbolic links have been updated as necessary
+
+All user-consumable playbooks are in the root of `openshift-master`, and no
+entry-point playbooks exist in the `private` directory. Maintaining the
+separation between entry-point playbooks and the private playbooks allows
+individual pieces of a deployment to be used as needed by other components.
+
+```
+openshift-ansible/playbooks/openshift-master
+> $ tree
+.
+├── config.yml
+├── private
+│   ├── additional_config.yml
+│   ├── config.yml
+│   ├── filter_plugins -> ../../../filter_plugins
+│   ├── library -> ../../../library
+│   ├── lookup_plugins -> ../../../lookup_plugins
+│   ├── restart_hosts.yml
+│   ├── restart_services.yml
+│   ├── restart.yml
+│   ├── roles -> ../../../roles
+│   ├── scaleup.yml
+│   └── validate_restart.yml
+├── restart.yml
+└── scaleup.yml
+```
+
+```yaml
+# openshift-ansible/playbooks/openshift-master/config.yml
+---
+- include: ../init/main.yml
+
+- include: private/config.yml
+```
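+
+The remaining entry points would follow the same pattern; for example, a
+hypothetical sketch of the new `restart.yml` (assuming it simply chains the
+init playbooks with its private counterpart):
+
+```yaml
+# openshift-ansible/playbooks/openshift-master/restart.yml (illustrative)
+---
+- include: ../init/main.yml
+
+- include: private/restart.yml
+```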
+
+With the directory structure consolidated and the component installs removed
+from `openshift-cluster`, that directory is no longer necessary. To deploy an
+entire OpenShift cluster, a playbook would be created to tie together all of
+the different components. The following example shows how multiple components
+would be combined to perform a complete install.
+
+```yaml
+# openshift-ansible/playbooks/deploy_cluster.yml
+---
+- include: init/main.yml
+
+- include: openshift-etcd/private/config.yml
+
+- include: openshift-nfs/private/config.yml
+
+- include: openshift-loadbalancer/private/config.yml
+
+- include: openshift-master/private/config.yml
+
+- include: openshift-node/private/config.yml
+
+- include: openshift-glusterfs/private/config.yml
+
+- include: openshift-hosted/private/config.yml
+
+- include: openshift-service-catalog/private/config.yml
+```
+
+## User Story
+As a developer of OpenShift-Ansible,
+I want to simplify the playbook directory structure
+so that users can easily find deployment playbooks and developers know where new
+features should be developed.
+
+## Implementation
+Given the size of this refactoring effort, it should be broken into smaller
+steps which can be completed independently while still maintaining a functional
+project.
+
+Steps:
+1. Update and merge consolidation of the initialization playbooks.
+2. Update and merge consolidation of each `openshift-*` component area.
+3. Update and merge consolidation of `openshift-cluster`.
+
+## Acceptance Criteria
+* Verify that all entry-point playbooks install or configure as expected.
+* Verify that CI is updated for testing new playbook locations.
+* Verify that repo documentation is updated.
+* Verify that user documentation is updated.
+
+## References
diff --git a/files/origin-components/apiserver-config.yaml b/files/origin-components/apiserver-config.yaml
new file mode 100644
index 000000000..e4048d1da
--- /dev/null
+++ b/files/origin-components/apiserver-config.yaml
@@ -0,0 +1,4 @@
+kind: TemplateServiceBrokerConfig
+apiVersion: config.templateservicebroker.openshift.io/v1
+templateNamespaces:
+- openshift
diff --git a/files/origin-components/apiserver-template.yaml b/files/origin-components/apiserver-template.yaml
new file mode 100644
index 000000000..1b42597af
--- /dev/null
+++ b/files/origin-components/apiserver-template.yaml
@@ -0,0 +1,122 @@
+apiVersion: template.openshift.io/v1
+kind: Template
+metadata:
+ name: template-service-broker-apiserver
+parameters:
+- name: IMAGE
+ value: openshift/origin:latest
+- name: NAMESPACE
+ value: openshift-template-service-broker
+- name: LOGLEVEL
+ value: "0"
+- name: API_SERVER_CONFIG
+ value: |
+ kind: TemplateServiceBrokerConfig
+ apiVersion: config.templateservicebroker.openshift.io/v1
+ templateNamespaces:
+ - openshift
+objects:
+
+# to create the tsb server
+- apiVersion: extensions/v1beta1
+ kind: DaemonSet
+ metadata:
+ namespace: ${NAMESPACE}
+ name: apiserver
+ labels:
+ apiserver: "true"
+ spec:
+ template:
+ metadata:
+ name: apiserver
+ labels:
+ apiserver: "true"
+ spec:
+ serviceAccountName: apiserver
+ containers:
+ - name: c
+ image: ${IMAGE}
+ imagePullPolicy: IfNotPresent
+ command:
+ - "/usr/bin/openshift"
+ - "start"
+ - "template-service-broker"
+ - "--secure-port=8443"
+ - "--audit-log-path=-"
+ - "--tls-cert-file=/var/serving-cert/tls.crt"
+ - "--tls-private-key-file=/var/serving-cert/tls.key"
+ - "--loglevel=${LOGLEVEL}"
+ - "--config=/var/apiserver-config/apiserver-config.yaml"
+ ports:
+ - containerPort: 8443
+ volumeMounts:
+ - mountPath: /var/serving-cert
+ name: serving-cert
+ - mountPath: /var/apiserver-config
+ name: apiserver-config
+ readinessProbe:
+ httpGet:
+ path: /healthz
+ port: 8443
+ scheme: HTTPS
+ volumes:
+ - name: serving-cert
+ secret:
+ defaultMode: 420
+ secretName: apiserver-serving-cert
+ - name: apiserver-config
+ configMap:
+ defaultMode: 420
+ name: apiserver-config
+
+# to create the config for the TSB
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ namespace: ${NAMESPACE}
+ name: apiserver-config
+ data:
+ apiserver-config.yaml: ${API_SERVER_CONFIG}
+
+# to be able to assign powers to the process
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ namespace: ${NAMESPACE}
+ name: apiserver
+
+# to be able to expose TSB inside the cluster
+- apiVersion: v1
+ kind: Service
+ metadata:
+ namespace: ${NAMESPACE}
+ name: apiserver
+ annotations:
+ service.alpha.openshift.io/serving-cert-secret-name: apiserver-serving-cert
+ spec:
+ selector:
+ apiserver: "true"
+ ports:
+ - port: 443
+ targetPort: 8443
+
+# This service account will be granted permission to call the TSB.
+# The token for this SA will be provided to the service catalog for
+# use when calling the TSB.
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ namespace: ${NAMESPACE}
+ name: templateservicebroker-client
+
+# This secret will be populated with a copy of the templateservicebroker-client SA's
+# auth token. Since this secret has a static name, it can be referenced more
+# easily than the auto-generated secret for the service account.
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ namespace: ${NAMESPACE}
+ name: templateservicebroker-client
+ annotations:
+ kubernetes.io/service-account.name: templateservicebroker-client
+ type: kubernetes.io/service-account-token
diff --git a/files/origin-components/rbac-template.yaml b/files/origin-components/rbac-template.yaml
new file mode 100644
index 000000000..0937a9065
--- /dev/null
+++ b/files/origin-components/rbac-template.yaml
@@ -0,0 +1,92 @@
+apiVersion: template.openshift.io/v1
+kind: Template
+metadata:
+ name: template-service-broker-rbac
+parameters:
+- name: NAMESPACE
+ value: openshift-template-service-broker
+- name: KUBE_SYSTEM
+ value: kube-system
+objects:
+
+# Grant the service account permission to call the TSB
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: ClusterRoleBinding
+ metadata:
+ name: templateservicebroker-client
+ roleRef:
+ kind: ClusterRole
+ name: system:openshift:templateservicebroker-client
+ subjects:
+ - kind: ServiceAccount
+ namespace: ${NAMESPACE}
+ name: templateservicebroker-client
+
+# to delegate authentication and authorization
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: ClusterRoleBinding
+ metadata:
+ name: auth-delegator-${NAMESPACE}
+ roleRef:
+ kind: ClusterRole
+ name: system:auth-delegator
+ subjects:
+ - kind: ServiceAccount
+ namespace: ${NAMESPACE}
+ name: apiserver
+
+# to have the template service broker powers
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: ClusterRoleBinding
+ metadata:
+ name: tsb-${NAMESPACE}
+ roleRef:
+ kind: ClusterRole
+ name: system:openshift:controller:template-service-broker
+ subjects:
+ - kind: ServiceAccount
+ namespace: ${NAMESPACE}
+ name: apiserver
+
+# to read the config for terminating authentication
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: RoleBinding
+ metadata:
+ namespace: ${KUBE_SYSTEM}
+ name: extension-apiserver-authentication-reader-${NAMESPACE}
+ roleRef:
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - kind: ServiceAccount
+ namespace: ${NAMESPACE}
+ name: apiserver
+
+# allow the kube service catalog's SA to read the static secret defined
+# above, which will contain the token for the SA that can call the TSB.
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: Role
+ metadata:
+ name: templateservicebroker-auth-reader
+ namespace: ${NAMESPACE}
+ rules:
+ - apiGroups:
+ - ""
+ resourceNames:
+ - templateservicebroker-client
+ resources:
+ - secrets
+ verbs:
+ - get
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: RoleBinding
+ metadata:
+ namespace: ${NAMESPACE}
+ name: templateservicebroker-auth-reader
+ roleRef:
+ kind: Role
+ name: templateservicebroker-auth-reader
+ subjects:
+ - kind: ServiceAccount
+ namespace: kube-service-catalog
+ name: service-catalog-controller
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 277695f78..f0f250480 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -716,6 +716,100 @@ def oo_openshift_env(hostvars):
# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements
+def oo_component_persistent_volumes(hostvars, groups, component):
+ """ Generate list of persistent volumes based on oo_openshift_env
+ storage options set in host variables for a specific component.
+ """
+ if not issubclass(type(hostvars), dict):
+ raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
+ if not issubclass(type(groups), dict):
+ raise errors.AnsibleFilterError("|failed expects groups is a dict")
+
+ persistent_volume = None
+
+ if component in hostvars['openshift']:
+ if 'storage' in hostvars['openshift'][component]:
+ params = hostvars['openshift'][component]['storage']
+ kind = params['kind']
+ create_pv = params['create_pv']
+ if kind is not None and create_pv:
+ if kind == 'nfs':
+ host = params['host']
+ if host is None:
+ if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
+ host = groups['oo_nfs_to_config'][0]
+ else:
+ raise errors.AnsibleFilterError("|failed no storage host detected")
+ directory = params['nfs']['directory']
+ volume = params['volume']['name']
+ path = directory + '/' + volume
+ size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
+ access_modes = params['access']['modes']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ nfs=dict(
+ server=host,
+ path=path)))
+
+ elif kind == 'openstack':
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
+ access_modes = params['access']['modes']
+ filesystem = params['openstack']['filesystem']
+ volume_id = params['openstack']['volumeID']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ cinder=dict(
+ fsType=filesystem,
+ volumeID=volume_id)))
+
+ elif kind == 'glusterfs':
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
+ access_modes = params['access']['modes']
+ endpoints = params['glusterfs']['endpoints']
+ path = params['glusterfs']['path']
+ read_only = params['glusterfs']['readOnly']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ glusterfs=dict(
+ endpoints=endpoints,
+ path=path,
+ readOnly=read_only)))
+
+ elif not (kind == 'object' or kind == 'dynamic'):
+ msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
+ kind,
+ component)
+ raise errors.AnsibleFilterError(msg)
+ return persistent_volume
+
+
+# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements
def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
""" Generate list of persistent volumes based on oo_openshift_env
storage options set in host variables.
@@ -734,84 +828,122 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
if 'storage' in hostvars['openshift']['hosted'][component]:
params = hostvars['openshift']['hosted'][component]['storage']
kind = params['kind']
- create_pv = params['create_pv']
- if kind is not None and create_pv:
- if kind == 'nfs':
- host = params['host']
- if host is None:
- if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
- host = groups['oo_nfs_to_config'][0]
+ if 'create_pv' in params:
+ create_pv = params['create_pv']
+ if kind is not None and create_pv:
+ if kind == 'nfs':
+ host = params['host']
+ if host is None:
+ if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
+ host = groups['oo_nfs_to_config'][0]
+ else:
+ raise errors.AnsibleFilterError("|failed no storage host detected")
+ directory = params['nfs']['directory']
+ volume = params['volume']['name']
+ path = directory + '/' + volume
+ size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
else:
- raise errors.AnsibleFilterError("|failed no storage host detected")
- directory = params['nfs']['directory']
- volume = params['volume']['name']
- path = directory + '/' + volume
- size = params['volume']['size']
- if 'labels' in params:
- labels = params['labels']
- else:
- labels = dict()
- access_modes = params['access']['modes']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- labels=labels,
- access_modes=access_modes,
- storage=dict(
- nfs=dict(
- server=host,
- path=path)))
- persistent_volumes.append(persistent_volume)
- elif kind == 'openstack':
- volume = params['volume']['name']
- size = params['volume']['size']
- if 'labels' in params:
- labels = params['labels']
- else:
- labels = dict()
- access_modes = params['access']['modes']
- filesystem = params['openstack']['filesystem']
- volume_id = params['openstack']['volumeID']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- labels=labels,
- access_modes=access_modes,
- storage=dict(
- cinder=dict(
- fsType=filesystem,
- volumeID=volume_id)))
- persistent_volumes.append(persistent_volume)
- elif kind == 'glusterfs':
- volume = params['volume']['name']
- size = params['volume']['size']
- if 'labels' in params:
- labels = params['labels']
- else:
- labels = dict()
- access_modes = params['access']['modes']
- endpoints = params['glusterfs']['endpoints']
- path = params['glusterfs']['path']
- read_only = params['glusterfs']['readOnly']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- labels=labels,
- access_modes=access_modes,
- storage=dict(
- glusterfs=dict(
- endpoints=endpoints,
- path=path,
- readOnly=read_only)))
- persistent_volumes.append(persistent_volume)
- elif not (kind == 'object' or kind == 'dynamic'):
- msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
- kind,
- component)
- raise errors.AnsibleFilterError(msg)
+ labels = dict()
+ access_modes = params['access']['modes']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ nfs=dict(
+ server=host,
+ path=path)))
+ persistent_volumes.append(persistent_volume)
+ elif kind == 'openstack':
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
+ access_modes = params['access']['modes']
+ filesystem = params['openstack']['filesystem']
+ volume_id = params['openstack']['volumeID']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ cinder=dict(
+ fsType=filesystem,
+ volumeID=volume_id)))
+ persistent_volumes.append(persistent_volume)
+ elif kind == 'glusterfs':
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
+ access_modes = params['access']['modes']
+ endpoints = params['glusterfs']['endpoints']
+ path = params['glusterfs']['path']
+ read_only = params['glusterfs']['readOnly']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ glusterfs=dict(
+ endpoints=endpoints,
+ path=path,
+ readOnly=read_only)))
+ persistent_volumes.append(persistent_volume)
+ elif not (kind == 'object' or kind == 'dynamic'):
+ msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
+ kind,
+ component)
+ raise errors.AnsibleFilterError(msg)
+ if 'logging' in hostvars['openshift']:
+ persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'logging')
+ if persistent_volume is not None:
+ persistent_volumes.append(persistent_volume)
+ if 'loggingops' in hostvars['openshift']:
+ persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'loggingops')
+ if persistent_volume is not None:
+ persistent_volumes.append(persistent_volume)
+ if 'metrics' in hostvars['openshift']:
+ persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'metrics')
+ if persistent_volume is not None:
+ persistent_volumes.append(persistent_volume)
return persistent_volumes
+def oo_component_pv_claims(hostvars, component):
+ """ Generate list of persistent volume claims based on oo_openshift_env
+ storage options set in host variables for a specific component.
+ """
+ if not issubclass(type(hostvars), dict):
+ raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
+
+ if component in hostvars['openshift']:
+ if 'storage' in hostvars['openshift'][component]:
+ params = hostvars['openshift'][component]['storage']
+ kind = params['kind']
+ create_pv = params['create_pv']
+ create_pvc = params['create_pvc']
+ if kind not in [None, 'object'] and create_pv and create_pvc:
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ access_modes = params['access']['modes']
+ persistent_volume_claim = dict(
+ name="{0}-claim".format(volume),
+ capacity=size,
+ access_modes=access_modes)
+ return persistent_volume_claim
+ return None
+
+
def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):
""" Generate list of persistent volume claims based on oo_openshift_env
storage options set in host variables.
@@ -828,17 +960,31 @@ def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):
if 'storage' in hostvars['openshift']['hosted'][component]:
params = hostvars['openshift']['hosted'][component]['storage']
kind = params['kind']
- create_pv = params['create_pv']
- create_pvc = params['create_pvc']
- if kind not in [None, 'object'] and create_pv and create_pvc:
- volume = params['volume']['name']
- size = params['volume']['size']
- access_modes = params['access']['modes']
- persistent_volume_claim = dict(
- name="{0}-claim".format(volume),
- capacity=size,
- access_modes=access_modes)
- persistent_volume_claims.append(persistent_volume_claim)
+ if 'create_pv' in params:
+ if 'create_pvc' in params:
+ create_pv = params['create_pv']
+ create_pvc = params['create_pvc']
+ if kind not in [None, 'object'] and create_pv and create_pvc:
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ access_modes = params['access']['modes']
+ persistent_volume_claim = dict(
+ name="{0}-claim".format(volume),
+ capacity=size,
+ access_modes=access_modes)
+ persistent_volume_claims.append(persistent_volume_claim)
+ if 'logging' in hostvars['openshift']:
+ persistent_volume_claim = oo_component_pv_claims(hostvars, 'logging')
+ if persistent_volume_claim is not None:
+ persistent_volume_claims.append(persistent_volume_claim)
+ if 'loggingops' in hostvars['openshift']:
+ persistent_volume_claim = oo_component_pv_claims(hostvars, 'loggingops')
+ if persistent_volume_claim is not None:
+ persistent_volume_claims.append(persistent_volume_claim)
+ if 'metrics' in hostvars['openshift']:
+ persistent_volume_claim = oo_component_pv_claims(hostvars, 'metrics')
+ if persistent_volume_claim is not None:
+ persistent_volume_claims.append(persistent_volume_claim)
return persistent_volume_claims
@@ -877,10 +1023,8 @@ def oo_pods_match_component(pods, deployment_type, component):
raise errors.AnsibleFilterError("failed expects component to be a string")
image_prefix = 'openshift/origin-'
- if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
+ if deployment_type == 'openshift-enterprise':
image_prefix = 'openshift3/ose-'
- elif deployment_type == 'atomic-enterprise':
- image_prefix = 'aep3_beta/aep-'
matching_pods = []
image_regex = image_prefix + component + r'.*'
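For context, these persistent-volume filters are consumed from playbooks in the usual oo_filters style; a hedged usage sketch (the host selection and variable names are illustrative, not part of this diff):

```yaml
# Illustrative only: building PV/PVC lists from the first master's facts
- name: Collect persistent volumes and claims
  hosts: localhost
  tasks:
  - set_fact:
      persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
      persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
```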
diff --git a/filter_plugins/openshift_version.py b/filter_plugins/openshift_version.py
index 809e82488..c515f1a71 100644
--- a/filter_plugins/openshift_version.py
+++ b/filter_plugins/openshift_version.py
@@ -33,10 +33,10 @@ def legacy_gte_function_builder(name, versions):
returns True/False
"""
version_gte = False
- if 'enterprise' in deployment_type:
+ if deployment_type == 'openshift-enterprise':
if str(version) >= LooseVersion(enterprise_version):
version_gte = True
- elif 'origin' in deployment_type:
+ else:
if str(version) >= LooseVersion(origin_version):
version_gte = True
return version_gte
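Each generated filter compares a piped-in version against the legacy cutoff for the given deployment type; a hedged usage sketch (the filter name and argument shown here are illustrative, not confirmed by this diff):

```yaml
# Illustrative only: gating a task on a minimum OpenShift version
- name: Apply 3.6-era behavior
  debug:
    msg: "version threshold met"
  when: openshift_version | oo_version_gte_3_6(openshift_deployment_type)
```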
diff --git a/images/installer/Dockerfile.rhel7 b/images/installer/Dockerfile.rhel7
index 3110f409c..5245771d0 100644
--- a/images/installer/Dockerfile.rhel7
+++ b/images/installer/Dockerfile.rhel7
@@ -7,7 +7,7 @@ USER root
# Playbooks, roles, and their dependencies are installed from packages.
RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto openssl java-1.8.0-openjdk-headless httpd-tools" \
&& yum repolist > /dev/null \
- && yum-config-manager --enable rhel-7-server-ose-3.6-rpms \
+ && yum-config-manager --enable rhel-7-server-ose-3.7-rpms \
&& yum-config-manager --enable rhel-7-server-rh-common-rpms \
&& yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
&& rpm -q $INSTALL_PKGS \
diff --git a/images/installer/root/exports/config.json.template b/images/installer/root/exports/config.json.template
index 739c0080f..1a009fa7b 100644
--- a/images/installer/root/exports/config.json.template
+++ b/images/installer/root/exports/config.json.template
@@ -24,7 +24,7 @@
"PLAYBOOK_FILE=$PLAYBOOK_FILE",
"ANSIBLE_CONFIG=$ANSIBLE_CONFIG"
],
- "cwd": "/opt/app-root/src/",
+ "cwd": "/usr/share/ansible/openshift-ansible",
"rlimits": [
{
"type": "RLIMIT_NOFILE",
diff --git a/images/installer/root/usr/local/bin/run b/images/installer/root/usr/local/bin/run
index 51ac566e5..70aa0bac3 100755
--- a/images/installer/root/usr/local/bin/run
+++ b/images/installer/root/usr/local/bin/run
@@ -39,7 +39,7 @@ if [[ "$ALLOW_ANSIBLE_CONNECTION_LOCAL" = false ]]; then
fi
if [[ -v VAULT_PASS ]]; then
- VAULT_PASS_FILE=.vaultpass
+ VAULT_PASS_FILE="$(mktemp)"
echo ${VAULT_PASS} > ${VAULT_PASS_FILE}
VAULT_PASS_ARG="--vault-password-file ${VAULT_PASS_FILE}"
fi
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index dbe57bbd2..9d811fcab 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -34,17 +34,17 @@ openshift_deployment_type=origin
# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
# release.
-openshift_release=v3.6
+openshift_release=v3.7
# Specify an exact container image tag to install or configure.
# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_image_tag=v3.6.0
+#openshift_image_tag=v3.7.0
# Specify an exact rpm version to install or configure.
# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_pkg_version=-3.6.0
+#openshift_pkg_version=-3.7.0
# This enables all the system containers except for docker:
#openshift_use_system_containers=False
@@ -119,7 +119,7 @@ openshift_release=v3.6
# will be built off of the deployment type and ansible_distribution. Only
# use this option if you are sure you know what you are doing!
#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest"
-#openshift_crio_systemcontainer_image_registry_override="registry.example.com"
+#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest"
# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
# Default value: "--log-driver=journald"
#openshift_docker_options="-l warn --ipv6=false"
@@ -491,10 +491,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
#
# By default metrics are not automatically deployed, set this to enable them
-# openshift_hosted_metrics_deploy=true
+#openshift_metrics_install_metrics=true
#
# Storage Options
-# If openshift_hosted_metrics_storage_kind is unset then metrics will be stored
+# If openshift_metrics_storage_kind is unset then metrics will be stored
# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
# Storage options A & B currently support only one cassandra pod which is
# generally enough for up to 1000 pods. Additional volumes can be created
@@ -504,29 +504,29 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/metrics"
-#openshift_hosted_metrics_storage_kind=nfs
-#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_metrics_storage_nfs_directory=/exports
-#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
-#openshift_hosted_metrics_storage_volume_name=metrics
-#openshift_hosted_metrics_storage_volume_size=10Gi
-#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
+#openshift_metrics_storage_kind=nfs
+#openshift_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_metrics_storage_nfs_directory=/exports
+#openshift_metrics_storage_nfs_options='*(rw,root_squash)'
+#openshift_metrics_storage_volume_name=metrics
+#openshift_metrics_storage_volume_size=10Gi
+#openshift_metrics_storage_labels={'storage': 'metrics'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/metrics"
-#openshift_hosted_metrics_storage_kind=nfs
-#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_metrics_storage_host=nfs.example.com
-#openshift_hosted_metrics_storage_nfs_directory=/exports
-#openshift_hosted_metrics_storage_volume_name=metrics
-#openshift_hosted_metrics_storage_volume_size=10Gi
-#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
+#openshift_metrics_storage_kind=nfs
+#openshift_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_metrics_storage_host=nfs.example.com
+#openshift_metrics_storage_nfs_directory=/exports
+#openshift_metrics_storage_volume_name=metrics
+#openshift_metrics_storage_volume_size=10Gi
+#openshift_metrics_storage_labels={'storage': 'metrics'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
-#openshift_hosted_metrics_storage_kind=dynamic
+#openshift_metrics_storage_kind=dynamic
#
# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
# list of options please see roles/openshift_metrics/README.md
@@ -535,10 +535,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics
# Currently, you may only alter the hostname portion of the url, alterting the
# `/hawkular/metrics` path will break installation of metrics.
-#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
+#openshift_metrics_hawkular_hostname=https://hawkular-metrics.example.com/hawkular/metrics
# Configure the prefix and version for the component images
-#openshift_hosted_metrics_deployer_prefix=docker.io/openshift/origin-
-#openshift_hosted_metrics_deployer_version=v3.6.0
+#openshift_metrics_image_prefix=docker.io/openshift/origin-
+#openshift_metrics_image_version=v3.7.0
#
# StorageClass
# openshift_storageclass_name=gp2
@@ -548,36 +548,36 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Logging deployment
#
# Currently logging deployment is disabled by default, enable it by setting this
-#openshift_hosted_logging_deploy=true
+#openshift_logging_install_logging=true
#
# Logging storage config
# Option A - NFS Host Group
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/logging"
-#openshift_hosted_logging_storage_kind=nfs
-#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_logging_storage_nfs_directory=/exports
-#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
-#openshift_hosted_logging_storage_volume_name=logging
-#openshift_hosted_logging_storage_volume_size=10Gi
-#openshift_hosted_logging_storage_labels={'storage': 'logging'}
+#openshift_logging_storage_kind=nfs
+#openshift_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_logging_storage_nfs_directory=/exports
+#openshift_logging_storage_nfs_options='*(rw,root_squash)'
+#openshift_logging_storage_volume_name=logging
+#openshift_logging_storage_volume_size=10Gi
+#openshift_logging_storage_labels={'storage': 'logging'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/logging"
-#openshift_hosted_logging_storage_kind=nfs
-#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_logging_storage_host=nfs.example.com
-#openshift_hosted_logging_storage_nfs_directory=/exports
-#openshift_hosted_logging_storage_volume_name=logging
-#openshift_hosted_logging_storage_volume_size=10Gi
-#openshift_hosted_logging_storage_labels={'storage': 'logging'}
+#openshift_logging_storage_kind=nfs
+#openshift_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_logging_storage_host=nfs.example.com
+#openshift_logging_storage_nfs_directory=/exports
+#openshift_logging_storage_volume_name=logging
+#openshift_logging_storage_volume_size=10Gi
+#openshift_logging_storage_labels={'storage': 'logging'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
-#openshift_hosted_logging_storage_kind=dynamic
+#openshift_logging_storage_kind=dynamic
#
# Option D - none -- Logging will use emptydir volumes which are destroyed when
# pods are deleted
@@ -587,13 +587,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#
# Configure loggingPublicURL in the master config for aggregate logging, defaults
# to kibana.{{ openshift_master_default_subdomain }}
-#openshift_hosted_logging_hostname=logging.apps.example.com
+#openshift_logging_kibana_hostname=logging.apps.example.com
# Configure the number of elastic search nodes, unless you're using dynamic provisioning
# this value must be 1
-#openshift_hosted_logging_elasticsearch_cluster_size=1
+#openshift_logging_es_cluster_size=1
# Configure the prefix and version for the component images
-#openshift_hosted_logging_deployer_prefix=docker.io/openshift/origin-
-#openshift_hosted_logging_deployer_version=v3.6.0
+#openshift_logging_image_prefix=docker.io/openshift/origin-
+#openshift_logging_image_version=v3.7.0
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -613,7 +613,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting
# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS
# environment variable located in /etc/sysconfig/docker-network.
-# When upgrading these must be specificed!
+# When upgrading or scaling up, the following must match what's in your master config!
+# Inventory: master yaml field
+# osm_cluster_network_cidr: clusterNetworkCIDR
+# openshift_portal_net: serviceNetworkCIDR
+# When installing, osm_cluster_network_cidr and openshift_portal_net must be set.
+# Sane examples are provided below.
#osm_cluster_network_cidr=10.128.0.0/14
#openshift_portal_net=172.30.0.0/16
@@ -635,7 +640,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Configure number of bits to allocate to each host’s subnet e.g. 9
# would mean a /23 network on the host.
-# When upgrading this must be specificed!
+# When upgrading or scaling up, the following must match what's in your master config!
+# Inventory variable: master config YAML field
+# osm_host_subnet_length: hostSubnetLength
+# When installing, osm_host_subnet_length must be set. A sane example is provided below.
#osm_host_subnet_length=9
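
For reference, these inventory variables land in the networkConfig stanza of /etc/origin/master/master-config.yaml; a sketch of the corresponding fields, using the example values above. With hostSubnetLength: 9, each node is handed a /23 (32 - 9 = 23) carved out of clusterNetworkCIDR:

    networkConfig:
      clusterNetworkCIDR: 10.128.0.0/14
      hostSubnetLength: 9
      serviceNetworkCIDR: 172.30.0.0/16
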
# Configure master API and console ports.
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 0d60de6d2..e6deda4ac 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -34,17 +34,17 @@ openshift_deployment_type=openshift-enterprise
# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
# release.
-openshift_release=v3.6
+openshift_release=v3.7
# Specify an exact container image tag to install or configure.
# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_image_tag=v3.6.0
+#openshift_image_tag=v3.7.0
# Specify an exact rpm version to install or configure.
# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_pkg_version=-3.6.0
+#openshift_pkg_version=-3.7.0
# This enables all the system containers except for docker:
#openshift_use_system_containers=False
@@ -119,7 +119,7 @@ openshift_release=v3.6
# will be built off of the deployment type and ansible_distribution. Only
# use this option if you are sure you know what you are doing!
#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest"
-#openshift_crio_systemcontainer_image_registry_override="registry.example.com"
+#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest"
# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
# Default value: "--log-driver=journald"
#openshift_docker_options="-l warn --ipv6=false"
@@ -499,10 +499,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
#
# By default metrics are not automatically deployed, set this to enable them
-# openshift_hosted_metrics_deploy=true
+#openshift_metrics_install_metrics=true
#
# Storage Options
-# If openshift_hosted_metrics_storage_kind is unset then metrics will be stored
+# If openshift_metrics_storage_kind is unset then metrics will be stored
# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
# Storage options A & B currently support only one cassandra pod which is
# generally enough for up to 1000 pods. Additional volumes can be created
@@ -512,29 +512,29 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/metrics"
-#openshift_hosted_metrics_storage_kind=nfs
-#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_metrics_storage_nfs_directory=/exports
-#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
-#openshift_hosted_metrics_storage_volume_name=metrics
-#openshift_hosted_metrics_storage_volume_size=10Gi
-#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
+#openshift_metrics_storage_kind=nfs
+#openshift_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_metrics_storage_nfs_directory=/exports
+#openshift_metrics_storage_nfs_options='*(rw,root_squash)'
+#openshift_metrics_storage_volume_name=metrics
+#openshift_metrics_storage_volume_size=10Gi
+#openshift_metrics_storage_labels={'storage': 'metrics'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/metrics"
-#openshift_hosted_metrics_storage_kind=nfs
-#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_metrics_storage_host=nfs.example.com
-#openshift_hosted_metrics_storage_nfs_directory=/exports
-#openshift_hosted_metrics_storage_volume_name=metrics
-#openshift_hosted_metrics_storage_volume_size=10Gi
-#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
+#openshift_metrics_storage_kind=nfs
+#openshift_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_metrics_storage_host=nfs.example.com
+#openshift_metrics_storage_nfs_directory=/exports
+#openshift_metrics_storage_volume_name=metrics
+#openshift_metrics_storage_volume_size=10Gi
+#openshift_metrics_storage_labels={'storage': 'metrics'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
-#openshift_hosted_metrics_storage_kind=dynamic
+#openshift_metrics_storage_kind=dynamic
#
# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
# list of options please see roles/openshift_metrics/README.md
@@ -543,10 +543,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics
# Currently, you may only alter the hostname portion of the url, altering the
# `/hawkular/metrics` path will break installation of metrics.
-#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
+#openshift_metrics_hawkular_hostname=hawkular-metrics.example.com
# Configure the prefix and version for the component images
-#openshift_hosted_metrics_deployer_prefix=registry.example.com:8888/openshift3/
-#openshift_hosted_metrics_deployer_version=3.6.0
+#openshift_metrics_image_prefix=registry.example.com:8888/openshift3/
+#openshift_metrics_image_version=3.7.0
#
# StorageClass
# openshift_storageclass_name=gp2
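
The renamed metrics variables above compose the same way as the logging ones; a minimal sketch assuming dynamic storage, with prefix and version matching the examples in this file:

    openshift_metrics_install_metrics=true
    openshift_metrics_storage_kind=dynamic
    openshift_metrics_image_prefix=registry.example.com:8888/openshift3/
    openshift_metrics_image_version=3.7.0
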
@@ -556,36 +556,36 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Logging deployment
#
# Currently logging deployment is disabled by default, enable it by setting this
-#openshift_hosted_logging_deploy=true
+#openshift_logging_install_logging=true
#
# Logging storage config
# Option A - NFS Host Group
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/logging"
-#openshift_hosted_logging_storage_kind=nfs
-#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_logging_storage_nfs_directory=/exports
-#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
-#openshift_hosted_logging_storage_volume_name=logging
-#openshift_hosted_logging_storage_volume_size=10Gi
-#openshift_hosted_logging_storage_labels={'storage': 'logging'}
+#openshift_logging_storage_kind=nfs
+#openshift_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_logging_storage_nfs_directory=/exports
+#openshift_logging_storage_nfs_options='*(rw,root_squash)'
+#openshift_logging_storage_volume_name=logging
+#openshift_logging_storage_volume_size=10Gi
+#openshift_logging_storage_labels={'storage': 'logging'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/logging"
-#openshift_hosted_logging_storage_kind=nfs
-#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_logging_storage_host=nfs.example.com
-#openshift_hosted_logging_storage_nfs_directory=/exports
-#openshift_hosted_logging_storage_volume_name=logging
-#openshift_hosted_logging_storage_volume_size=10Gi
-#openshift_hosted_logging_storage_labels={'storage': 'logging'}
+#openshift_logging_storage_kind=nfs
+#openshift_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_logging_storage_host=nfs.example.com
+#openshift_logging_storage_nfs_directory=/exports
+#openshift_logging_storage_volume_name=logging
+#openshift_logging_storage_volume_size=10Gi
+#openshift_logging_storage_labels={'storage': 'logging'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
-#openshift_hosted_logging_storage_kind=dynamic
+#openshift_logging_storage_kind=dynamic
#
# Option D - none -- Logging will use emptydir volumes which are destroyed when
# pods are deleted
@@ -595,13 +595,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#
# Configure loggingPublicURL in the master config for aggregate logging, defaults
# to kibana.{{ openshift_master_default_subdomain }}
-#openshift_hosted_logging_hostname=logging.apps.example.com
+#openshift_logging_kibana_hostname=logging.apps.example.com
# Configure the number of Elasticsearch nodes, unless you're using dynamic provisioning
# this value must be 1
-#openshift_hosted_logging_elasticsearch_cluster_size=1
+#openshift_logging_es_cluster_size=1
# Configure the prefix and version for the component images
-#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
-#openshift_hosted_logging_deployer_version=3.6.0
+#openshift_logging_image_prefix=registry.example.com:8888/openshift3/
+#openshift_logging_image_version=3.7.0
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -621,7 +621,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting
# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS
# environment variable located in /etc/sysconfig/docker-network.
-# When upgrading these must be specificed!
+# When upgrading or scaling up, the following must match what's in your master config!
+# Inventory variable: master config YAML field
+# osm_cluster_network_cidr: clusterNetworkCIDR
+# openshift_portal_net: serviceNetworkCIDR
+# When installing, osm_cluster_network_cidr and openshift_portal_net must be set.
+# Sane examples are provided below.
#osm_cluster_network_cidr=10.128.0.0/14
#openshift_portal_net=172.30.0.0/16
@@ -643,7 +648,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Configure number of bits to allocate to each host’s subnet e.g. 9
# would mean a /23 network on the host.
-# When upgrading this must be specificed!
+# When upgrading or scaling up, the following must match what's in your master config!
+# Inventory variable: master config YAML field
+# osm_host_subnet_length: hostSubnetLength
+# When installing, osm_host_subnet_length must be set. A sane example is provided below.
#osm_host_subnet_length=9
# Configure master API and console ports.
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 3be13145e..b5673cda1 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.7.0
-Release: 0.126.0%{?dist}
+Release: 0.127.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -280,6 +280,96 @@ Atomic OpenShift Utilities includes
%changelog
+* Thu Sep 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.127.0
+- Updating to always configure api aggregation with installation
+ (ewolinet@redhat.com)
+- Do not reconcile in >= 3.7 (simo@redhat.com)
+- Cleanup old deployment types (mgugino@redhat.com)
+- crio: ensure no default CNI configuration files are left
+ (gscrivan@redhat.com)
+- node: specify the DNS domain (gscrivan@redhat.com)
+- more retries on repoquery_cmd (lmeyer@redhat.com)
+- fix etcd back message error (jchaloup@redhat.com)
+- openshift_checks: enable providing file outputs (lmeyer@redhat.com)
+- Fix registry auth task ordering (mgugino@redhat.com)
+- Prometheus role fixes (zgalor@redhat.com)
+- papr: Update inventory to include required vars (smilner@redhat.com)
+- testing: Skip net vars on integration tests (smilner@redhat.com)
+- inventory: Update network variable doc (smilner@redhat.com)
+- installer image: use tmp file for vaultpass (lmeyer@redhat.com)
+- system container: use ansible root as cwd (lmeyer@redhat.com)
+- openshift_sanitize_inventory: Check for required vars (smilner@redhat.com)
+- No conversion to boolean and no quoting for include_granted_scopes.
+ (jpazdziora@redhat.com)
+- Correct firewall install for openshift-nfs (rteague@redhat.com)
+- inventory: Update versions to 3.7 (smilner@redhat.com)
+- Port origin-gce roles for cluster setup to copy AWS provisioning
+ (ccoleman@redhat.com)
+- Bug 1491636 - honor openshift_logging_es_ops_nodeselector
+ (jwozniak@redhat.com)
+- Setup tuned after the node has been restarted. (jmencak@redhat.com)
+- Only attempt to start iptables on hosts in the current batch
+ (sdodson@redhat.com)
+- Removing setting of pod presets (ewolinet@redhat.com)
+- cri-o: Fix Fedora image name (smilner@redhat.com)
+- add retry on repoquery_cmd (lmeyer@redhat.com)
+- add retries to repoquery module (lmeyer@redhat.com)
+- Rework openshift-cluster into deploy_cluster.yml (rteague@redhat.com)
+- inventory generate: fix config doc (lmeyer@redhat.com)
+- inventory generate: remove refs to openshift_cluster_user (lmeyer@redhat.com)
+- inventory generate: always use kubeconfig, no login (lmeyer@redhat.com)
+- Scaffold out the entire build defaults hash (tbielawa@redhat.com)
+- Use openshift.common.ip rather than ansible_default_ipv4 in etcd migration
+ playbook. (abutcher@redhat.com)
+- Add IMAGE_VERSION to the image stream tag source (sdodson@redhat.com)
+- Add loadbalancer config entry point (rteague@redhat.com)
+- pull openshift_master deps out into a play (jchaloup@redhat.com)
+- Don't assume storage_migration control variables are already boolean
+ (mchappel@redhat.com)
+- upgrade: Updates warning on missing required variables (smilner@redhat.com)
+- Update master config with new client urls during etcd scaleup.
+ (abutcher@redhat.com)
+- Increase rate limiting in journald.conf (maszulik@redhat.com)
+- Correct logic for openshift_hosted_*_wait (rteague@redhat.com)
+- Adding management-admin SC to admin role for management-infra project
+ (ewolinet@redhat.com)
+- Only install base openshift package on masters and nodes (mgugino@redhat.com)
+- Workaround Ansible Jinja2 delimiter warning (rteague@redhat.com)
+- openshift-checks: add role symlink (lmeyer@redhat.com)
+- double the required disk space for etcd backup (jchaloup@redhat.com)
+- openshift_health_check: allow disabling all checks (lmeyer@redhat.com)
+- docker_image_availability: fix local image search (lmeyer@redhat.com)
+- docker_image_availability: probe registry connectivity (lmeyer@redhat.com)
+- openshift_checks: add retries in python (lmeyer@redhat.com)
+- add inventory-generator under new sub pkg (jvallejo@redhat.com)
+- Re-enabling new tuned profile hierarchy (PR5089) (jmencak@redhat.com)
+- Add `openshift_node_open_ports` to allow arbitrary firewall exposure
+ (ccoleman@redhat.com)
+- Fix: authenticated registry support for containerized hosts
+ (mgugino@redhat.com)
+- [Proposal] OpenShift-Ansible Proposal Process (rteague@redhat.com)
+- Improve searching when conditions for Jinja2 delimiters (rteague@redhat.com)
+- Clarify requirement of having etcd group (sdodson@redhat.com)
+- add health checks 3_6,3_7 upgrade path (jvallejo@redhat.com)
+- container-engine: Allow full image override (smilner@redhat.com)
+- Add openshift_public_hostname length check (mgugino@redhat.com)
+- Skip failure dedup instead of crashing (rhcarvalho@gmail.com)
+- Properly quote "true" and "false" strings for include_granted_scopes.
+ (jpazdziora@redhat.com)
+- Move sysctl.conf customizations to a separate file (jdesousa@redhat.com)
+- Fix new_master or new_node fail check (denverjanke@gmail.com)
+- [Proposal] OpenShift-Ansible Playbook Consolidation (rteague@redhat.com)
+- GlusterFS: Allow option to use or ignore default node selectors
+ (jarrpa@redhat.com)
+- GlusterFS: Clarify heketi URL documentation (jarrpa@redhat.com)
+- GlusterFS: Add files/templates for v3.7 (jarrpa@redhat.com)
+- Support setting annotations on Hawkular route (hansmi@vshn.ch)
+- add additional preflight checks to upgrade path (jvallejo@redhat.com)
+- hot fix for env variable resolve (m.judeikis@gmail.com)
+- GlusterFS: Correct firewall port names (jarrpa@redhat.com)
+- Make RH subscription more resilient to temporary failures
+ (lhuard@amadeus.com)
+
* Mon Sep 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.126.0
- Fix rpm version logic for hosts (mgugino@redhat.com)
- Revert back to hostnamectl and previous default of not setting hostname
diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
index a3894e243..073ded6e0 100644
--- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
@@ -7,6 +7,10 @@
tags:
- always
+- include: ../../common/openshift-cluster/redeploy-certificates/check-expiry.yml
+ vars:
+ g_check_expiry_hosts: 'oo_etcd_to_config'
+
- include: ../../common/openshift-cluster/redeploy-certificates/etcd.yml
- include: ../../common/openshift-cluster/redeploy-certificates/masters.yml
@@ -14,6 +18,8 @@
- include: ../../common/openshift-cluster/redeploy-certificates/nodes.yml
- include: ../../common/openshift-etcd/restart.yml
+ vars:
+ g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}"
- include: ../../common/openshift-master/restart.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml
index 8516baee8..0f86eb997 100644
--- a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml
@@ -7,8 +7,14 @@
tags:
- always
+- include: ../../common/openshift-cluster/redeploy-certificates/check-expiry.yml
+ vars:
+ g_check_expiry_hosts: 'oo_etcd_to_config'
+
- include: ../../common/openshift-cluster/redeploy-certificates/etcd.yml
- include: ../../common/openshift-etcd/restart.yml
+ vars:
+ g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}"
- include: ../../common/openshift-master/restart.yml
diff --git a/playbooks/byo/openshift-loadbalancer/config.yml b/playbooks/byo/openshift-loadbalancer/config.yml
new file mode 100644
index 000000000..32c828f97
--- /dev/null
+++ b/playbooks/byo/openshift-loadbalancer/config.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-loadbalancer/config.yml
diff --git a/playbooks/byo/openshift-nfs/config.yml b/playbooks/byo/openshift-nfs/config.yml
new file mode 100644
index 000000000..93b24411e
--- /dev/null
+++ b/playbooks/byo/openshift-nfs/config.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-nfs/config.yml
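
Both new wrappers follow the same shape: resolve groups, run the standard include set, then the component config. Assuming a typical BYO inventory path (illustrative), either can now be run standalone:

    ansible-playbook -i /etc/ansible/hosts playbooks/byo/openshift-nfs/config.yml
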
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index 1b14ff32e..06f914981 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -8,9 +8,9 @@
hosts: OSEv3
roles:
- role: rhel_subscribe
- when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
- - openshift_repos
- - os_update_latest
+ when:
+ - deployment_type == 'openshift-enterprise'
+ - ansible_distribution == "RedHat"
+ - lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | default('no', True) | lower in ['no', 'false']
+ - role: openshift_repos
+ - role: os_update_latest
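
The rewrite also moves from one `and`-joined expression to the list form of when:, whose entries Ansible ANDs together implicitly, so the two styles below are equivalent (a generic sketch):

    - debug: msg=subscribing
      when: ansible_distribution == "RedHat" and deployment_type == 'openshift-enterprise'

    - debug: msg=subscribing
      when:
      - ansible_distribution == "RedHat"
      - deployment_type == 'openshift-enterprise'
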
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index bbd5a0185..804ea8eb8 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -57,8 +57,27 @@
tags:
- hosted
+- name: Configure API Aggregation on masters
+ hosts: oo_masters
+ serial: 1
+ tasks:
+ - block:
+ - include_role:
+ name: openshift_service_catalog
+ tasks_from: wire_aggregator
+ vars:
+ first_master: "{{ groups.oo_first_master[0] }}"
+
- include: service_catalog.yml
when:
- openshift_enable_service_catalog | default(false) | bool
tags:
- servicecatalog
+
+- name: Print deprecated variable warning message if necessary
+ hosts: oo_first_master
+ gather_facts: no
+ tasks:
+ - debug: msg="{{__deprecation_message}}"
+ when:
+ - __deprecation_message | default ('') | length > 0
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index 16a733899..e55b2f964 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -43,11 +43,14 @@
- name: Evaluate groups - Fail if no etcd hosts group is defined
fail:
msg: >
- No etcd hosts defined. Running an all-in-one master is deprecated and
- will no longer be supported in a future upgrade.
+ Running etcd as an embedded service is no longer supported. If this is a
+ new install please define an 'etcd' group with either one or three
+ hosts. These hosts may be the same hosts as your masters. If this is an
+ upgrade you may set openshift_master_unsupported_embedded_etcd=true
+ until a migration playbook becomes available.
when:
- - g_etcd_hosts | default([]) | length == 0
- - not openshift_master_unsupported_all_in_one | default(False)
+ - g_etcd_hosts | default([]) | length not in [3,1]
+ - not openshift_master_unsupported_embedded_etcd | default(False)
- not openshift_node_bootstrap | default(False)
- name: Evaluate oo_all_hosts
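
In inventory terms the new requirement is simply an [etcd] group with one or three members, which may reuse the masters; a sketch with hypothetical hostnames:

    [etcd]
    master1.example.com
    master2.example.com
    master3.example.com
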
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 0723575c2..be2f8b5f4 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -93,8 +93,8 @@
state: present
with_items:
- iproute
- - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'python-dbus' }}"
- - PyYAML
+ - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
+ - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
- yum-utils
- name: Ensure various deps for running system containers are installed
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index 7af6b25bc..1b186f181 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,4 +1,12 @@
---
+- name: Set version_install_base_package true on masters and nodes
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ tasks:
+ - name: Set version_install_base_package true
+ set_fact:
+ version_install_base_package: True
+ when: version_install_base_package is not defined
+
# NOTE: requires openshift_facts be run
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 75339f6df..0e970f376 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -19,31 +19,15 @@
openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
- - set_fact:
- logging_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
- logging_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
- logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default(openshift.master.public_api_url) }}"
- logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
- logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
+
roles:
- role: openshift_default_storage_class
when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
- role: openshift_hosted
- role: openshift_metrics
- when: openshift_hosted_metrics_deploy | default(false) | bool
+ when: openshift_metrics_install_metrics | default(false) | bool
- role: openshift_logging
- when: openshift_hosted_logging_deploy | default(false) | bool
- openshift_hosted_logging_hostname: "{{ logging_hostname }}"
- openshift_hosted_logging_ops_hostname: "{{ logging_ops_hostname }}"
- openshift_hosted_logging_master_public_url: "{{ logging_master_public_url }}"
- openshift_hosted_logging_elasticsearch_cluster_size: "{{ logging_elasticsearch_cluster_size }}"
- openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
- openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}"
- openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
- openshift_hosted_logging_elasticsearch_ops_cluster_size: "{{ logging_elasticsearch_ops_cluster_size }}"
- openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_hosted_loggingops_storage_kind | default(none) == 'dynamic' else '' }}"
- openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs' ] else '' }}"
- openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es-ops' if openshift_hosted_loggingops_storage_kind | default(none) =='dynamic' else '' }}"
+ when: openshift_logging_install_logging | default(false) | bool
- role: cockpit-ui
when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
@@ -57,8 +41,6 @@
- hosted
pre_tasks:
- set_fact:
- openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
- - set_fact:
openshift_metrics_hawkular_hostname: "{{ g_metrics_hostname | default('hawkular-metrics.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
tasks:
@@ -66,10 +48,10 @@
- include_role:
name: openshift_logging
tasks_from: update_master_config
- when: openshift_hosted_logging_deploy | default(false) | bool
+ when: openshift_logging_install_logging | default(false) | bool
- block:
- include_role:
name: openshift_metrics
tasks_from: update_master_config
- when: openshift_hosted_metrics_deploy | default(false) | bool
+ when: openshift_metrics_install_metrics | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml
new file mode 100644
index 000000000..4a9fbf7eb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml
@@ -0,0 +1,12 @@
+---
+- name: Check cert expiries
+ hosts: "{{ g_check_expiry_hosts }}"
+ vars:
+ openshift_certificate_expiry_show_all: yes
+ roles:
+ # Sets 'check_results' per host which contains health status for
+ # etcd, master and node certificates. We will use 'check_results'
+ # to determine if any certificates were expired prior to running
+ # this playbook. Service restarts will be skipped if any
+ # certificates were previously expired.
+ - role: openshift_certificate_expiry
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
index 6964e8567..3a8e32ed1 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
@@ -37,10 +37,17 @@
- name: Generate new etcd CA
hosts: oo_first_etcd
roles:
- - role: openshift_etcd_ca
- etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ - role: openshift_etcd_facts
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: ca
+ vars:
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ when:
+ - etcd_ca_setup | default(True) | bool
- name: Create temp directory for syncing certs
hosts: localhost
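
The hunk above is the refactor pattern repeated throughout this commit: rather than applying a monolithic role (openshift_etcd_ca, etcd_upgrade, etcd_migrate), plays call include_role with tasks_from, which runs just one task file from the consolidated etcd role. The bare shape, as a sketch:

    - hosts: oo_first_etcd
      tasks:
      - include_role:
          name: etcd
          tasks_from: ca    # runs only roles/etcd/tasks/ca.yml
        vars:
          etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
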
@@ -146,13 +153,19 @@
changed_when: false
- include: ../../openshift-master/restart.yml
- # Do not restart masters when master certificates were previously expired.
- when: ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
- and
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ # Do not restart masters when master or etcd certificates were previously expired.
+ when:
+ # masters
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ # etcd
+ - ('expired' not in (hostvars
+ | oo_select_keys(groups['etcd'])
+ | oo_collect('check_results.check_results.etcd')
+ | oo_collect('health')))
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml
index 6b5c805e6..16f0edb06 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml
@@ -45,19 +45,23 @@
- name: Redeploy etcd certificates
hosts: oo_etcd_to_config
any_errors_fatal: true
- roles:
- - role: openshift_etcd_server_certificates
- etcd_certificates_redeploy: true
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: server_certificates
+ vars:
+ etcd_certificates_redeploy: true
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- name: Redeploy etcd client certificates for masters
hosts: oo_masters_to_config
any_errors_fatal: true
roles:
+ - role: openshift_etcd_facts
- role: openshift_etcd_client_certificates
etcd_certificates_redeploy: true
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
index 089ae6bbc..b54acae6c 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
@@ -7,7 +7,7 @@
when: not openshift.common.version_gte_3_2_or_1_2 | bool
- name: Check cert expiries
- hosts: oo_nodes_to_config:oo_masters_to_config
+ hosts: oo_nodes_to_config:oo_masters_to_config:oo_etcd_to_config
vars:
openshift_certificate_expiry_show_all: yes
roles:
@@ -209,16 +209,22 @@
with_items: "{{ client_users }}"
- include: ../../openshift-master/restart.yml
- # Do not restart masters when master certificates were previously expired.
- when: ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
- and
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ # Do not restart masters when master or etcd certificates were previously expired.
+ when:
+ # masters
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ # etcd
+ - ('expired' not in (hostvars
+ | oo_select_keys(groups['etcd'])
+ | oo_collect('check_results.check_results.etcd')
+ | oo_collect('health')))
- name: Distribute OpenShift CA certificate to nodes
hosts: oo_nodes_to_config
@@ -268,13 +274,28 @@
changed_when: false
- include: ../../openshift-node/restart.yml
- # Do not restart nodes when node certificates were previously expired.
- when: ('expired' not in hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"}))
- and
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"}))
+ # Do not restart nodes when node, master or etcd certificates were previously expired.
+ when:
+ # nodes
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"}))
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"}))
+ # masters
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ # etcd
+ - ('expired' not in (hostvars
+ | oo_select_keys(groups['etcd'])
+ | oo_collect('check_results.check_results.etcd')
+ | oo_collect('health')))
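
The etcd clause added to both restart guards is a filter pipeline: oo_select_keys narrows hostvars to the etcd group, the first oo_collect pulls each host's check_results.check_results.etcd dict, and the second collects the health values, so the restart only runs when none of them is 'expired'. The same expression can be inspected with a debug task, for example:

    - debug:
        msg: "{{ hostvars | oo_select_keys(groups['etcd'])
                 | oo_collect('check_results.check_results.etcd')
                 | oo_collect('health') }}"
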
diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml
index 599350258..529ee99be 100644
--- a/playbooks/common/openshift-cluster/service_catalog.yml
+++ b/playbooks/common/openshift-cluster/service_catalog.yml
@@ -1,20 +1,9 @@
---
-
-- name: Update Master configs
- hosts: oo_masters
- serial: 1
- tasks:
- - block:
- - include_role:
- name: openshift_service_catalog
- tasks_from: wire_aggregator
- vars:
- first_master: "{{ groups.oo_first_master[0] }}"
-
- name: Service Catalog
hosts: oo_first_master
roles:
- openshift_service_catalog
- ansible_service_broker
+ - template_service_broker
vars:
first_master: "{{ groups.oo_first_master[0] }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index b2a2eac9a..52345a9ba 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -18,12 +18,16 @@
- name: Get current version of Docker
command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
register: curr_docker_version
+ retries: 4
+ until: curr_docker_version | succeeded
changed_when: false
- name: Get latest available version of Docker
command: >
{{ repoquery_cmd }} --qf '%{version}' "docker"
register: avail_docker_version
+ retries: 4
+ until: avail_docker_version | succeeded
# Don't expect docker rpm to be available on hosts that don't already have it installed:
when: pkg_check.rc == 0
failed_when: false
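
Both repoquery tasks gain the standard Ansible retry idiom: retries only takes effect when paired with until, here the succeeded test on the registered result. As a generic sketch (delay shown explicitly; it defaults to 5 seconds):

    - command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
      register: result
      retries: 4
      delay: 5
      until: result | succeeded
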
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
index 39e82498d..a3446ef84 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -98,7 +98,11 @@
serial: 1
tasks:
- include_role:
- name: etcd_upgrade
+ name: etcd
+ tasks_from: upgrade_image
+ vars:
+ r_etcd_common_etcd_runtime: "host"
+ etcd_peer: "{{ openshift.common.hostname }}"
when:
- ansible_distribution == 'Fedora'
- not openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
index 831ca8f57..e5e895775 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
@@ -5,13 +5,14 @@
- name: Upgrade containerized hosts to {{ etcd_upgrade_version }}
hosts: oo_etcd_hosts_to_upgrade
serial: 1
- roles:
- - role: etcd_upgrade
- r_etcd_upgrade_action: upgrade
- r_etcd_upgrade_mechanism: image
- r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- etcd_peer: "{{ openshift.common.hostname }}"
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: upgrade_image
+ vars:
+ r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ etcd_peer: "{{ openshift.common.hostname }}"
when:
- etcd_container_version | default('99') | version_compare(etcd_upgrade_version,'<')
- openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
index 2e79451e0..a2a26bad4 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
@@ -5,13 +5,14 @@
- name: Upgrade to {{ etcd_upgrade_version }}
hosts: oo_etcd_hosts_to_upgrade
serial: 1
- roles:
- - role: etcd_upgrade
- r_etcd_upgrade_action: upgrade
- r_etcd_upgrade_mechanism: rpm
- r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
- r_etcd_common_etcd_runtime: "host"
- etcd_peer: "{{ openshift.common.hostname }}"
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: upgrade_rpm
+ vars:
+ r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
+ r_etcd_common_etcd_runtime: "host"
+ etcd_peer: "{{ openshift.common.hostname }}"
when:
- etcd_rpm_version.stdout | default('99') | version_compare(etcd_upgrade_version, '<')
- ansible_distribution == 'RedHat'
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
index 497709d25..ad6325ca0 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
@@ -11,3 +11,4 @@
checks:
- disk_availability
- memory_availability
+ - docker_image_availability
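
With this line, docker_image_availability runs alongside the disk and memory checks before upgrades. If a check misfires in a given environment, openshift-ansible's openshift_disable_check inventory variable can skip it (verify the exact name against your checked-out version), e.g.:

    openshift_disable_check=docker_image_availability
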
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
index 4c345dbe8..3c0017891 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
@@ -5,24 +5,9 @@
tasks:
- fail:
msg: >
- This upgrade is only supported for origin, openshift-enterprise, and online
+ This upgrade is only supported for origin and openshift-enterprise
deployment types
- when: deployment_type not in ['origin','openshift-enterprise', 'online']
-
- # osm_cluster_network_cidr, osm_host_subnet_length and openshift_portal_net are
- # required when upgrading to avoid changes that may occur between releases
- # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1451023
- - assert:
- that:
- - "osm_cluster_network_cidr is defined"
- - "osm_host_subnet_length is defined"
- - "openshift_portal_net is defined"
- msg: >
- osm_cluster_network_cidr, osm_host_subnet_length, and openshift_portal_net are required inventory
- variables when upgrading. These variables should match what is currently used in the cluster. If
- you don't remember what these values are you can find them in /etc/origin/master/master-config.yaml
- on a master with the names clusterNetworkCIDR (osm_cluster_network_cidr),
- hostSubnetLength (osm_host_subnet_length), and serviceNetworkCIDR (openshift_portal_net).
+ when: deployment_type not in ['origin','openshift-enterprise']
# Error out in situations where the user has older versions specified in their
# inventory in any of the openshift_release, openshift_image_tag, and
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 9b4a8e413..142ce5f3d 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -27,13 +27,17 @@
- name: Set fact avail_openshift_version
set_fact:
- avail_openshift_version: "{{ repoquery_out.results.versions.available_versions.0 }}"
+ avail_openshift_version: "{{ repoquery_out.results.versions.available_versions_full.0 }}"
+ - name: Set openshift_pkg_version when not specified
+ set_fact:
+ openshift_pkg_version: "-{{ repoquery_out.results.versions.available_versions_full.0 }}"
+ when: openshift_pkg_version | default('') == ''
- name: Verify OpenShift RPMs are available for upgrade
fail:
msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required"
when:
- - avail_openshift_version | default('0.0', True) | version_compare(openshift_release, '<')
+ - openshift_pkg_version | default('0.0', True) | version_compare(openshift_release, '<')
- name: Fail when openshift version does not meet minimum requirement for Origin upgrade
fail:
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
index 164baca81..8cc46ab68 100644
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
@@ -8,7 +8,6 @@
# TODO: If the sdn package isn't already installed this will install it, we
# should fix that
-
- name: Upgrade master packages
package: name={{ master_pkgs | join(',') }} state=present
vars:
@@ -16,7 +15,7 @@
- "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
- "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version}}"
+ - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
- "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- PyYAML
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index b75aae589..4e73293f0 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -189,8 +189,6 @@
roles:
- { role: openshift_cli }
vars:
- origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}"
- ent_reconcile_bindings: true
openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
# Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
# restart.
@@ -201,6 +199,7 @@
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-roles --additive-only=true --confirm -o name
register: reconcile_cluster_role_result
+ when: not openshift.common.version_gte_3_7 | bool
changed_when:
- reconcile_cluster_role_result.stdout != ''
- reconcile_cluster_role_result.rc == 0
@@ -215,7 +214,7 @@
--exclude-groups=system:unauthenticated
--exclude-users=system:anonymous
--additive-only=true --confirm -o name
- when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+ when: not openshift.common.version_gte_3_7 | bool
register: reconcile_bindings_result
changed_when:
- reconcile_bindings_result.stdout != ''
@@ -230,7 +229,7 @@
changed_when:
- reconcile_jenkins_role_binding_result.stdout != ''
- reconcile_jenkins_role_binding_result.rc == 0
- when: openshift.common.version_gte_3_4_or_1_4 | bool
+ when: (not openshift.common.version_gte_3_7 | bool) and (openshift.common.version_gte_3_4_or_1_4 | bool)
- name: Reconcile Security Context Constraints
command: >
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
index a3d0d6305..30e719d8f 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -47,6 +47,10 @@
tags:
- pre_upgrade
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
- include: ../disable_master_excluders.yml
tags:
- pre_upgrade
@@ -71,10 +75,6 @@
# docker is configured and running.
skip_docker_role: True
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
- include: ../../../openshift-master/validate_restart.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 51acd17da..920dc2ffc 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -47,6 +47,14 @@
tags:
- pre_upgrade
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
- include: ../disable_master_excluders.yml
tags:
- pre_upgrade
@@ -71,14 +79,6 @@
# docker is configured and running.
skip_docker_role: True
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
- include: ../../../openshift-master/validate_restart.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 9fe059ac9..7c72564b6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -75,6 +75,10 @@
# docker is configured and running.
skip_docker_role: True
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
- include: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 1b10d4e37..6c1c7c921 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -68,6 +68,10 @@
# docker is configured and running.
skip_docker_role: True
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
- name: Verify masters are already upgraded
hosts: oo_masters_to_config
tags:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 9ec40723a..87621dc85 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -47,6 +47,14 @@
tags:
- pre_upgrade
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
- include: ../disable_master_excluders.yml
tags:
- pre_upgrade
@@ -71,14 +79,6 @@
# docker is configured and running.
skip_docker_role: True
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
- include: ../../../openshift-master/validate_restart.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index f97f34c3b..3549cf6c3 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -75,6 +75,10 @@
# docker is configured and running.
skip_docker_role: True
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
- include: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index e95b90cd5..e5e04e643 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -68,6 +68,10 @@
# docker is configured and running.
skip_docker_role: True
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
- name: Verify masters are already upgraded
hosts: oo_masters_to_config
tags:
diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml
index a2af7bb21..06e0607bd 100644
--- a/playbooks/common/openshift-etcd/migrate.yml
+++ b/playbooks/common/openshift-etcd/migrate.yml
@@ -1,11 +1,13 @@
---
- name: Run pre-checks
hosts: oo_etcd_to_migrate
- roles:
- - role: etcd_migrate
- r_etcd_migrate_action: check
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- etcd_peer: "{{ ansible_default_ipv4.address }}"
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: migrate.pre_check
+ vars:
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ ansible_default_ipv4.address }}"
# TODO: This will be different for release-3.6 branch
- name: Prepare masters for etcd data migration
@@ -65,25 +67,28 @@
- name: Migrate data on first etcd
hosts: oo_etcd_to_migrate[0]
gather_facts: no
- roles:
- - role: etcd_migrate
- r_etcd_migrate_action: migrate
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- etcd_peer: "{{ ansible_default_ipv4.address }}"
- etcd_url_scheme: "https"
- etcd_peer_url_scheme: "https"
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: migrate
+ vars:
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ openshift.common.ip }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
- name: Clean data stores on remaining etcd hosts
hosts: oo_etcd_to_migrate[1:]
gather_facts: no
- roles:
- - role: etcd_migrate
- r_etcd_migrate_action: clean_data
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- etcd_peer: "{{ ansible_default_ipv4.address }}"
- etcd_url_scheme: "https"
- etcd_peer_url_scheme: "https"
- post_tasks:
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: clean_data
+ vars:
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ openshift.common.ip }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
- name: Add etcd hosts
delegate_to: localhost
add_host:
@@ -112,21 +117,23 @@
- name: Add TTLs on the first master
hosts: oo_first_master[0]
- roles:
- - role: etcd_migrate
- r_etcd_migrate_action: add_ttls
- etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].ansible_default_ipv4.address }}"
- etcd_url_scheme: "https"
- etcd_peer_url_scheme: "https"
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: migrate.add_ttls
+ vars:
+ etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].openshift.common.ip }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
when: etcd_migration_failed | length == 0
- name: Configure masters if etcd data migration is successful
hosts: oo_masters_to_config
- roles:
- - role: etcd_migrate
- r_etcd_migrate_action: configure
- when: etcd_migration_failed | length == 0
tasks:
+ - include_role:
+ name: etcd
+ tasks_from: migrate.configure_master
+ when: etcd_migration_failed | length == 0
- debug:
msg: "Skipping master re-configuration since migration failed."
when:
diff --git a/playbooks/common/openshift-etcd/restart.yml b/playbooks/common/openshift-etcd/restart.yml
index af1ef245a..5eaea5ae8 100644
--- a/playbooks/common/openshift-etcd/restart.yml
+++ b/playbooks/common/openshift-etcd/restart.yml
@@ -7,3 +7,21 @@
service:
name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
state: restarted
+ when:
+ - not g_etcd_certificates_expired | default(false) | bool
+
+- name: Restart etcd
+ hosts: oo_etcd_to_config
+ tasks:
+ - name: stop etcd
+ service:
+ name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
+ state: stopped
+ when:
+ - g_etcd_certificates_expired | default(false) | bool
+ - name: start etcd
+ service:
+ name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
+ state: started
+ when:
+ - g_etcd_certificates_expired | default(false) | bool
diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml
index 5f8bb1c7a..4f83264d0 100644
--- a/playbooks/common/openshift-etcd/scaleup.yml
+++ b/playbooks/common/openshift-etcd/scaleup.yml
@@ -23,6 +23,9 @@
-C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }}
member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}
delegate_to: "{{ etcd_ca_host }}"
+ failed_when:
+ - etcd_add_check.rc == 1
+ - ("peerURL exists" not in etcd_add_check.stderr)
register: etcd_add_check
retries: 3
delay: 10
@@ -53,3 +56,21 @@
retries: 3
delay: 30
until: scaleup_health.rc == 0
+
+- name: Update master etcd client urls
+ hosts: oo_masters_to_config
+ serial: 1
+ vars:
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ openshift_master_etcd_hosts: "{{ hostvars
+ | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config']))
+ | oo_collect('openshift.common.hostname')
+ | default(none, true) }}"
+ openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
+ roles:
+ - role: openshift_master_facts
+ post_tasks:
+ - include_role:
+ name: openshift_master
+ tasks_from: update_etcd_client_urls
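
# The failed_when added to the `member add` task above makes scaleup
# re-runnable: exit code 1 is only fatal when etcd did not report that the
# peer URL is already registered. A minimal Python restatement of that
# predicate (illustrative only; the error strings are hypothetical):
#
#     def member_add_failed(rc, stderr):
#         # Mirrors the failed_when above: rc 1 is fatal only when the
#         # member is not already registered.
#         return rc == 1 and 'peerURL exists' not in stderr
#
#     assert member_add_failed(1, 'client: some other error')
#     assert not member_add_failed(1, 'membership: peerURL exists')
#     assert not member_add_failed(0, '')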
diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml
index 7468c78f0..de467a722 100644
--- a/playbooks/common/openshift-master/additional_config.yml
+++ b/playbooks/common/openshift-master/additional_config.yml
@@ -17,7 +17,10 @@
- role: openshift_manageiq
when: openshift_use_manageiq | default(false) | bool
- role: cockpit
- when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
- (osm_use_cockpit | bool or osm_use_cockpit is undefined ) and ( openshift.common.deployment_subtype != 'registry' )
+ when:
+ - not openshift.common.is_atomic
+ - deployment_type == 'openshift-enterprise'
+ - osm_use_cockpit is undefined or osm_use_cockpit | bool
+ - openshift.common.deployment_subtype != 'registry'
- role: flannel_register
when: openshift_use_flannel | default(false) | bool
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index c77d7bb87..2e7646372 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -35,7 +35,9 @@
file:
path: "/etc/origin/{{ item }}"
state: absent
- when: rpmgenerated_config.stat.exists == true and deployment_type in ['openshift-enterprise', 'atomic-enterprise']
+ when:
+ - rpmgenerated_config.stat.exists == true
+ - deployment_type == 'openshift-enterprise'
with_items:
- master
- node
@@ -179,39 +181,51 @@
openshift_master_count: "{{ openshift.master.master_count }}"
openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
- | oo_collect('openshift.common.ip') | default([]) | join(',')
- }}"
- roles:
- - role: os_firewall
- - role: openshift_master
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
| oo_select_keys(groups['oo_etcd_to_config'] | default([]))
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
- openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+ | oo_collect('openshift.common.ip') | default([]) | join(',')
+ }}"
+ roles:
+ - role: os_firewall
+ - role: openshift_master_facts
+ - role: openshift_hosted_facts
+ - role: openshift_master_certificates
+ - role: openshift_etcd_facts
+ - role: openshift_etcd_client_certificates
etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
etcd_cert_prefix: "master.etcd-"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ when: groups.oo_etcd_to_config | default([]) | length != 0
+ - role: openshift_clock
+ - role: openshift_cloud_provider
+ - role: openshift_builddefaults
+ - role: openshift_buildoverrides
+ - role: nickhammond.logrotate
+ - role: contiv
+ contiv_role: netmaster
+ when: openshift_use_contiv | default(False) | bool
+ - role: openshift_master
+ openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
openshift_master_is_scaleup_host: "{{ g_openshift_master_is_scaleup | default(false) }}"
openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}"
openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}"
openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}"
+ - role: nuage_ca
+ when: openshift_use_nuage | default(false) | bool
+ - role: nuage_common
+ when: openshift_use_nuage | default(false) | bool
- role: nuage_master
when: openshift_use_nuage | default(false) | bool
- role: calico_master
when: openshift_use_calico | default(false) | bool
-
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index 17f9ef4bc..8c366e038 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -43,6 +43,8 @@
delay: 1
changed_when: false
+- include: ../openshift-master/set_network_facts.yml
+
- include: ../openshift-master/config.yml
- include: ../openshift-loadbalancer/config.yml
diff --git a/playbooks/common/openshift-master/set_network_facts.yml b/playbooks/common/openshift-master/set_network_facts.yml
new file mode 100644
index 000000000..2ad805858
--- /dev/null
+++ b/playbooks/common/openshift-master/set_network_facts.yml
@@ -0,0 +1,28 @@
+---
+- name: Read first master's config
+ hosts: oo_first_master
+ gather_facts: no
+ tasks:
+ - stat:
+ path: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ register: g_master_config_stat
+ - slurp:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ register: g_master_config_slurp
+
+- name: Set network facts for masters
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - block:
+ - set_fact:
+ osm_cluster_network_cidr: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.clusterNetworkCIDR }}"
+ when: osm_cluster_network_cidr is not defined
+ - set_fact:
+ osm_host_subnet_length: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.hostSubnetLength }}"
+ when: osm_host_subnet_length is not defined
+ - set_fact:
+ openshift_portal_net: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.serviceNetworkCIDR }}"
+ when: openshift_portal_net is not defined
+ when:
+ - hostvars[groups.oo_first_master.0].g_master_config_stat.stat.exists | bool
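
# The two plays above only read the first master's on-disk config when it
# exists, then derive any network facts the user did not supply. A minimal
# Python sketch of the slurp -> b64decode -> from_yaml chain, assuming a
# local master-config.yaml and PyYAML:
#
#     import base64
#     import yaml
#
#     # 'slurp' returns file content base64-encoded; round-trip it here to
#     # mirror g_master_config_slurp.content | b64decode | from_yaml.
#     with open('/etc/origin/master/master-config.yaml', 'rb') as f:
#         encoded = base64.b64encode(f.read())
#
#     network = yaml.safe_load(base64.b64decode(encoded))['networkConfig']
#     print(network['clusterNetworkCIDR'])  # -> osm_cluster_network_cidr
#     print(network['hostSubnetLength'])    # -> osm_host_subnet_length
#     print(network['serviceNetworkCIDR'])  # -> openshift_portal_net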
diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml
index 000e46e80..64ea0d3c4 100644
--- a/playbooks/common/openshift-nfs/config.yml
+++ b/playbooks/common/openshift-nfs/config.yml
@@ -2,5 +2,5 @@
- name: Configure nfs
hosts: oo_nfs_to_config
roles:
- - role: openshift_facts
+ - role: os_firewall
- role: openshift_storage_nfs
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 0801c41ff..5207ca9c8 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -65,12 +65,16 @@
vars:
openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
roles:
- - role: flannel
- etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+ - role: openshift_facts
+ - role: openshift_etcd_facts
+ - role: openshift_etcd_client_certificates
+ etcd_cert_prefix: flannel.etcd-
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
+ - role: flannel
+ etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
when: openshift_use_flannel | default(false) | bool
- role: calico
when: openshift_use_calico | default(false) | bool
diff --git a/playbooks/gcp/openshift-cluster/provision.yml b/playbooks/gcp/openshift-cluster/provision.yml
new file mode 100644
index 000000000..a3d1d46a6
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/provision.yml
@@ -0,0 +1,19 @@
+---
+- name: Ensure all cloud resources necessary for the cluster, including instances, have been started
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+
+ - name: provision a GCP cluster in the specified project
+ include_role:
+ name: openshift_gcp
+
+- name: normalize groups
+ include: ../../byo/openshift-cluster/initialize_groups.yml
+
+- name: run the std_include
+ include: ../../common/openshift-cluster/std_include.yml
+
+- name: run the config
+ include: ../../common/openshift-cluster/config.yml
diff --git a/roles/ansible_service_broker/defaults/main.yml b/roles/ansible_service_broker/defaults/main.yml
index 12929b354..9eb9db316 100644
--- a/roles/ansible_service_broker/defaults/main.yml
+++ b/roles/ansible_service_broker/defaults/main.yml
@@ -1,6 +1,7 @@
---
ansible_service_broker_remove: false
+ansible_service_broker_install: false
ansible_service_broker_log_level: info
ansible_service_broker_output_request: false
ansible_service_broker_recovery: true
diff --git a/roles/ansible_service_broker/tasks/main.yml b/roles/ansible_service_broker/tasks/main.yml
index b46ce8233..d8695bd3a 100644
--- a/roles/ansible_service_broker/tasks/main.yml
+++ b/roles/ansible_service_broker/tasks/main.yml
@@ -2,7 +2,7 @@
# do any asserts here
- include: install.yml
- when: not ansible_service_broker_remove|default(false) | bool
+ when: ansible_service_broker_install | default(false) | bool
- include: remove.yml
- when: ansible_service_broker_remove|default(false) | bool
+ when: ansible_service_broker_remove | default(false) | bool
diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml
index 39f730462..0e3863304 100644
--- a/roles/calico/tasks/main.yml
+++ b/roles/calico/tasks/main.yml
@@ -2,10 +2,14 @@
- name: Calico Node | Error if invalid cert arguments
fail:
msg: "Must provide all or none for the following etcd params: calico_etcd_cert_dir, calico_etcd_ca_cert_file, calico_etcd_cert_file, calico_etcd_key_file, calico_etcd_endpoints"
- when: (calico_etcd_cert_dir is defined or calico_etcd_ca_cert_file is defined or calico_etcd_cert_file is defined or calico_etcd_key_file is defined or calico_etcd_endpoints is defined) and not (calico_etcd_cert_dir is defined and calico_etcd_ca_cert_file is defined and calico_etcd_cert_file is defined and calico_etcd_key_file is defined and calico_etcd_endpoints is defined)
+ when:
+ - calico_etcd_cert_dir is defined or calico_etcd_ca_cert_file is defined or calico_etcd_cert_file is defined or calico_etcd_key_file is defined or calico_etcd_endpoints is defined
+ - not (calico_etcd_cert_dir is defined and calico_etcd_ca_cert_file is defined and calico_etcd_cert_file is defined and calico_etcd_key_file is defined and calico_etcd_endpoints is defined)
- name: Calico Node | Generate OpenShift-etcd certs
- include: ../../../roles/etcd_client_certificates/tasks/main.yml
+ include_role:
+ name: etcd
+ tasks_from: client_certificates
when: calico_etcd_ca_cert_file is not defined or calico_etcd_cert_file is not defined or calico_etcd_key_file is not defined or calico_etcd_endpoints is not defined or calico_etcd_cert_dir is not defined
vars:
etcd_cert_prefix: calico.etcd-
@@ -28,18 +32,18 @@
msg: "Invalid etcd configuration for calico."
when: item is not defined or item == ''
with_items:
- - calico_etcd_ca_cert_file
- - calico_etcd_cert_file
- - calico_etcd_key_file
- - calico_etcd_endpoints
+ - calico_etcd_ca_cert_file
+ - calico_etcd_cert_file
+ - calico_etcd_key_file
+ - calico_etcd_endpoints
- name: Calico Node | Ensure the calico certs are present
stat:
path: "{{ item }}"
with_items:
- - "{{ calico_etcd_ca_cert_file }}"
- - "{{ calico_etcd_cert_file }}"
- - "{{ calico_etcd_key_file }}"
+ - "{{ calico_etcd_ca_cert_file }}"
+ - "{{ calico_etcd_cert_file }}"
+ - "{{ calico_etcd_key_file }}"
- name: Calico Node | Configure Calico service unit file
template:
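
# The rewritten condition above enforces an all-or-none rule for the five
# calico etcd certificate parameters. A hedged Python restatement of the
# check (parameter values are hypothetical; only the pattern matters):
#
#     cert_params = {
#         'calico_etcd_cert_dir': '/etc/calico/certs',
#         'calico_etcd_ca_cert_file': None,
#         'calico_etcd_cert_file': None,
#         'calico_etcd_key_file': None,
#         'calico_etcd_endpoints': None,
#     }
#     defined = [v is not None for v in cert_params.values()]
#     invalid = any(defined) and not all(defined)
#     assert invalid  # this combination would trip the fail task above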
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index 7e206ded1..274fd8603 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -1,6 +1,20 @@
---
docker_cli_auth_config_path: '/root/.docker'
-oreg_url: ''
-oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
+# oreg_url is defined by user input.
+oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
oreg_auth_credentials_replace: False
+
+openshift_docker_additional_registries: []
+openshift_docker_blocked_registries: []
+openshift_docker_insecure_registries: []
+
+# The l2_docker_* variables convert csv strings to lists, if
+# necessary. These variables should be used in place of their respective
+# openshift_docker_* counterparts to ensure the properly formatted lists are
+# utilized.
+l2_docker_additional_registries: "{% if openshift_docker_additional_registries is string %}{% if openshift_docker_additional_registries == '' %}[]{% elif ',' in openshift_docker_additional_registries %}{{ openshift_docker_additional_registries.split(',') | list }}{% else %}{{ [ openshift_docker_additional_registries ] }}{% endif %}{% else %}{{ openshift_docker_additional_registries }}{% endif %}"
+l2_docker_blocked_registries: "{% if openshift_docker_blocked_registries is string %}{% if openshift_docker_blocked_registries == '' %}[]{% elif ',' in openshift_docker_blocked_registries %}{{ openshift_docker_blocked_registries.split(',') | list }}{% else %}{{ [ openshift_docker_blocked_registries ] }}{% endif %}{% else %}{{ openshift_docker_blocked_registries }}{% endif %}"
+l2_docker_insecure_registries: "{% if openshift_docker_insecure_registries is string %}{% if openshift_docker_insecure_registries == '' %}[]{% elif ',' in openshift_docker_insecure_registries %}{{ openshift_docker_insecure_registries.split(',') | list }}{% else %}{{ [ openshift_docker_insecure_registries ] }}{% endif %}{% else %}{{ openshift_docker_insecure_registries }}{% endif %}"
+
+containers_registries_conf_path: /etc/containers/registries.conf
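
# The l2_docker_* expressions above normalize whatever shape the user
# supplied -- an empty string, a CSV string, a single registry name, or an
# actual list -- into a list. A Python equivalent of that Jinja logic
# (illustrative; the registry names are placeholders):
#
#     def normalize_registries(value):
#         # Same branches as the Jinja expression: string -> split or wrap,
#         # empty string -> [], anything else passed through unchanged.
#         if isinstance(value, str):
#             if value == '':
#                 return []
#             if ',' in value:
#                 return value.split(',')
#             return [value]
#         return value
#
#     assert normalize_registries('') == []
#     assert normalize_registries('reg1.example.com,reg2.example.com') == \
#         ['reg1.example.com', 'reg2.example.com']
#     assert normalize_registries('registry.access.redhat.com') == \
#         ['registry.access.redhat.com']
#     assert normalize_registries(['a', 'b']) == ['a', 'b']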
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 78c6671d8..7ece0e061 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -26,3 +26,4 @@
include: systemcontainer_crio.yml
when:
- l_use_crio
+ - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config']
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index 145b552a6..0c5621259 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -3,6 +3,8 @@
command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
when: not openshift.common.is_atomic | bool
register: curr_docker_version
+ retries: 4
+ until: curr_docker_version | succeeded
changed_when: false
- name: Error out if Docker pre-installed but too old
@@ -51,22 +53,22 @@
- stat: path=/etc/sysconfig/docker
register: docker_check
-- name: Set registry params
+- name: Comment old registry params in /etc/sysconfig/docker
lineinfile:
dest: /etc/sysconfig/docker
regexp: '^{{ item.reg_conf_var }}=.*$'
- line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
- when: item.reg_fact_val != '' and docker_check.stat.isreg is defined and docker_check.stat.isreg
+ line: "#{{ item.reg_conf_var }}=''# Moved to {{ containers_registries_conf_path }}"
with_items:
- reg_conf_var: ADD_REGISTRY
- reg_fact_val: "{{ docker_additional_registries | default(None, true)}}"
- reg_flag: --add-registry
- reg_conf_var: BLOCK_REGISTRY
- reg_fact_val: "{{ docker_blocked_registries| default(None, true) }}"
- reg_flag: --block-registry
- reg_conf_var: INSECURE_REGISTRY
- reg_fact_val: "{{ docker_insecure_registries| default(None, true) }}"
- reg_flag: --insecure-registry
+ notify:
+ - restart docker
+
+- name: Place additional/blocked/insecure registries in /etc/containers/registries.conf
+ template:
+ dest: "{{ containers_registries_conf_path }}"
+ src: registries.conf
notify:
- restart docker
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index 0bab0899c..e6fc2db06 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -108,18 +108,22 @@
l_crio_image_name: "cri-o"
when: ansible_distribution == "RedHat"
- # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504
- - name: Use a testing registry if requested
- set_fact:
- l_crio_image_prepend: "{{ openshift_crio_systemcontainer_image_registry_override }}"
- when:
- - openshift_crio_systemcontainer_image_registry_override is defined
- - openshift_crio_systemcontainer_image_registry_override != ""
-
- name: Set the full image name
set_fact:
l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:latest"
+ # For https://github.com/openshift/aos-cd-jobs/pull/624#pullrequestreview-61816548
+ - name: Use a specific image if requested
+ set_fact:
+ l_crio_image: "{{ openshift_crio_systemcontainer_image_override }}"
+ when:
+ - openshift_crio_systemcontainer_image_override is defined
+ - openshift_crio_systemcontainer_image_override != ""
+
+ # Be nice and let the user see the variable result
+ - debug:
+ var: l_crio_image
+
# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
- name: Pre-pull CRI-O System Container image
command: "atomic pull --storage ostree {{ l_crio_image }}"
@@ -134,6 +138,14 @@
image: "{{ l_crio_image }}"
state: latest
+- name: Remove CRI-O default configuration files
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /etc/cni/net.d/200-loopback.conf
+ - /etc/cni/net.d/100-crio-bridge.conf
+
- name: Create the CRI-O configuration
template:
dest: /etc/crio/crio.conf
diff --git a/roles/docker/templates/registries.conf b/roles/docker/templates/registries.conf
new file mode 100644
index 000000000..c55dbd84f
--- /dev/null
+++ b/roles/docker/templates/registries.conf
@@ -0,0 +1,46 @@
+# {{ ansible_managed }}
+# This is a system-wide configuration file used to
+# keep track of registries for various container backends.
+# It adheres to YAML format and does not support recursive
+# lists of registries.
+
+# The default location for this configuration file is /etc/containers/registries.conf.
+
+# The only valid categories are: 'registries', 'insecure_registries',
+# and 'block_registries'.
+
+
+#registries:
+# - registry.access.redhat.com
+
+{% if l2_docker_additional_registries %}
+registries:
+{% for reg in l2_docker_additional_registries %}
+ - {{ reg }}
+{% endfor %}
+{% endif %}
+
+# If you need to access insecure registries, uncomment the section below
+# and add the registry's fully-qualified name. An insecure registry is one
+# that does not have a valid SSL certificate or only does HTTP.
+#insecure_registries:
+# -
+
+{% if l2_docker_insecure_registries %}
+insecure_registries:
+{% for reg in l2_docker_insecure_registries %}
+ - {{ reg }}
+{% endfor %}
+{% endif %}
+
+# If you need to block pull access from a registry, uncomment the section below
+# and add the registry's fully-qualified name.
+#block_registries:
+# -
+
+{% if l2_docker_blocked_registries %}
+block_registries:
+{% for reg in l2_docker_blocked_registries %}
+ - {{ reg }}
+{% endfor %}
+{% endif %}
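
# To see the shape this template emits, here is a hedged rendering of just
# the insecure_registries section with jinja2 (Ansible renders templates
# with trim_blocks enabled, which the sketch reproduces):
#
#     from jinja2 import Template  # pip install jinja2
#
#     snippet = (
#         '{% if l2_docker_insecure_registries %}\n'
#         'insecure_registries:\n'
#         '{% for reg in l2_docker_insecure_registries %}\n'
#         '  - {{ reg }}\n'
#         '{% endfor %}\n'
#         '{% endif %}\n'
#     )
#     print(Template(snippet, trim_blocks=True).render(
#         l2_docker_insecure_registries=['172.30.0.0/16']))
#     # insecure_registries:
#     #   - 172.30.0.0/16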
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index 9a955c822..537c01c0e 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -18,5 +18,5 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_os_firewall
-- role: etcd_server_certificates
+- role: lib_utils
- role: etcd_common
diff --git a/roles/etcd_migrate/tasks/clean_data.yml b/roles/etcd/tasks/auxiliary/clean_data.yml
index 95a0e7c0a..95a0e7c0a 100644
--- a/roles/etcd_migrate/tasks/clean_data.yml
+++ b/roles/etcd/tasks/auxiliary/clean_data.yml
diff --git a/roles/etcd/tasks/ca.yml b/roles/etcd/tasks/ca.yml
new file mode 100644
index 000000000..7cda49069
--- /dev/null
+++ b/roles/etcd/tasks/ca.yml
@@ -0,0 +1,2 @@
+---
+- include: ca/deploy.yml
diff --git a/roles/etcd_ca/tasks/main.yml b/roles/etcd/tasks/ca/deploy.yml
index b4dea4a07..3d32290a2 100644
--- a/roles/etcd_ca/tasks/main.yml
+++ b/roles/etcd/tasks/ca/deploy.yml
@@ -1,6 +1,8 @@
---
- name: Install openssl
- package: name=openssl state=present
+ package:
+ name: openssl
+ state: present
when: not etcd_is_atomic | bool
delegate_to: "{{ etcd_ca_host }}"
run_once: true
diff --git a/roles/etcd/tasks/clean_data.yml b/roles/etcd/tasks/clean_data.yml
new file mode 100644
index 000000000..d131ffd21
--- /dev/null
+++ b/roles/etcd/tasks/clean_data.yml
@@ -0,0 +1,2 @@
+---
+- include: auxiliary/clean_data.yml
diff --git a/roles/etcd/tasks/client_certificates.yml b/roles/etcd/tasks/client_certificates.yml
new file mode 100644
index 000000000..2e9c078b9
--- /dev/null
+++ b/roles/etcd/tasks/client_certificates.yml
@@ -0,0 +1,2 @@
+---
+- include: client_certificates/fetch_from_ca.yml
diff --git a/roles/etcd_client_certificates/tasks/main.yml b/roles/etcd/tasks/client_certificates/fetch_from_ca.yml
index bbd29ece1..119071a72 100644
--- a/roles/etcd_client_certificates/tasks/main.yml
+++ b/roles/etcd/tasks/client_certificates/fetch_from_ca.yml
@@ -9,7 +9,7 @@
- fail:
msg: >
CA certificate {{ etcd_ca_cert }} doesn't exist on CA host
- {{ etcd_ca_host }}. Apply 'etcd_ca' role to
+ {{ etcd_ca_host }}. Apply the 'etcd_ca' action from the `etcd` role to
{{ etcd_ca_host }}.
when: not g_ca_cert_stat_result.stat.exists | bool
run_once: true
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 78e543ef1..870c11ad4 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -1,4 +1,6 @@
---
+- include: server_certificates.yml
+
- name: Set hostname and ip facts
set_fact:
# Store etcd_hostname and etcd_ip such that they will be available
diff --git a/roles/etcd/tasks/migrate.add_ttls.yml b/roles/etcd/tasks/migrate.add_ttls.yml
new file mode 100644
index 000000000..bc27e4ea1
--- /dev/null
+++ b/roles/etcd/tasks/migrate.add_ttls.yml
@@ -0,0 +1,2 @@
+---
+- include: migration/add_ttls.yml
diff --git a/roles/etcd/tasks/migrate.configure_master.yml b/roles/etcd/tasks/migrate.configure_master.yml
new file mode 100644
index 000000000..3ada6e362
--- /dev/null
+++ b/roles/etcd/tasks/migrate.configure_master.yml
@@ -0,0 +1,2 @@
+---
+- include: migration/configure_master.yml
diff --git a/roles/etcd/tasks/migrate.pre_check.yml b/roles/etcd/tasks/migrate.pre_check.yml
new file mode 100644
index 000000000..124d21561
--- /dev/null
+++ b/roles/etcd/tasks/migrate.pre_check.yml
@@ -0,0 +1,2 @@
+---
+- include: migration/check.yml
diff --git a/roles/etcd/tasks/migrate.yml b/roles/etcd/tasks/migrate.yml
new file mode 100644
index 000000000..5d5385873
--- /dev/null
+++ b/roles/etcd/tasks/migrate.yml
@@ -0,0 +1,2 @@
+---
+- include: migration/migrate.yml
diff --git a/roles/etcd_migrate/tasks/add_ttls.yml b/roles/etcd/tasks/migration/add_ttls.yml
index c10465af9..14625e49e 100644
--- a/roles/etcd_migrate/tasks/add_ttls.yml
+++ b/roles/etcd/tasks/migration/add_ttls.yml
@@ -8,6 +8,7 @@
accessTokenMaxAgeSeconds: "{{ (g_master_config_output.content|b64decode|from_yaml).oauthConfig.tokenConfig.accessTokenMaxAgeSeconds | default(86400) }}"
authroizeTokenMaxAgeSeconds: "{{ (g_master_config_output.content|b64decode|from_yaml).oauthConfig.tokenConfig.authroizeTokenMaxAgeSeconds | default(500) }}"
controllerLeaseTTL: "{{ (g_master_config_output.content|b64decode|from_yaml).controllerLeaseTTL | default(30) }}"
+
- name: Re-introduce leases (as a replacement for key TTLs)
command: >
oadm migrate etcd-ttl \
diff --git a/roles/etcd_migrate/tasks/check.yml b/roles/etcd/tasks/migration/check.yml
index 0804d9e1c..0804d9e1c 100644
--- a/roles/etcd_migrate/tasks/check.yml
+++ b/roles/etcd/tasks/migration/check.yml
diff --git a/roles/etcd_migrate/tasks/check_cluster_health.yml b/roles/etcd/tasks/migration/check_cluster_health.yml
index 201d83f99..201d83f99 100644
--- a/roles/etcd_migrate/tasks/check_cluster_health.yml
+++ b/roles/etcd/tasks/migration/check_cluster_health.yml
diff --git a/roles/etcd_migrate/tasks/check_cluster_status.yml b/roles/etcd/tasks/migration/check_cluster_status.yml
index b69fb5a52..b69fb5a52 100644
--- a/roles/etcd_migrate/tasks/check_cluster_status.yml
+++ b/roles/etcd/tasks/migration/check_cluster_status.yml
diff --git a/roles/etcd_migrate/tasks/configure.yml b/roles/etcd/tasks/migration/configure_master.yml
index a305d5bf3..a305d5bf3 100644
--- a/roles/etcd_migrate/tasks/configure.yml
+++ b/roles/etcd/tasks/migration/configure_master.yml
diff --git a/roles/etcd_migrate/tasks/migrate.yml b/roles/etcd/tasks/migration/migrate.yml
index 54a9c74ff..54a9c74ff 100644
--- a/roles/etcd_migrate/tasks/migrate.yml
+++ b/roles/etcd/tasks/migration/migrate.yml
diff --git a/roles/etcd/tasks/server_certificates.yml b/roles/etcd/tasks/server_certificates.yml
new file mode 100644
index 000000000..f0ba58b6e
--- /dev/null
+++ b/roles/etcd/tasks/server_certificates.yml
@@ -0,0 +1,2 @@
+---
+- include: server_certificates/fetch_from_ca.yml
diff --git a/roles/etcd_server_certificates/tasks/main.yml b/roles/etcd/tasks/server_certificates/fetch_from_ca.yml
index 4795188a6..064fe1952 100644
--- a/roles/etcd_server_certificates/tasks/main.yml
+++ b/roles/etcd/tasks/server_certificates/fetch_from_ca.yml
@@ -1,6 +1,12 @@
---
+- include: ../ca/deploy.yml
+ when:
+ - etcd_ca_setup | default(True) | bool
+
- name: Install etcd
- package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
+ package:
+ name: "etcd{{ '-' + etcd_version if etcd_version is defined else '' }}"
+ state: present
when: not etcd_is_containerized | bool
- name: Check status of etcd certificates
diff --git a/roles/etcd_upgrade/tasks/upgrade_image.yml b/roles/etcd/tasks/upgrade/upgrade_image.yml
index 136ec1142..cea95a1b3 100644
--- a/roles/etcd_upgrade/tasks/upgrade_image.yml
+++ b/roles/etcd/tasks/upgrade/upgrade_image.yml
@@ -29,8 +29,15 @@
## TODO: probably should just move this into the backup playbooks, also this
## will fail on atomic host. We need to revisit how to do etcd backups there as
## the container may be newer than etcdctl on the host. Assumes etcd3 obsoletes etcd (7.3.1)
+- name: Detecting Atomic Host Operating System
+ stat:
+ path: /run/ostree-booted
+ register: l_ostree_booted
+
- name: Upgrade etcd for etcdctl when not atomic
- package: name=etcd state=latest
+ package:
+ name: etcd
+ state: latest
when: not l_ostree_booted.stat.exists | bool
- name: Verify cluster is healthy
diff --git a/roles/etcd_upgrade/tasks/upgrade_rpm.yml b/roles/etcd/tasks/upgrade/upgrade_rpm.yml
index 324b69605..324b69605 100644
--- a/roles/etcd_upgrade/tasks/upgrade_rpm.yml
+++ b/roles/etcd/tasks/upgrade/upgrade_rpm.yml
diff --git a/roles/etcd/tasks/upgrade_image.yml b/roles/etcd/tasks/upgrade_image.yml
new file mode 100644
index 000000000..9e69027eb
--- /dev/null
+++ b/roles/etcd/tasks/upgrade_image.yml
@@ -0,0 +1,2 @@
+---
+- include: upgrade/upgrade_image.yml
diff --git a/roles/etcd/tasks/upgrade_rpm.yml b/roles/etcd/tasks/upgrade_rpm.yml
new file mode 100644
index 000000000..29603d2b6
--- /dev/null
+++ b/roles/etcd/tasks/upgrade_rpm.yml
@@ -0,0 +1,2 @@
+---
+- include: upgrade/upgrade_rpm.yml
diff --git a/roles/etcd_ca/templates/openssl_append.j2 b/roles/etcd/templates/openssl_append.j2
index f28316fc2..f28316fc2 100644
--- a/roles/etcd_ca/templates/openssl_append.j2
+++ b/roles/etcd/templates/openssl_append.j2
diff --git a/roles/etcd_ca/README.md b/roles/etcd_ca/README.md
deleted file mode 100644
index 60a880e30..000000000
--- a/roles/etcd_ca/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-etcd_ca
-========================
-
-TODO
-
-Requirements
-------------
-
-TODO
-
-Role Variables
---------------
-
-TODO
-
-Dependencies
-------------
-
-TODO
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License Version 2.0
-
-Author Information
-------------------
-
-Scott Dodson (sdodson@redhat.com)
diff --git a/roles/etcd_client_certificates/README.md b/roles/etcd_client_certificates/README.md
deleted file mode 100644
index 269d5296d..000000000
--- a/roles/etcd_client_certificates/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-OpenShift Etcd Certificates
-===========================
-
-TODO
-
-Requirements
-------------
-
-TODO
-
-Role Variables
---------------
-
-TODO
-
-Dependencies
-------------
-
-TODO
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License Version 2.0
-
-Author Information
-------------------
-
-Scott Dodson (sdodson@redhat.com)
diff --git a/roles/etcd_client_certificates/meta/main.yml b/roles/etcd_client_certificates/meta/main.yml
deleted file mode 100644
index efebdb599..000000000
--- a/roles/etcd_client_certificates/meta/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-galaxy_info:
- author: Jason DeTiberus
- description: Etcd Client Certificates
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 2.1
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
- - system
-dependencies:
-- role: etcd_common
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index b67411f40..6705e1ac5 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -73,3 +73,6 @@ etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_clien
etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d"
+
+# etcd_peer needs to be set by a role caller
+etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}"
diff --git a/roles/etcd_common/tasks/backup.yml b/roles/etcd_common/tasks/backup.yml
index c1580640f..42d27c081 100644
--- a/roles/etcd_common/tasks/backup.yml
+++ b/roles/etcd_common/tasks/backup.yml
@@ -36,7 +36,7 @@
- name: Abort if insufficient disk space for etcd backup
fail:
msg: >
- {{ l_etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+ {{ l_etcd_disk_usage.stdout|int*2 }} Kb disk space required for etcd backup,
{{ l_avail_disk.stdout }} Kb available.
when: l_etcd_disk_usage.stdout|int*2 > l_avail_disk.stdout|int
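
# The corrected message above now reports the same doubled figure the guard
# actually tests. A worked example with illustrative numbers:
#
#     etcd_disk_usage_kb = 1500000        # du of the etcd data dir
#     avail_disk_kb = 2000000             # free space on the backup volume
#     required_kb = etcd_disk_usage_kb * 2
#     assert required_kb > avail_disk_kb  # play aborts and reports:
#     # "3000000 Kb disk space required for etcd backup, 2000000 Kb available."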
diff --git a/roles/etcd_migrate/README.md b/roles/etcd_migrate/README.md
deleted file mode 100644
index 369e78ff2..000000000
--- a/roles/etcd_migrate/README.md
+++ /dev/null
@@ -1,53 +0,0 @@
-Role Name
-=========
-
-Offline etcd migration of data from v2 to v3
-
-Requirements
-------------
-
-It is expected all consumers of the etcd data are not accessing the data.
-Otherwise the migrated data can be out-of-sync with the v2 and can result in unhealthy etcd cluster.
-
-The role itself is responsible for:
-- checking etcd cluster health and raft status before the migration
-- checking of presence of any v3 data (in that case the migration is stopped)
-- migration of v2 data to v3 data (including attaching leases of keys prefixed with "/kubernetes.io/events" and "/kubernetes.io/masterleases" string)
-- validation of migrated data (all v2 keys and in v3 keys and are set to the identical value)
-
-The migration itself requires an etcd member to be down in the process. Once the migration is done, the etcd member is started.
-
-Role Variables
---------------
-
-TBD
-
-Dependencies
-------------
-
-- etcd_common
-- lib_utils
-
-Example Playbook
-----------------
-
-```yaml
-- name: Migrate etcd data from v2 to v3
- hosts: oo_etcd_to_config
- gather_facts: no
- tasks:
- - include_role:
- name: openshift_etcd_migrate
- vars:
- etcd_peer: "{{ ansible_default_ipv4.address }}"
-```
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-Jan Chaloupka (jchaloup@redhat.com)
diff --git a/roles/etcd_migrate/defaults/main.yml b/roles/etcd_migrate/defaults/main.yml
deleted file mode 100644
index 05cf41fbb..000000000
--- a/roles/etcd_migrate/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# Default action when calling this role, choices: check, migrate, configure
-r_etcd_migrate_action: migrate
diff --git a/roles/etcd_migrate/meta/main.yml b/roles/etcd_migrate/meta/main.yml
deleted file mode 100644
index f3cabbef6..000000000
--- a/roles/etcd_migrate/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-galaxy_info:
- author: Jan Chaloupka
- description: Etcd migration
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 2.1
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
- - system
-dependencies:
-- { role: etcd_common }
-- { role: lib_utils }
diff --git a/roles/etcd_migrate/tasks/main.yml b/roles/etcd_migrate/tasks/main.yml
deleted file mode 100644
index e82f6a6b4..000000000
--- a/roles/etcd_migrate/tasks/main.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: Fail if invalid r_etcd_migrate_action provided
- fail:
- msg: "etcd_migrate role can only be called with 'check', 'migrate', 'configure', 'add_ttls', or 'clean_data'"
- when: r_etcd_migrate_action not in ['check', 'migrate', 'configure', 'add_ttls', 'clean_data']
-
-- name: Include main action task file
- include: "{{ r_etcd_migrate_action }}.yml"
-
-# 2. migrate v2 datadir into v3:
-# ETCDCTL_API=3 ./etcdctl migrate --data-dir=${data_dir} --no-ttl
-# backup the etcd datadir first
-# Provide a way for an operator to specify transformer
-
-# 3. re-configure OpenShift master at /etc/origin/master/master-config.yml
-# set storage-backend to “etcd3”
-# 4. we could leave the master restart to current logic (there is already the code ready (single vs. HA master))
-
-# Run
-# etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt --endpoint https://172.16.186.45:2379 cluster-health
-# to check the cluster health (from the etcdctl.sh aliases file)
-
-# Another assumption:
-# - in order to migrate all etcd v2 data into v3, we need to shut down the cluster (let's verify that on Wednesday meeting)
-# -
diff --git a/roles/etcd_server_certificates/README.md b/roles/etcd_server_certificates/README.md
deleted file mode 100644
index 269d5296d..000000000
--- a/roles/etcd_server_certificates/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-OpenShift Etcd Certificates
-===========================
-
-TODO
-
-Requirements
-------------
-
-TODO
-
-Role Variables
---------------
-
-TODO
-
-Dependencies
-------------
-
-TODO
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License Version 2.0
-
-Author Information
-------------------
-
-Scott Dodson (sdodson@redhat.com)
diff --git a/roles/etcd_server_certificates/meta/main.yml b/roles/etcd_server_certificates/meta/main.yml
deleted file mode 100644
index 4b6013a49..000000000
--- a/roles/etcd_server_certificates/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-galaxy_info:
- author: Jason DeTiberus
- description: Etcd Server Certificates
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 2.1
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
- - system
-dependencies:
-- role: etcd_ca
- when: (etcd_ca_setup | default(True) | bool)
diff --git a/roles/etcd_upgrade/defaults/main.yml b/roles/etcd_upgrade/defaults/main.yml
deleted file mode 100644
index 61bbba225..000000000
--- a/roles/etcd_upgrade/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-r_etcd_upgrade_action: upgrade
-r_etcd_upgrade_mechanism: rpm
diff --git a/roles/etcd_upgrade/meta/main.yml b/roles/etcd_upgrade/meta/main.yml
deleted file mode 100644
index afdb0267f..000000000
--- a/roles/etcd_upgrade/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-galaxy_info:
- author: Jan Chaloupka
- description:
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 1.9
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
- - system
-dependencies:
-- role: etcd_common
- r_etcd_common_embedded_etcd: "{{ r_etcd_upgrade_embedded_etcd }}"
diff --git a/roles/etcd_upgrade/tasks/main.yml b/roles/etcd_upgrade/tasks/main.yml
deleted file mode 100644
index 129c69d6b..000000000
--- a/roles/etcd_upgrade/tasks/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# INPUT r_etcd_upgrade_action
-- name: Fail if invalid etcd_upgrade_action provided
- fail:
- msg: "etcd_upgrade role can only be called with 'upgrade'"
- when:
- - r_etcd_upgrade_action not in ['upgrade']
-
-- name: Detecting Atomic Host Operating System
- stat:
- path: /run/ostree-booted
- register: l_ostree_booted
-
-- include: "{{ r_etcd_upgrade_action }}.yml"
diff --git a/roles/etcd_upgrade/tasks/upgrade.yml b/roles/etcd_upgrade/tasks/upgrade.yml
deleted file mode 100644
index 420c9638e..000000000
--- a/roles/etcd_upgrade/tasks/upgrade.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-# INPUT r_etcd_upgrade_version
-# INPUT r_etcd_upgrade_mechanism
-- name: Failt if r_etcd_upgrade_mechanism is not set during upgrade
- fail:
- msg: "r_etcd_upgrade_mechanism can be only set to 'rpm' or 'image'"
- when:
- - r_etcd_upgrade_mechanism not in ['rpm', 'image']
-
-- name: "Upgrade {{ r_etcd_upgrade_mechanism }} based etcd"
- include: upgrade_{{ r_etcd_upgrade_mechanism }}.yml
diff --git a/roles/etcd_upgrade/vars/main.yml b/roles/etcd_upgrade/vars/main.yml
deleted file mode 100644
index 5ed919d42..000000000
--- a/roles/etcd_upgrade/vars/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# EXPECTS etcd_peer
-etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}"
diff --git a/roles/flannel/README.md b/roles/flannel/README.md
index 0c7347603..b9e15e6e0 100644
--- a/roles/flannel/README.md
+++ b/roles/flannel/README.md
@@ -27,8 +27,6 @@ Role Variables
Dependencies
------------
-openshift_facts
-
Example Playbook
----------------
diff --git a/roles/flannel/meta/main.yml b/roles/flannel/meta/main.yml
index 35f825586..51128dba6 100644
--- a/roles/flannel/meta/main.yml
+++ b/roles/flannel/meta/main.yml
@@ -12,7 +12,4 @@ galaxy_info:
categories:
- cloud
- system
-dependencies:
-- role: openshift_facts
-- role: openshift_etcd_client_certificates
- etcd_cert_prefix: flannel.etcd-
+dependencies: []
diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml
index 71c8f38c3..1d0f5df6a 100644
--- a/roles/flannel_register/defaults/main.yaml
+++ b/roles/flannel_register/defaults/main.yaml
@@ -1,6 +1,6 @@
---
flannel_network: "{{ openshift.master.sdn_cluster_network_cidr }}"
-flannel_subnet_len: "{{ 32 - openshift.master.sdn_host_subnet_length }}"
+flannel_subnet_len: "{{ 32 - (openshift.master.sdn_host_subnet_length | int) }}"
flannel_etcd_key: /openshift.com/network
etcd_hosts: "{{ etcd_urls }}"
etcd_conf_dir: "{{ openshift.common.config_base }}/master"
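
# The `| int` cast above matters because inventory and fact values often
# arrive as strings, and Jinja cannot subtract a string from an integer.
# A small demonstration, assuming jinja2 is installed:
#
#     from jinja2 import Template
#
#     t = Template('{{ 32 - (sdn_host_subnet_length | int) }}')
#     print(t.render(sdn_host_subnet_length='9'))  # -> 23
#     # Without the cast, 32 - '9' raises a TypeError during templating.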
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index 45d7444a4..1e6eb2386 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -745,7 +745,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
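
# The same one-line change is applied to every generated lib_openshift
# module below: dropping `or {}` stops Yedit from coercing legitimately
# falsy stored values (0, False, '', []) into an empty dict, so callers can
# now tell them apart from a missing key, which returns None. A hedged
# demonstration with a plain dict standing in for the parsed YAML:
#
#     def get(data, key):
#         # Stand-in for yamlfile.get(): None when the key is absent.
#         return data.get(key)
#
#     data = {'replicas': 0, 'labels': [], 'enabled': False}
#     for key in data:
#         assert (get(data, key) or {}) == {}  # old behavior: value lost
#         assert get(data, key) == data[key]   # new behavior: value kept
#     assert get(data, 'missing') is None      # absent key -> None, not {}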
diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py
index 231857cca..8c6a81cc8 100644
--- a/roles/lib_openshift/library/oc_adm_csr.py
+++ b/roles/lib_openshift/library/oc_adm_csr.py
@@ -723,7 +723,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index 44f3f57d8..4a7847e88 100644
--- a/roles/lib_openshift/library/oc_adm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -731,7 +731,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index 687cff579..b8af5cad9 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -717,7 +717,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index ddf5d90b7..3364f8de3 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -717,7 +717,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index c00eee381..c64d7ffd2 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -835,7 +835,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index 0c925ab0b..492494bda 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -860,7 +860,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index 567ecfd4e..b412ca8af 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -709,7 +709,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
index 9515de569..8bbc22c49 100644
--- a/roles/lib_openshift/library/oc_configmap.py
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -715,7 +715,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index d461e5ae9..ad17051cb 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -759,7 +759,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index 22ad58725..74a84ac89 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -726,7 +726,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
index b6c6e47d9..eea1516ae 100644
--- a/roles/lib_openshift/library/oc_group.py
+++ b/roles/lib_openshift/library/oc_group.py
@@ -699,7 +699,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
index f7fc286e0..dc33d3b8a 100644
--- a/roles/lib_openshift/library/oc_image.py
+++ b/roles/lib_openshift/library/oc_image.py
@@ -718,7 +718,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index 2206878a4..88fd9554d 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -735,7 +735,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 126d7a617..8408f9ebc 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -738,7 +738,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index d20904d0d..d1be0b534 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -670,7 +670,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 91199d093..9a281e6cd 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -727,7 +727,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index f9b2d81fa..b503c330b 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -724,7 +724,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index 895322ba5..7a9e3bf89 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -731,7 +731,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index 8f8e46e1e..875e473ad 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -769,7 +769,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index 7130cc5fc..ec3635753 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -713,7 +713,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index 0c4b99e30..c010607e8 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -765,7 +765,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index 7ab139e85..e83a6e26d 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -772,7 +772,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index 5d539ced4..0d46bbf96 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -711,7 +711,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index 97e213f46..662d77ec1 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -711,7 +711,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py
index 9339a85e5..574f109e4 100644
--- a/roles/lib_openshift/library/oc_storageclass.py
+++ b/roles/lib_openshift/library/oc_storageclass.py
@@ -729,7 +729,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
index 2fa349547..e430546ee 100644
--- a/roles/lib_openshift/library/oc_user.py
+++ b/roles/lib_openshift/library/oc_user.py
@@ -771,7 +771,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index 55e1054e7..a12620968 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -683,7 +683,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index 63bad57b4..134b2ad19 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -760,7 +760,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_openshift/src/test/integration/oc_configmap.yml b/roles/lib_openshift/src/test/integration/oc_configmap.yml
index c0d200e73..6a452ccec 100755
--- a/roles/lib_openshift/src/test/integration/oc_configmap.yml
+++ b/roles/lib_openshift/src/test/integration/oc_configmap.yml
@@ -55,7 +55,7 @@
config: "{{ filename }}"
from_literal:
foo: notbar
- deployment_type: online
+ deployment_type: openshift-enterprise
- name: fetch the updated configmap
oc_configmap:
@@ -70,7 +70,7 @@
assert:
that:
- cmout.results.results[0].metadata.name == 'configmaptest'
- - cmout.results.results[0].data.deployment_type == 'online'
+ - cmout.results.results[0].data.deployment_type == 'openshift-enterprise'
- cmout.results.results[0].data.foo == 'notbar'
###### end update test ###########
diff --git a/roles/lib_openshift/src/test/unit/test_oc_configmap.py b/roles/lib_openshift/src/test/unit/test_oc_configmap.py
index 318fd6167..27042c64b 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_configmap.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_configmap.py
@@ -79,7 +79,7 @@ class OCConfigMapTest(unittest.TestCase):
''' Testing a configmap create '''
params = copy.deepcopy(OCConfigMapTest.params)
params['from_file'] = {'test': '/root/file'}
- params['from_literal'] = {'foo': 'bar', 'deployment_type': 'online'}
+ params['from_literal'] = {'foo': 'bar', 'deployment_type': 'openshift-enterprise'}
configmap = '''{
"apiVersion": "v1",
@@ -100,7 +100,7 @@ class OCConfigMapTest(unittest.TestCase):
"apiVersion": "v1",
"data": {
"foo": "bar",
- "deployment_type": "online",
+ "deployment_type": "openshift-enterprise",
"test": "this is a file\\n"
},
"kind": "ConfigMap",
@@ -128,7 +128,7 @@ class OCConfigMapTest(unittest.TestCase):
self.assertTrue(results['changed'])
self.assertEqual(results['results']['results'][0]['metadata']['name'], 'configmap')
- self.assertEqual(results['results']['results'][0]['data']['deployment_type'], 'online')
+ self.assertEqual(results['results']['results'][0]['data']['deployment_type'], 'openshift-enterprise')
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
diff --git a/roles/lib_utils/library/repoquery.py b/roles/lib_utils/library/repoquery.py
index 95a305b58..e5ac1f74f 100644
--- a/roles/lib_utils/library/repoquery.py
+++ b/roles/lib_utils/library/repoquery.py
@@ -35,6 +35,7 @@ import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
import tempfile # noqa: F401
+import time # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
@@ -618,17 +619,22 @@ def main():
show_duplicates=dict(default=False, required=False, type='bool'),
match_version=dict(default=None, required=False, type='str'),
ignore_excluders=dict(default=False, required=False, type='bool'),
+ retries=dict(default=4, required=False, type='int'),
+ retry_interval=dict(default=5, required=False, type='int'),
),
supports_check_mode=False,
required_if=[('show_duplicates', True, ['name'])],
)
- rval = Repoquery.run_ansible(module.params, module.check_mode)
-
- if 'failed' in rval:
- module.fail_json(**rval)
-
- module.exit_json(**rval)
+ tries = 1
+ while True:
+ rval = Repoquery.run_ansible(module.params, module.check_mode)
+ if 'failed' not in rval:
+ module.exit_json(**rval)
+ elif tries > module.params['retries']:
+ module.fail_json(**rval)
+ tries += 1
+ time.sleep(module.params['retry_interval'])
if __name__ == "__main__":
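The retry loop above makes repoquery tolerant of transient repo-metadata failures: `run_ansible` is re-invoked up to `retries` times, sleeping `retry_interval` seconds between attempts, and `fail_json` fires only once the budget is exhausted. A minimal standalone sketch of the same pattern (the helper name is illustrative, not part of the module):

```python
import time

def run_with_retries(run_once, retries=4, retry_interval=5):
    """Re-invoke a callable returning an Ansible-style result dict until it
    stops reporting 'failed', or until the retry budget is exhausted."""
    tries = 1
    while True:
        rval = run_once()
        if 'failed' not in rval:
            return rval                 # success
        if tries > retries:
            return rval                 # out of retries; caller fails the task
        tries += 1
        time.sleep(retry_interval)      # back off before the next attempt
```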
diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py
index baf72fe47..cf5c2e423 100644
--- a/roles/lib_utils/library/yedit.py
+++ b/roles/lib_utils/library/yedit.py
@@ -35,6 +35,7 @@ import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
import tempfile # noqa: F401
+import time # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
@@ -792,7 +793,7 @@ class Yedit(object):
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_utils/src/ansible/repoquery.py b/roles/lib_utils/src/ansible/repoquery.py
index 40773b1c1..5f5b93639 100644
--- a/roles/lib_utils/src/ansible/repoquery.py
+++ b/roles/lib_utils/src/ansible/repoquery.py
@@ -19,17 +19,22 @@ def main():
show_duplicates=dict(default=False, required=False, type='bool'),
match_version=dict(default=None, required=False, type='str'),
ignore_excluders=dict(default=False, required=False, type='bool'),
+ retries=dict(default=4, required=False, type='int'),
+ retry_interval=dict(default=5, required=False, type='int'),
),
supports_check_mode=False,
required_if=[('show_duplicates', True, ['name'])],
)
- rval = Repoquery.run_ansible(module.params, module.check_mode)
-
- if 'failed' in rval:
- module.fail_json(**rval)
-
- module.exit_json(**rval)
+ tries = 1
+ while True:
+ rval = Repoquery.run_ansible(module.params, module.check_mode)
+ if 'failed' not in rval:
+ module.exit_json(**rval)
+ elif tries > module.params['retries']:
+ module.fail_json(**rval)
+ tries += 1
+ time.sleep(module.params['retry_interval'])
if __name__ == "__main__":
diff --git a/roles/lib_utils/src/class/yedit.py b/roles/lib_utils/src/class/yedit.py
index 957c35a06..0a4fbe07a 100644
--- a/roles/lib_utils/src/class/yedit.py
+++ b/roles/lib_utils/src/class/yedit.py
@@ -590,7 +590,7 @@ class Yedit(object):
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
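This is the source file from which the generated modules above inherit the change, so the behavioral difference is worth spelling out: with the `or {}` removed, querying an absent key now yields `None` rather than an empty hash, letting callers distinguish a missing key from a key whose value is empty. A small sketch of the two behaviors, assuming a dotted-path `get` like Yedit's:

```python
data = {'spec': {'replicas': 0}}

def get(key, source=data):
    """Walk a dotted key path, returning None when any segment is missing."""
    node = source
    for part in key.split('.'):
        if not isinstance(node, dict) or part not in node:
            return None
        node = node[part]
    return node

print(get('spec.missing') or {})  # old behavior: {}   -- absence masked
print(get('spec.missing'))        # new behavior: None -- absence visible
```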
diff --git a/roles/lib_utils/src/lib/import.py b/roles/lib_utils/src/lib/import.py
index 567f8c9e0..07a04b7ae 100644
--- a/roles/lib_utils/src/lib/import.py
+++ b/roles/lib_utils/src/lib/import.py
@@ -10,6 +10,7 @@ import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
import tempfile # noqa: F401
+import time # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
diff --git a/roles/nuage_master/meta/main.yml b/roles/nuage_master/meta/main.yml
index 3da340c85..e2f7af5ad 100644
--- a/roles/nuage_master/meta/main.yml
+++ b/roles/nuage_master/meta/main.yml
@@ -13,8 +13,5 @@ galaxy_info:
- cloud
- system
dependencies:
-- role: nuage_ca
-- role: nuage_common
-- role: openshift_etcd_client_certificates
- role: lib_openshift
- role: lib_os_firewall
diff --git a/roles/openshift_etcd_ca/meta/main.yml b/roles/openshift_etcd_ca/meta/main.yml
deleted file mode 100644
index f1d669d6b..000000000
--- a/roles/openshift_etcd_ca/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-galaxy_info:
- author: Tim Bielawa
- description: Meta role around the etcd_ca role
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 2.2
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
- - system
-dependencies:
-- role: openshift_etcd_facts
-- role: etcd_ca
- when: (etcd_ca_setup | default(True) | bool)
diff --git a/roles/openshift_etcd_client_certificates/meta/main.yml b/roles/openshift_etcd_client_certificates/meta/main.yml
index 3268c390f..fbc72c8a3 100644
--- a/roles/openshift_etcd_client_certificates/meta/main.yml
+++ b/roles/openshift_etcd_client_certificates/meta/main.yml
@@ -11,6 +11,4 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies:
-- role: openshift_etcd_facts
-- role: etcd_client_certificates
+dependencies: []
diff --git a/roles/openshift_etcd_client_certificates/tasks/main.yml b/roles/openshift_etcd_client_certificates/tasks/main.yml
new file mode 100644
index 000000000..7f8b667f0
--- /dev/null
+++ b/roles/openshift_etcd_client_certificates/tasks/main.yml
@@ -0,0 +1,4 @@
+---
+- include_role:
+ name: etcd
+ tasks_from: client_certificates
diff --git a/roles/openshift_etcd_server_certificates/meta/main.yml b/roles/openshift_etcd_server_certificates/meta/main.yml
deleted file mode 100644
index 7750f14af..000000000
--- a/roles/openshift_etcd_server_certificates/meta/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-galaxy_info:
- author: Jason DeTiberus
- description: OpenShift Etcd Server Certificates
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 2.1
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
-dependencies:
-- role: openshift_etcd_facts
-- role: etcd_server_certificates
diff --git a/roles/openshift_examples/README.md b/roles/openshift_examples/README.md
index 8cc479c73..014cef111 100644
--- a/roles/openshift_examples/README.md
+++ b/roles/openshift_examples/README.md
@@ -21,13 +21,13 @@ Facts
Role Variables
--------------
-| Name | Default value | |
-|-------------------------------------|-----------------------------------------------------|------------------------------------------|
-| openshift_examples_load_centos | true when openshift_deployment_typenot 'enterprise' | Load centos image streams |
-| openshift_examples_load_rhel | true if openshift_deployment_type is 'enterprise' | Load rhel image streams |
-| openshift_examples_load_db_templates| true | Loads database templates |
-| openshift_examples_load_quickstarts | true | Loads quickstarts ie: nodejs, rails, etc |
-| openshift_examples_load_xpaas | false | Loads xpass streams and templates |
+| Name | Default value | |
+|-------------------------------------|----------------------------------------------------------------|------------------------------------------|
+| openshift_examples_load_centos | true when openshift_deployment_type not 'openshift-enterprise' | Load centos image streams |
+| openshift_examples_load_rhel | true if openshift_deployment_type is 'openshift-enterprise' | Load rhel image streams |
+| openshift_examples_load_db_templates| true | Loads database templates |
+| openshift_examples_load_quickstarts | true                                                            | Loads quickstarts, e.g. nodejs, rails     |
+| openshift_examples_load_xpaas       | false                                                           | Loads xpaas streams and templates         |
Dependencies
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 517e0231d..1c2c91a5a 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -477,11 +477,7 @@ def set_selectors(facts):
facts if they were not already present
"""
- deployment_type = facts['common']['deployment_type']
- if deployment_type == 'online':
- selector = "type=infra"
- else:
- selector = "region=infra"
+ selector = "region=infra"
if 'hosted' not in facts:
facts['hosted'] = {}
@@ -497,10 +493,10 @@ def set_selectors(facts):
facts['hosted']['metrics'] = {}
if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']:
facts['hosted']['metrics']['selector'] = None
- if 'logging' not in facts['hosted']:
- facts['hosted']['logging'] = {}
- if 'selector' not in facts['hosted']['logging'] or facts['hosted']['logging']['selector'] in [None, 'None']:
- facts['hosted']['logging']['selector'] = None
+ if 'logging' not in facts:
+ facts['logging'] = {}
+ if 'selector' not in facts['logging'] or facts['logging']['selector'] in [None, 'None']:
+ facts['logging']['selector'] = None
if 'etcd' not in facts['hosted']:
facts['hosted']['etcd'] = {}
if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']:
@@ -568,7 +564,7 @@ def set_identity_providers_if_unset(facts):
name='allow_all', challenge=True, login=True,
kind='AllowAllPasswordIdentityProvider'
)
- if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
+ if deployment_type == 'openshift-enterprise':
identity_provider = dict(
name='deny_all', challenge=True, login=True,
kind='DenyAllPasswordIdentityProvider'
@@ -770,13 +766,11 @@ def set_deployment_facts_if_unset(facts):
service_type = 'atomic-openshift'
if deployment_type == 'origin':
service_type = 'origin'
- elif deployment_type in ['enterprise']:
- service_type = 'openshift'
facts['common']['service_type'] = service_type
if 'docker' in facts:
deployment_type = facts['common']['deployment_type']
- if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
+ if deployment_type == 'openshift-enterprise':
addtl_regs = facts['docker'].get('additional_registries', [])
ent_reg = 'registry.access.redhat.com'
if ent_reg not in addtl_regs:
@@ -787,30 +781,21 @@ def set_deployment_facts_if_unset(facts):
deployment_type = facts['common']['deployment_type']
if 'registry_url' not in facts[role]:
registry_url = 'openshift/origin-${component}:${version}'
- if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
+ if deployment_type == 'openshift-enterprise':
registry_url = 'openshift3/ose-${component}:${version}'
- elif deployment_type == 'atomic-enterprise':
- registry_url = 'aep3_beta/aep-${component}:${version}'
facts[role]['registry_url'] = registry_url
if 'master' in facts:
deployment_type = facts['common']['deployment_type']
openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
- if 'disabled_features' in facts['master']:
- if deployment_type == 'atomic-enterprise':
- curr_disabled_features = set(facts['master']['disabled_features'])
- facts['master']['disabled_features'] = list(curr_disabled_features.union(openshift_features))
- else:
+ if 'disabled_features' not in facts['master']:
if facts['common']['deployment_subtype'] == 'registry':
facts['master']['disabled_features'] = openshift_features
if 'node' in facts:
deployment_type = facts['common']['deployment_type']
if 'storage_plugin_deps' not in facts['node']:
- if deployment_type in ['openshift-enterprise', 'atomic-enterprise', 'origin']:
- facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi']
- else:
- facts['node']['storage_plugin_deps'] = []
+ facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi']
return facts
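With the `online`, `enterprise`, and `atomic-enterprise` deployment types retired, each branch above collapses to a two-way check against `openshift-enterprise`. A condensed sketch of the resulting mapping, using the values from the hunks above:

```python
def deployment_defaults(deployment_type):
    """Two-way mapping that replaces the old four-way deployment branches."""
    if deployment_type == 'openshift-enterprise':
        return dict(service_type='atomic-openshift',
                    registry_url='openshift3/ose-${component}:${version}')
    # everything that is not openshift-enterprise is origin now
    return dict(service_type='origin',
                registry_url='openshift/origin-${component}:${version}')

print(deployment_defaults('origin')['registry_url'])
```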
@@ -1671,7 +1656,7 @@ def set_container_facts_if_unset(facts):
facts
"""
deployment_type = facts['common']['deployment_type']
- if deployment_type in ['enterprise', 'openshift-enterprise']:
+ if deployment_type == 'openshift-enterprise':
master_image = 'openshift3/ose'
cli_image = master_image
node_image = 'openshift3/node'
@@ -1681,16 +1666,6 @@ def set_container_facts_if_unset(facts):
router_image = 'openshift3/ose-haproxy-router'
registry_image = 'openshift3/ose-docker-registry'
deployer_image = 'openshift3/ose-deployer'
- elif deployment_type == 'atomic-enterprise':
- master_image = 'aep3_beta/aep'
- cli_image = master_image
- node_image = 'aep3_beta/node'
- ovs_image = 'aep3_beta/openvswitch'
- etcd_image = 'registry.access.redhat.com/rhel7/etcd'
- pod_image = 'aep3_beta/aep-pod'
- router_image = 'aep3_beta/aep-haproxy-router'
- registry_image = 'aep3_beta/aep-docker-registry'
- deployer_image = 'aep3_beta/aep-deployer'
else:
master_image = 'openshift/origin'
cli_image = master_image
@@ -1810,7 +1785,10 @@ class OpenShiftFacts(object):
'etcd',
'hosted',
'master',
- 'node']
+ 'node',
+ 'logging',
+ 'loggingops',
+ 'metrics']
# Disabling too-many-arguments, this should be cleaned up as a TODO item.
# pylint: disable=too-many-arguments,no-value-for-parameter
@@ -1927,7 +1905,7 @@ class OpenShiftFacts(object):
hostname_f = output.strip() if exit_code == 0 else ''
hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
self.system_facts['ansible_fqdn']]
- hostname = choose_hostname(hostname_values, ip_addr)
+ hostname = choose_hostname(hostname_values, ip_addr).lower()
defaults['common'] = dict(ip=ip_addr,
public_ip=ip_addr,
@@ -1991,66 +1969,6 @@ class OpenShiftFacts(object):
if 'hosted' in roles or self.role == 'hosted':
defaults['hosted'] = dict(
- metrics=dict(
- deploy=False,
- duration=7,
- resolution='10s',
- storage=dict(
- kind=None,
- volume=dict(
- name='metrics',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- ),
- loggingops=dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='logging-es-ops',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- ),
- logging=dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='logging-es',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- ),
etcd=dict(
storage=dict(
kind=None,
@@ -2097,6 +2015,69 @@ class OpenShiftFacts(object):
router=dict()
)
+ defaults['logging'] = dict(
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='logging-es',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ )
+
+ defaults['loggingops'] = dict(
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='logging-es-ops',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ )
+
+ defaults['metrics'] = dict(
+ deploy=False,
+ duration=7,
+ resolution='10s',
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='metrics',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ )
+
return defaults
def guess_host_provider(self):
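The net effect of this hunk is structural: the `logging`, `loggingops`, and `metrics` defaults move out of the `hosted` fact into top-level fact roles of their own, matching the additions to the known-roles list earlier in the diff. Illustratively, consumers now read the same defaults at a shallower path (values taken from the defaults above):

```python
# Old layout (removed): per-component config nested under the 'hosted' fact,
# e.g. facts['hosted']['logging']['selector'].
facts = {
    'logging':    {'selector': None, 'storage': {'volume': {'name': 'logging-es'}}},
    'loggingops': {'storage': {'volume': {'name': 'logging-es-ops'}}},
    'metrics':    {'deploy': False, 'storage': {'volume': {'size': '10Gi'}}},
}
print(facts['logging']['selector'])                      # was facts['hosted']['logging']['selector']
print(facts['loggingops']['storage']['volume']['name'])  # 'logging-es-ops'
```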
diff --git a/roles/openshift_gcp/tasks/main.yaml b/roles/openshift_gcp/tasks/main.yaml
new file mode 100644
index 000000000..ad205ba33
--- /dev/null
+++ b/roles/openshift_gcp/tasks/main.yaml
@@ -0,0 +1,43 @@
+#
+# This role relies on gcloud invoked via templated bash in order to
+# provide a high performance deployment option. The next logical step
+# is to transition to a deployment manager template which is then instantiated.
+# TODO: use a formal set of role parameters consistent with openshift_aws
+#
+---
+- name: Templatize DNS script
+ template: src=dns.j2.sh dest=/tmp/openshift_gcp_provision_dns.sh mode=u+rx
+- name: Templatize provision script
+ template: src=provision.j2.sh dest=/tmp/openshift_gcp_provision.sh mode=u+rx
+- name: Templatize de-provision script
+ template: src=remove.j2.sh dest=/tmp/openshift_gcp_provision_remove.sh mode=u+rx
+ when:
+ - state | default('present') == 'absent'
+
+- name: Provision GCP DNS domain
+ command: /tmp/openshift_gcp_provision_dns.sh
+ args:
+ chdir: "{{ playbook_dir }}/files"
+ register: dns_provision
+ when:
+ - state | default('present') == 'present'
+
+- name: Ensure that DNS resolves to the hosted zone
+ assert:
+ that:
+ - "lookup('dig', public_hosted_zone, 'qtype=NS', wantlist=True) | sort | join(',') == dns_provision.stdout"
+ msg: "The DNS domain {{ public_hosted_zone }} defined in 'public_hosted_zone' must have NS records pointing to the Google nameservers: '{{ dns_provision.stdout }}' instead of '{{ lookup('dig', public_hosted_zone, 'qtype=NS') }}'."
+ when:
+ - state | default('present') == 'present'
+
+- name: Provision GCP resources
+ command: /tmp/openshift_gcp_provision.sh
+ args:
+ chdir: "{{ playbook_dir }}/files"
+ when:
+ - state | default('present') == 'present'
+
+- name: De-provision GCP resources
+ command: /tmp/openshift_gcp_provision_remove.sh
+ when:
+ - state | default('present') == 'absent'
diff --git a/roles/openshift_gcp/templates/dns.j2.sh b/roles/openshift_gcp/templates/dns.j2.sh
new file mode 100644
index 000000000..eacf84b4d
--- /dev/null
+++ b/roles/openshift_gcp/templates/dns.j2.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -euo pipefail
+
+dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
+
+# Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist
+if ! gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" dns managed-zones create "${dns_zone}" --dns-name "{{ public_hosted_zone }}" --description "{{ public_hosted_zone }} domain" >/dev/null
+fi
+
+# Always output the expected nameservers as a comma delimited list
+gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" --format='value(nameServers)' | tr ';' ','
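The script follows the describe-then-create idiom used throughout these templates: probe for the resource, create it only on a miss, and always print the nameserver list so the `assert` task in the role can compare it with the live NS records. The same idiom sketched in Python, passing only the arguments the template actually uses (the helper name is made up):

```python
import subprocess

def ensure_managed_zone(project, zone, dns_name):
    """Create the Cloud DNS managed zone only if 'describe' reports it absent."""
    describe = ["gcloud", "--project", project,
                "dns", "managed-zones", "describe", zone]
    if subprocess.call(describe, stdout=subprocess.DEVNULL,
                       stderr=subprocess.DEVNULL) != 0:
        subprocess.check_call(["gcloud", "--project", project,
                               "dns", "managed-zones", "create", zone,
                               "--dns-name", dns_name,
                               "--description", dns_name + " domain"])
```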
diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh
new file mode 100644
index 000000000..e68e9683f
--- /dev/null
+++ b/roles/openshift_gcp/templates/provision.j2.sh
@@ -0,0 +1,318 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# Create SSH key for GCE
+if [ ! -f "{{ gce_ssh_private_key }}" ]; then
+ ssh-keygen -t rsa -f "{{ gce_ssh_private_key }}" -C gce-provision-cloud-user -N ''
+ ssh-add "{{ gce_ssh_private_key }}" || true
+fi
+
+# Check if the ~/.ssh/google_compute_engine.pub key is in the project metadata, and if not, add it there
+pub_key=$(cut -d ' ' -f 2 < "{{ gce_ssh_private_key }}.pub")
+key_tmp_file='/tmp/ocp-gce-keys'
+if ! gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q "$pub_key"; then
+ if gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q ssh-rsa; then
+ gcloud --project "{{ gce_project_id }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file"
+ fi
+ echo -n 'cloud-user:' >> "$key_tmp_file"
+ cat "{{ gce_ssh_private_key }}.pub" >> "$key_tmp_file"
+ gcloud --project "{{ gce_project_id }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}"
+ rm -f "$key_tmp_file"
+fi
+
+metadata=""
+if [[ -n "{{ provision_gce_startup_script_file }}" ]]; then
+ if [[ ! -f "{{ provision_gce_startup_script_file }}" ]]; then
+ echo "Startup script file missing at {{ provision_gce_startup_script_file }} from=$(pwd)"
+ exit 1
+ fi
+ metadata+="--metadata-from-file=startup-script={{ provision_gce_startup_script_file }}"
+fi
+if [[ -n "{{ provision_gce_user_data_file }}" ]]; then
+ if [[ ! -f "{{ provision_gce_user_data_file }}" ]]; then
+ echo "User data file missing at {{ provision_gce_user_data_file }}"
+ exit 1
+ fi
+ if [[ -n "${metadata}" ]]; then
+ metadata+=","
+ else
+ metadata="--metadata-from-file="
+ fi
+ metadata+="user-data={{ provision_gce_user_data_file }}"
+fi
+
+# Select image or image family
+image="{{ provision_gce_registered_image }}"
+if ! gcloud --project "{{ gce_project_id }}" compute images describe "${image}" &>/dev/null; then
+ if ! gcloud --project "{{ gce_project_id }}" compute images describe-from-family "${image}" &>/dev/null; then
+ echo "No compute image or image-family found, create an image named '{{ provision_gce_registered_image }}' to continue'"
+ exit 1
+ fi
+ image="family/${image}"
+fi
+
+### PROVISION THE INFRASTRUCTURE ###
+
+dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
+
+# Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist and exit after printing NS servers
+if ! gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+ echo "DNS zone '${dns_zone}' doesn't exist. Must be configured prior to running this script"
+ exit 1
+fi
+
+# Create network
+if ! gcloud --project "{{ gce_project_id }}" compute networks describe "{{ gce_network_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute networks create "{{ gce_network_name }}" --mode "auto"
+else
+ echo "Network '{{ gce_network_name }}' already exists"
+fi
+
+# Firewall rules in a form:
+# ['name']='parameters for "gcloud compute firewall-rules create"'
+# For all possible parameters see: gcloud compute firewall-rules create --help
+range=""
+if [[ -n "{{ openshift_node_port_range }}" ]]; then
+ range=",tcp:{{ openshift_node_port_range }},udp:{{ openshift_node_port_range }}"
+fi
+declare -A FW_RULES=(
+ ['icmp']='--allow icmp'
+ ['ssh-external']='--allow tcp:22'
+ ['ssh-internal']='--allow tcp:22 --source-tags bastion'
+ ['master-internal']="--allow tcp:2224,tcp:2379,tcp:2380,tcp:4001,udp:4789,udp:5404,udp:5405,tcp:8053,udp:8053,tcp:8444,tcp:10250,tcp:10255,udp:10255,tcp:24224,udp:24224 --source-tags ocp --target-tags ocp-master"
+ ['master-external']="--allow tcp:80,tcp:443,tcp:1936,tcp:8080,tcp:8443${range} --target-tags ocp-master"
+ ['node-internal']="--allow udp:4789,tcp:10250,tcp:10255,udp:10255 --source-tags ocp --target-tags ocp-node,ocp-infra-node"
+ ['infra-node-internal']="--allow tcp:5000 --source-tags ocp --target-tags ocp-infra-node"
+ ['infra-node-external']="--allow tcp:80,tcp:443,tcp:1936${range} --target-tags ocp-infra-node"
+)
+for rule in "${!FW_RULES[@]}"; do
+ ( if ! gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute firewall-rules create "{{ provision_prefix }}$rule" --network "{{ gce_network_name }}" ${FW_RULES[$rule]}
+ else
+ echo "Firewall rule '{{ provision_prefix }}${rule}' already exists"
+ fi ) &
+done
+
+
+# Master IP
+( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-ssl-lb-ip" --global
+else
+ echo "IP '{{ provision_prefix }}master-ssl-lb-ip' already exists"
+fi ) &
+
+# Internal master IP
+( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}"
+else
+ echo "IP '{{ provision_prefix }}master-network-lb-ip' already exists"
+fi ) &
+
+# Router IP
+( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}"
+else
+ echo "IP '{{ provision_prefix }}router-network-lb-ip' already exists"
+fi ) &
+
+
+{% for node_group in provision_gce_node_groups %}
+# configure {{ node_group.name }}
+(
+ if ! gcloud --project "{{ gce_project_id }}" compute instance-templates describe "{{ provision_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute instance-templates create "{{ provision_prefix }}instance-template-{{ node_group.name }}" \
+ --machine-type "{{ node_group.machine_type }}" --network "{{ gce_network_name }}" \
+ --tags "{{ provision_prefix }}ocp,ocp,{{ node_group.tags }}" \
+ --boot-disk-size "{{ node_group.boot_disk_size }}" --boot-disk-type "pd-ssd" \
+ --scopes "logging-write,monitoring-write,useraccounts-ro,service-control,service-management,storage-ro,compute-rw" \
+ --image "${image}" ${metadata}
+ else
+ echo "Instance template '{{ provision_prefix }}instance-template-{{ node_group.name }}' already exists"
+ fi
+
+ # Create instance group
+ if ! gcloud --project "{{ gce_project_id }}" compute instance-groups managed describe "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute instance-groups managed create "{{ provision_prefix }}ig-{{ node_group.suffix }}" \
+ --zone "{{ gce_zone_name }}" --template "{{ provision_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}"
+ else
+ echo "Instance group '{{ provision_prefix }}ig-{{ node_group.suffix }}' already exists"
+ fi
+) &
+{% endfor %}
+
+for i in `jobs -p`; do wait $i; done
+
+
+# Configure the master external LB rules
+(
+# Master health check
+if ! gcloud --project "{{ gce_project_id }}" compute health-checks describe "{{ provision_prefix }}master-ssl-lb-health-check" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute health-checks create https "{{ provision_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz"
+else
+ echo "Health check '{{ provision_prefix }}master-ssl-lb-health-check' already exists"
+fi
+
+gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-named-ports "{{ provision_prefix }}ig-m" \
+ --zone "{{ gce_zone_name }}" --named-ports "{{ provision_prefix }}port-name-master:{{ internal_console_port }}"
+
+# Master backend service
+if ! gcloud --project "{{ gce_project_id }}" compute backend-services describe "{{ provision_prefix }}master-ssl-lb-backend" --global &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute backend-services create "{{ provision_prefix }}master-ssl-lb-backend" --health-checks "{{ provision_prefix }}master-ssl-lb-health-check" --port-name "{{ provision_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ provision_gce_master_https_timeout | default('2m') }}"
+ gcloud --project "{{ gce_project_id }}" compute backend-services add-backend "{{ provision_prefix }}master-ssl-lb-backend" --instance-group "{{ provision_prefix }}ig-m" --global --instance-group-zone "{{ gce_zone_name }}"
+else
+ echo "Backend service '{{ provision_prefix }}master-ssl-lb-backend' already exists"
+fi
+
+# Master tcp proxy target
+if ! gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies describe "{{ provision_prefix }}master-ssl-lb-target" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies create "{{ provision_prefix }}master-ssl-lb-target" --backend-service "{{ provision_prefix }}master-ssl-lb-backend"
+else
+ echo "Proxy target '{{ provision_prefix }}master-ssl-lb-target' already exists"
+fi
+
+# Master forwarding rule
+if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-ssl-lb-rule" --global &>/dev/null; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)')
+ gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ provision_prefix }}master-ssl-lb-target"
+else
+ echo "Forwarding rule '{{ provision_prefix }}master-ssl-lb-rule' already exists"
+fi
+) &
+
+
+# Configure the master internal LB rules
+(
+# Internal master health check
+if ! gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}master-network-lb-health-check" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz"
+else
+ echo "Health check '{{ provision_prefix }}master-network-lb-health-check' already exists"
+fi
+
+# Internal master target pool
+if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}master-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}master-network-lb-pool" --http-health-check "{{ provision_prefix }}master-network-lb-health-check" --region "{{ gce_region_name }}"
+else
+ echo "Target pool '{{ provision_prefix }}master-network-lb-pool' already exists"
+fi
+
+# Internal master forwarding rule
+if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}master-network-lb-pool"
+else
+ echo "Forwarding rule '{{ provision_prefix }}master-network-lb-rule' already exists"
+fi
+) &
+
+
+# Configure the infra node rules
+(
+# Router health check
+if ! gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}router-network-lb-health-check" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz"
+else
+ echo "Health check '{{ provision_prefix }}router-network-lb-health-check' already exists"
+fi
+
+# Router target pool
+if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}router-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}router-network-lb-pool" --http-health-check "{{ provision_prefix }}router-network-lb-health-check" --region "{{ gce_region_name }}"
+else
+ echo "Target pool '{{ provision_prefix }}router-network-lb-pool' already exists"
+fi
+
+# Router forwarding rule
+if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}router-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}router-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}router-network-lb-pool"
+else
+ echo "Forwarding rule '{{ provision_prefix }}router-network-lb-rule' already exists"
+fi
+) &
+
+for i in `jobs -p`; do wait $i; done
+
+# set the target pools
+(
+if [[ "ig-m" == "{{ provision_gce_router_network_instance_group }}" ]]; then
+ gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool,{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}"
+else
+ gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool" --zone "{{ gce_zone_name }}"
+ gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}{{ provision_gce_router_network_instance_group }}" --target-pools "{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}"
+fi
+) &
+
+# configure DNS
+(
+# Retry DNS changes until they succeed since this may be a shared resource
+while true; do
+ dns="${TMPDIR:-/tmp}/dns.yaml"
+ rm -f $dns
+
+ # DNS record for master lb
+ if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)')
+ if [[ ! -f $dns ]]; then
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ fi
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP"
+ else
+ echo "DNS record for '{{ openshift_master_cluster_public_hostname }}' already exists"
+ fi
+
+ # DNS record for internal master lb
+ if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ if [[ ! -f $dns ]]; then
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ fi
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_hostname }}." --type A "$IP"
+ else
+ echo "DNS record for '{{ openshift_master_cluster_hostname }}' already exists"
+ fi
+
+ # DNS record for router lb
+ if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ if [[ ! -f $dns ]]; then
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ fi
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ wildcard_zone }}." --type A "$IP"
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}."
+ else
+ echo "DNS record for '{{ wildcard_zone }}' already exists"
+ fi
+
+ # Commit all DNS changes, retrying if preconditions are not met
+ if [[ -f $dns ]]; then
+ if ! out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
+ if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
+ continue
+ fi
+ # note: $? here reflects the negated 'if !' test and is always 0, so exit non-zero explicitly
+ exit 1
+ fi
+ fi
+ break
+done
+) &
+
+# Create bucket for registry
+(
+if ! gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then
+ gsutil mb -p "{{ gce_project_id }}" -l "{{ gce_region_name }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}"
+else
+ echo "Bucket '{{ openshift_hosted_registry_storage_gcs_bucket }}' already exists"
+fi
+) &
+
+# wait until all node groups are stable
+{% for node_group in provision_gce_node_groups %}
+# wait for stable {{ node_group.name }}
+( gcloud --project "{{ gce_project_id }}" compute instance-groups managed wait-until-stable "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --timeout=300) &
+{% endfor %}
+
+
+for i in `jobs -p`; do wait $i; done
diff --git a/roles/openshift_gcp/templates/remove.j2.sh b/roles/openshift_gcp/templates/remove.j2.sh
new file mode 100644
index 000000000..41ceab2b5
--- /dev/null
+++ b/roles/openshift_gcp/templates/remove.j2.sh
@@ -0,0 +1,156 @@
+#!/bin/bash
+
+set -euo pipefail
+
+function teardown_cmd() {
+ a=( $@ )
+ local name=$1
+ a=( "${a[@]:1}" )
+ local flag=0
+ local found=
+ for i in ${a[@]}; do
+ if [[ "$i" == "--"* ]]; then
+ found=true
+ break
+ fi
+ flag=$((flag+1))
+ done
+ if [[ -z "${found}" ]]; then
+ flag=$((flag+1))
+ fi
+ if gcloud --project "{{ gce_project_id }}" ${a[@]::$flag} describe "${name}" ${a[@]:$flag} &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" ${a[@]::$flag} delete -q "${name}" ${a[@]:$flag}
+ fi
+}
+
+function teardown() {
+ for i in `seq 1 20`; do
+ if teardown_cmd $@; then
+ break
+ fi
+ sleep 0.5
+ done
+}
+
+# Preemptively spin down the instances
+{% for node_group in provision_gce_node_groups %}
+# scale down {{ node_group.name }}
+(
+ # performs a delete and scale down as one operation to ensure maximum parallelism
+ if ! instances=$( gcloud --project "{{ gce_project_id }}" compute instance-groups managed list-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --format='value[terminator=","](instance)' ); then
+ exit 0
+ fi
+ instances="${instances%?}"
+ if [[ -z "${instances}" ]]; then
+ echo "warning: No instances in {{ node_group.name }}" 1>&2
+ exit 0
+ fi
+ if ! gcloud --project "{{ gce_project_id }}" compute instance-groups managed delete-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --instances "${instances}"; then
+ echo "warning: Unable to scale down the node group {{ node_group.name }}" 1>&2
+ exit 0
+ fi
+) &
+{% endfor %}
+
+# Bucket for registry
+(
+if gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then
+ gsutil -m rm -r "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}"
+fi
+) &
+
+# DNS
+(
+dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
+if gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+ # Retry DNS changes until they succeed since this may be a shared resource
+ while true; do
+ dns="${TMPDIR:-/tmp}/dns.yaml"
+ rm -f "${dns}"
+
+ # export all dns records that match into a zone format, and turn each line into a set of args for
+ # record-sets transaction.
+ gcloud dns record-sets export --project "{{ gce_project_id }}" -z "${dns_zone}" --zone-file-format "${dns}"
+ if grep -F -e '{{ openshift_master_cluster_hostname }}' -e '{{ openshift_master_cluster_public_hostname }}' -e '{{ wildcard_zone }}' "${dns}" | \
+ awk '{ print "--name", $1, "--ttl", $2, "--type", $4, $5; }' > "${dns}.input"
+ then
+ rm -f "${dns}"
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ cat "${dns}.input" | xargs -L1 gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file="${dns}" remove -z "${dns_zone}"
+
+ # Commit all DNS changes, retrying if preconditions are not met
+ if ! out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
+ if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
+ continue
+ fi
+ # note: $? here reflects the negated 'if !' test and is always 0, so exit non-zero explicitly
+ exit 1
+ fi
+ fi
+ rm "${dns}.input"
+ break
+ done
+fi
+) &
+
+(
+# Router network rules
+teardown "{{ provision_prefix }}router-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}"
+teardown "{{ provision_prefix }}router-network-lb-pool" compute target-pools --region "{{ gce_region_name }}"
+teardown "{{ provision_prefix }}router-network-lb-health-check" compute http-health-checks
+teardown "{{ provision_prefix }}router-network-lb-ip" compute addresses --region "{{ gce_region_name }}"
+
+# Internal master network rules
+teardown "{{ provision_prefix }}master-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}"
+teardown "{{ provision_prefix }}master-network-lb-pool" compute target-pools --region "{{ gce_region_name }}"
+teardown "{{ provision_prefix }}master-network-lb-health-check" compute http-health-checks
+teardown "{{ provision_prefix }}master-network-lb-ip" compute addresses --region "{{ gce_region_name }}"
+) &
+
+(
+# Master SSL network rules
+teardown "{{ provision_prefix }}master-ssl-lb-rule" compute forwarding-rules --global
+teardown "{{ provision_prefix }}master-ssl-lb-target" compute target-tcp-proxies
+teardown "{{ provision_prefix }}master-ssl-lb-ip" compute addresses --global
+teardown "{{ provision_prefix }}master-ssl-lb-backend" compute backend-services --global
+teardown "{{ provision_prefix }}master-ssl-lb-health-check" compute health-checks
+) &
+
+# Firewall rules
+# ['name']='parameters for "gcloud compute firewall-rules create"'
+# For all possible parameters see: gcloud compute firewall-rules create --help
+declare -A FW_RULES=(
+ ['icmp']=""
+ ['ssh-external']=""
+ ['ssh-internal']=""
+ ['master-internal']=""
+ ['master-external']=""
+ ['node-internal']=""
+ ['infra-node-internal']=""
+ ['infra-node-external']=""
+)
+for rule in "${!FW_RULES[@]}"; do
+ ( if gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then
+ # retry a few times because this call can be flaky
+ for i in `seq 1 3`; do
+ if gcloud -q --project "{{ gce_project_id }}" compute firewall-rules delete "{{ provision_prefix }}$rule"; then
+ break
+ fi
+ done
+ fi ) &
+done
+
+for i in `jobs -p`; do wait $i; done
+
+{% for node_group in provision_gce_node_groups %}
+# teardown {{ node_group.name }} - any load balancers referencing these groups must be removed
+(
+ teardown "{{ provision_prefix }}ig-{{ node_group.suffix }}" compute instance-groups managed --zone "{{ gce_zone_name }}"
+ teardown "{{ provision_prefix }}instance-template-{{ node_group.name }}" compute instance-templates
+) &
+{% endfor %}
+
+for i in `jobs -p`; do wait $i; done
+
+# Network
+teardown "{{ gce_network_name }}" compute networks
diff --git a/roles/openshift_gcp_image_prep/files/partition.conf b/roles/openshift_gcp_image_prep/files/partition.conf
new file mode 100644
index 000000000..b87e5e0b6
--- /dev/null
+++ b/roles/openshift_gcp_image_prep/files/partition.conf
@@ -0,0 +1,3 @@
+[Service]
+ExecStartPost=-/usr/bin/growpart /dev/sda 1
+ExecStartPost=-/sbin/xfs_growfs /
diff --git a/roles/openshift_gcp_image_prep/tasks/main.yaml b/roles/openshift_gcp_image_prep/tasks/main.yaml
new file mode 100644
index 000000000..fee5ab618
--- /dev/null
+++ b/roles/openshift_gcp_image_prep/tasks/main.yaml
@@ -0,0 +1,18 @@
+---
+# GCE instances boot with fstab entries mounting XFS with barrier=1, an ext* option that is invalid for XFS.
+- name: Remove barrier=1 from XFS fstab entries
+ lineinfile:
+ path: /etc/fstab
+ regexp: '^(.+)xfs(.+?),?barrier=1,?(.*?)$'
+ line: '\1xfs\2 \3'
+ backrefs: yes
+
+- name: Ensure the root filesystem has XFS group quota turned on
+ lineinfile:
+ path: /boot/grub2/grub.cfg
+ regexp: '^(.*)linux16 (.*)$'
+ line: '\1linux16 \2 rootflags=gquota'
+ backrefs: yes
+
+- name: Ensure the root partition grows on startup
+ copy: src=partition.conf dest=/etc/systemd/system/google-instance-setup.service.d/
diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
index d02a43655..326176273 100644
--- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py
+++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
@@ -3,7 +3,10 @@ Ansible action plugin to execute health checks in OpenShift clusters.
"""
import sys
import os
+import base64
import traceback
+import errno
+import json
from collections import defaultdict
from ansible.plugins.action import ActionBase
@@ -38,8 +41,13 @@ class ActionModule(ActionBase):
# storing the information we need in the result.
result['playbook_context'] = task_vars.get('r_openshift_health_checker_playbook_context')
+ # if the user wants to write check results to files, they provide this directory:
+ output_dir = task_vars.get("openshift_checks_output_dir")
+ if output_dir:
+ output_dir = os.path.join(output_dir, task_vars["ansible_host"])
+
try:
- known_checks = self.load_known_checks(tmp, task_vars)
+ known_checks = self.load_known_checks(tmp, task_vars, output_dir)
args = self._task.args
requested_checks = normalize(args.get('checks', []))
@@ -65,21 +73,20 @@ class ActionModule(ActionBase):
for name in resolved_checks:
display.banner("CHECK [{} : {}]".format(name, task_vars["ansible_host"]))
- check = known_checks[name]
- check_results[name] = run_check(name, check, user_disabled_checks)
- if check.changed:
- check_results[name]["changed"] = True
+ check_results[name] = run_check(name, known_checks[name], user_disabled_checks, output_dir)
result["changed"] = any(r.get("changed") for r in check_results.values())
if any(r.get("failed") for r in check_results.values()):
result["failed"] = True
result["msg"] = "One or more checks failed"
+ write_result_to_output_dir(output_dir, result)
return result
- def load_known_checks(self, tmp, task_vars):
+ def load_known_checks(self, tmp, task_vars, output_dir=None):
"""Find all existing checks and return a mapping of names to instances."""
load_checks()
+ want_full_results = bool(output_dir)
known_checks = {}
for cls in OpenShiftCheck.subclasses():
@@ -90,7 +97,12 @@ class ActionModule(ActionBase):
"duplicate check name '{}' in: '{}' and '{}'"
"".format(name, full_class_name(cls), full_class_name(other_cls))
)
- known_checks[name] = cls(execute_module=self._execute_module, tmp=tmp, task_vars=task_vars)
+ known_checks[name] = cls(
+ execute_module=self._execute_module,
+ tmp=tmp,
+ task_vars=task_vars,
+ want_full_results=want_full_results
+ )
return known_checks
@@ -185,8 +197,10 @@ def normalize(checks):
return [name.strip() for name in checks if name.strip()]
-def run_check(name, check, user_disabled_checks):
+def run_check(name, check, user_disabled_checks, output_dir=None):
"""Run a single check if enabled and return a result dict."""
+
+ # determine if we're going to run the check (not inactive or disabled)
if name in user_disabled_checks or '*' in user_disabled_checks:
return dict(skipped=True, skipped_reason="Disabled by user request")
@@ -201,12 +215,134 @@ def run_check(name, check, user_disabled_checks):
if not is_active:
return dict(skipped=True, skipped_reason="Not active for this host")
+ # run the check
+ result = {}
try:
- return check.run()
+ result = check.run()
except OpenShiftCheckException as exc:
- return dict(failed=True, msg=str(exc))
+ check.register_failure(exc)
+ except Exception as exc:
+ check.register_failure("\n".join([str(exc), traceback.format_exc()]))
+
+ # process the check state; compose the result hash, write files as needed
+ if check.changed:
+ result["changed"] = True
+ if check.failures or result.get("failed"):
+ if "msg" in result: # failure result has msg; combine with any registered failures
+ check.register_failure(result.get("msg"))
+ result["failures"] = [(fail.name, str(fail)) for fail in check.failures]
+ result["failed"] = True
+ result["msg"] = "\n".join(str(fail) for fail in check.failures)
+ write_to_output_file(output_dir, name + ".failures.json", result["failures"])
+ if check.logs:
+ write_to_output_file(output_dir, name + ".log.json", check.logs)
+ if check.files_to_save:
+ write_files_to_save(output_dir, check)
+
+ return result
+
+
+def prepare_output_dir(dirname):
+ """Create the directory, including parents. Return bool for success/failure."""
+ try:
+ os.makedirs(dirname)
+ return True
+ except OSError as exc:
+ # trying to create existing dir leads to error;
+ # that error is fine, but for any other, assume the dir is not there
+ return exc.errno == errno.EEXIST
+
+
+def copy_remote_file_to_dir(check, file_to_save, output_dir, fname):
+ """Copy file from remote host to local file in output_dir, if given."""
+ if not output_dir or not prepare_output_dir(output_dir):
+ return
+ local_file = os.path.join(output_dir, fname)
+
+ # pylint: disable=broad-except; do not need to do anything about failure to write dir/file
+ # and do not want exceptions to break anything.
+ try:
+ # NOTE: it would have been nice to copy the file directly without loading it into
+ # memory, but there does not seem to be a good way to do this via ansible.
+ result = check.execute_module("slurp", dict(src=file_to_save), register=False)
+ if result.get("failed"):
+ display.warning("Could not retrieve file {}: {}".format(file_to_save, result.get("msg")))
+ return
+
+ content = result["content"]
+ if result.get("encoding") == "base64":
+ content = base64.b64decode(content)
+ with open(local_file, "wb") as outfile:
+ outfile.write(content)
+ except Exception as exc:
+ display.warning("Failed writing remote {} to local {}: {}".format(file_to_save, local_file, exc))
+ return
+
+
+def _no_fail(obj):
+ # pylint: disable=broad-except; do not want serialization to fail for any reason
+ try:
+ return str(obj)
+ except Exception:
+ return "[not serializable]"
+
+
+def write_to_output_file(output_dir, filename, data):
+ """If output_dir provided, write data to file. Serialize as JSON if data is not a string."""
+
+ if not output_dir or not prepare_output_dir(output_dir):
+ return
+ filename = os.path.join(output_dir, filename)
+ try:
+ with open(filename, 'w') as outfile:
+ if isinstance(data, string_types):
+ outfile.write(data)
+ else:
+ json.dump(data, outfile, sort_keys=True, indent=4, default=_no_fail)
+ # pylint: disable=broad-except; do not want serialization/write to break for any reason
+ except Exception as exc:
+ display.warning("Could not write output file {}: {}".format(filename, exc))
+
+
+def write_result_to_output_dir(output_dir, result):
+ """If output_dir provided, write the result as json to result.json.
+
+ Success/failure of the write is recorded as "output_files" in the result hash afterward.
+ Otherwise this is much like write_to_output_file.
+ """
+
+ if not output_dir:
+ return
+ if not prepare_output_dir(output_dir):
+ result["output_files"] = "Error creating output directory " + output_dir
+ return
+
+ filename = os.path.join(output_dir, "result.json")
+ try:
+ with open(filename, 'w') as outfile:
+ json.dump(result, outfile, sort_keys=True, indent=4, default=_no_fail)
+ result["output_files"] = "Check results for this host written to " + filename
+ # pylint: disable=broad-except; do not want serialization/write to break for any reason
except Exception as exc:
- return dict(failed=True, msg=str(exc), exception=traceback.format_exc())
+ result["output_files"] = "Error writing check results to {}:\n{}".format(filename, exc)
+
+
+def write_files_to_save(output_dir, check):
+ """Write files to check subdir in output dir."""
+ if not output_dir:
+ return
+ output_dir = os.path.join(output_dir, check.name)
+ seen_file = defaultdict(lambda: 0)
+ for file_to_save in check.files_to_save:
+ fname = file_to_save.filename
+ while seen_file[fname]: # just to be sure we never re-write a file, append numbers as needed
+ seen_file[fname] += 1
+ fname = "{}.{}".format(fname, seen_file[fname])
+ seen_file[fname] += 1
+ if file_to_save.remote_filename:
+ copy_remote_file_to_dir(check, file_to_save.remote_filename, output_dir, fname)
+ else:
+ write_to_output_file(output_dir, fname, file_to_save.contents)
def full_class_name(cls):
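Taken together, these hooks mean that setting `openshift_checks_output_dir` yields a per-host directory containing `result.json`, per-check `<check>.failures.json` and `<check>.log.json` files, and a per-check subdirectory for registered files. A sketch of how the paths compose (the check name is only an example):

```python
import os

def artifact_paths(base_dir, host, check_name):
    """Compose the per-host artifact paths the action plugin writes
    (base_dir/host come from openshift_checks_output_dir/ansible_host)."""
    host_dir = os.path.join(base_dir, host)
    return dict(
        result=os.path.join(host_dir, "result.json"),
        failures=os.path.join(host_dir, check_name + ".failures.json"),
        log=os.path.join(host_dir, check_name + ".log.json"),
        files_dir=os.path.join(host_dir, check_name),  # registered files land here
    )

print(artifact_paths("/tmp/checks", "master1", "disk_availability"))
```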
diff --git a/roles/openshift_health_checker/library/ocutil.py b/roles/openshift_health_checker/library/ocutil.py
index 2e60735d6..c72f4c5b3 100644
--- a/roles/openshift_health_checker/library/ocutil.py
+++ b/roles/openshift_health_checker/library/ocutil.py
@@ -40,18 +40,17 @@ def main():
module = AnsibleModule(
argument_spec=dict(
- namespace=dict(type="str", required=True),
+ namespace=dict(type="str", required=False),
config_file=dict(type="str", required=True),
cmd=dict(type="str", required=True),
extra_args=dict(type="list", default=[]),
),
)
- cmd = [
- locate_oc_binary(),
- '--config', module.params["config_file"],
- '-n', module.params["namespace"],
- ] + shlex.split(module.params["cmd"])
+ cmd = [locate_oc_binary(), '--config', module.params["config_file"]]
+ if module.params["namespace"]:
+ cmd += ['-n', module.params["namespace"]]
+ cmd += shlex.split(module.params["cmd"]) + module.params["extra_args"]
failed = True
try:
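Two things change here: `namespace` becomes optional, so cluster-scoped commands can omit `-n`, and `extra_args` is appended to the assembled command, which the old construction in this hunk did not do. A sketch of the resulting argv for hypothetical parameters:

```python
import shlex

params = dict(
    config_file="/etc/origin/master/admin.kubeconfig",  # illustrative path
    namespace=None,                                     # optional now
    cmd="get nodes",
    extra_args=["-o", "json"],
)

cmd = ["oc", "--config", params["config_file"]]
if params["namespace"]:
    cmd += ["-n", params["namespace"]]
cmd += shlex.split(params["cmd"]) + params["extra_args"]
print(cmd)  # ['oc', '--config', '...', 'get', 'nodes', '-o', 'json']
```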
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index 987c955b6..ce05b44a4 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -2,15 +2,18 @@
Health checks for OpenShift clusters.
"""
+import json
import operator
import os
import time
+import collections
from abc import ABCMeta, abstractmethod, abstractproperty
from importlib import import_module
from ansible.module_utils import six
from ansible.module_utils.six.moves import reduce # pylint: disable=import-error,redefined-builtin
+from ansible.module_utils.six import string_types
from ansible.plugins.filter.core import to_bool as ansible_to_bool
@@ -28,7 +31,7 @@ class OpenShiftCheckException(Exception):
class OpenShiftCheckExceptionList(OpenShiftCheckException):
- """A container for multiple logging errors that may be detected in one check."""
+ """A container for multiple errors that may be detected in one check."""
def __init__(self, errors):
self.errors = errors
super(OpenShiftCheckExceptionList, self).__init__(
@@ -41,29 +44,53 @@ class OpenShiftCheckExceptionList(OpenShiftCheckException):
return self.errors[index]
+FileToSave = collections.namedtuple("FileToSave", "filename contents remote_filename")
+
+
+# pylint: disable=too-many-instance-attributes; all represent significantly different state.
+# Arguably they could be separated into two hashes, one for storing parameters, and one for
+# storing result state; but that smells more like clutter than clarity.
@six.add_metaclass(ABCMeta)
class OpenShiftCheck(object):
- """
- A base class for defining checks for an OpenShift cluster environment.
+ """A base class for defining checks for an OpenShift cluster environment.
- Expect optional params: method execute_module, dict task_vars, and string tmp.
+ Optional init params: method execute_module, dict task_vars, and string tmp
execute_module is expected to have a signature compatible with _execute_module
from ansible plugins/action/__init__.py, e.g.:
def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *args):
This is stored so that it can be invoked in subclasses via check.execute_module("name", args)
which provides the check's stored task_vars and tmp.
+
+ Optional init param: want_full_results
+ If True, a check that can gather logs, tarballs, etc. should do so; if False there is
+ no need to spend the time, since the results will not be written to an output directory.
"""
- def __init__(self, execute_module=None, task_vars=None, tmp=None):
+ def __init__(self, execute_module=None, task_vars=None, tmp=None, want_full_results=False):
+ # store a method for executing ansible modules from the check
self._execute_module = execute_module
+ # the task variables and tmpdir passed into the health checker task
self.task_vars = task_vars or {}
self.tmp = tmp
+ # a boolean for disabling the gathering of results (files, computations) that won't
+ # actually be recorded/used
+ self.want_full_results = want_full_results
+
# mainly for testing purposes; see execute_module_with_retries
self._module_retries = 3
self._module_retry_interval = 5 # seconds
+ # state to be recorded for inspection after the check runs:
+ #
# set to True when the check changes the host, for accurate total "changed" count
self.changed = False
+ # list of OpenShiftCheckException for check to report (alternative to returning a failed result)
+ self.failures = []
+ # list of FileToSave - files the check specifies to be written locally if so configured
+ self.files_to_save = []
+ # log messages for the check - tuples of (description, msg) where msg is serializable.
+ # These are intended to be a sequential record of what the check observed and determined.
+ self.logs = []
@abstractproperty
def name(self):
@@ -84,9 +111,20 @@ class OpenShiftCheck(object):
"""Returns true if this check applies to the ansible-playbook run."""
return True
+ def is_first_master(self):
+ """Determine if running on first master. Returns: bool"""
+ masters = self.get_var("groups", "oo_first_master", default=None) or [None]
+ return masters[0] == self.get_var("ansible_host")
+
@abstractmethod
def run(self):
- """Executes a check, normally implemented as a module."""
+ """Executes a check against a host and returns a result hash similar to Ansible modules.
+
+        The direction going forward, however, is to record state in the attributes
+        rather than build a result hash: return an empty hash and let
+        the action plugin fill it in, or raise an OpenShiftCheckException.
+        Returning a hash may be deprecated if it does not prove necessary.
+ """
return {}
@classmethod
@@ -98,7 +136,43 @@ class OpenShiftCheck(object):
for subclass in subclass.subclasses():
yield subclass
- def execute_module(self, module_name=None, module_args=None):
+ def register_failure(self, error):
+ """Record in the check that a failure occurred.
+
+        Recorded failures are merged into the result hash for now. They are also saved to the output
+        directory (if provided) as <check>.failures.json and registered as log entries in <check>.log.json.
+ """
+ # It should be an exception; make it one if not
+ if not isinstance(error, OpenShiftCheckException):
+ error = OpenShiftCheckException(str(error))
+ self.failures.append(error)
+ # duplicate it in the logs so it can be seen in the context of any
+ # information that led to the failure
+ self.register_log("failure: " + error.name, str(error))
+
+ def register_log(self, context, msg):
+ """Record an entry for the check log.
+
+        Notes are intended to serve as context for the whole sequence of what the check observed.
+        They are saved as an ordered list in a local check log file.
+        They are not included in the result or in the ansible log; they are just for the record.
+ """
+ self.logs.append([context, msg])
+
+ def register_file(self, filename, contents=None, remote_filename=""):
+ """Record a file that a check makes available to be saved individually to output directory.
+
+        Either the file contents should be passed in, or a file to be copied from the remote host
+        should be specified. Contents that are not a string will be serialized as JSON.
+
+        NOTE: When copying a file from the remote host, it is slurped into memory as base64, so
+        avoid using this on huge files (more than, say, 10M).
+ """
+ if contents is None and not remote_filename:
+ raise OpenShiftCheckException("File data/source not specified; this is a bug in the check.")
+ self.files_to_save.append(FileToSave(filename, contents, remote_filename))
+
+ def execute_module(self, module_name=None, module_args=None, save_as_name=None, register=True):
"""Invoke an Ansible module from a check.
Invoke stored _execute_module, normally copied from the action
@@ -110,6 +184,12 @@ class OpenShiftCheck(object):
Ansible version).
So e.g. check.execute_module("foo", dict(arg1=...))
+
+ save_as_name specifies a file name for saving the result to an output directory,
+ if needed, and is intended to uniquely identify the result of invoking execute_module.
+ If not provided, the module name will be used.
+    If register is set to False, the result will not be registered in the logs or files to save.
+
Return: result hash from module execution.
"""
if self._execute_module is None:
@@ -117,7 +197,20 @@ class OpenShiftCheck(object):
self.__class__.__name__ +
" invoked execute_module without providing the method at initialization."
)
- return self._execute_module(module_name, module_args, self.tmp, self.task_vars)
+ result = self._execute_module(module_name, module_args, self.tmp, self.task_vars)
+ if result.get("changed"):
+ self.changed = True
+ for output in ["result", "stdout"]:
+ # output is often JSON; attempt to decode
+ try:
+ result[output + "_json"] = json.loads(result[output])
+ except (KeyError, ValueError):
+ pass
+
+ if register:
+ self.register_log("execute_module: " + module_name, result)
+ self.register_file(save_as_name or module_name + ".json", result)
+ return result
def execute_module_with_retries(self, module_name, module_args):
"""Run execute_module and retry on failure."""
@@ -188,8 +281,23 @@ class OpenShiftCheck(object):
'There is a bug in this check. While trying to convert variable \n'
' "{var}={value}"\n'
'the given converter cannot be used or failed unexpectedly:\n'
- '{error}'.format(var=".".join(keys), value=value, error=error)
- )
+ '{type}: {error}'.format(
+ var=".".join(keys),
+ value=value,
+ type=error.__class__.__name__,
+ error=error
+ ))
+
+ @staticmethod
+ def normalize(name_list):
+ """Return a clean list of names.
+
+ The input may be a comma-separated string or a sequence. Leading and
+ trailing whitespace characters are removed. Empty items are discarded.
+ """
+ if isinstance(name_list, string_types):
+ name_list = name_list.split(',')
+ return [name.strip() for name in name_list if name.strip()]
@staticmethod
def get_major_minor_version(openshift_image_tag):
@@ -231,7 +339,9 @@ class OpenShiftCheck(object):
mount_point = os.path.dirname(mount_point)
try:
- return mount_for_path[mount_point]
+ mount = mount_for_path[mount_point]
+ self.register_log("mount point for " + path, mount)
+ return mount
except KeyError:
known_mounts = ', '.join('"{}"'.format(mount) for mount in sorted(mount_for_path))
raise OpenShiftCheckException(
@@ -259,7 +369,7 @@ def load_checks(path=None, subpkg=""):
modules = modules + load_checks(os.path.join(path, name), subpkg + "." + name)
continue
- if name.endswith(".py") and not name.startswith(".") and name not in LOADER_EXCLUDES:
+ if name.endswith(".py") and name not in LOADER_EXCLUDES:
modules.append(import_module(__package__ + subpkg + "." + name[:-3]))
return modules
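Taken together, the new attributes (failures, files_to_save, logs) and the register_* helpers let a check accumulate state for the action plugin to collect, rather than hand-building a failed result hash. A minimal sketch of a hypothetical check using this API (DummyCheck and its module call are illustrative, not part of this commit):

    from openshift_checks import OpenShiftCheck, OpenShiftCheckException

    class DummyCheck(OpenShiftCheck):
        """Illustrative only: exercises the new registration API."""
        name = "dummy"
        tags = ["health"]

        def run(self):
            self.register_log("starting", "probing the host")
            # execute_module now records logs/files itself unless register=False
            result = self.execute_module("ping", {}, save_as_name="dummy_ping.json")
            if result.get("failed"):
                # recorded instead of returned; the action plugin merges failures
                self.register_failure(OpenShiftCheckException("ping failed"))
            self.register_file("dummy.txt", contents=str(result))
            return {}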
diff --git a/roles/openshift_health_checker/openshift_checks/diagnostics.py b/roles/openshift_health_checker/openshift_checks/diagnostics.py
new file mode 100644
index 000000000..1cfdc1129
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/diagnostics.py
@@ -0,0 +1,62 @@
+"""
+A check to run relevant diagnostics via `oc adm diagnostics`.
+"""
+
+import os
+
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
+
+
+DIAGNOSTIC_LIST = (
+ "AggregatedLogging ClusterRegistry ClusterRoleBindings ClusterRoles "
+ "ClusterRouter DiagnosticPod NetworkCheck"
+).split()
+
+
+class DiagnosticCheck(OpenShiftCheck):
+ """A check to run relevant diagnostics via `oc adm diagnostics`."""
+
+ name = "diagnostics"
+ tags = ["health"]
+
+ def is_active(self):
+ return super(DiagnosticCheck, self).is_active() and self.is_first_master()
+
+ def run(self):
+ if self.exec_diagnostic("ConfigContexts"):
+ # only run the other diagnostics if that one succeeds (otherwise, all will fail)
+ diagnostics = self.get_var("openshift_check_diagnostics", default=DIAGNOSTIC_LIST)
+ for diagnostic in self.normalize(diagnostics):
+ self.exec_diagnostic(diagnostic)
+ return {}
+
+ def exec_diagnostic(self, diagnostic):
+ """
+ Execute an 'oc adm diagnostics' command on the remote host.
+ Raises OcNotFound or registers OcDiagFailed.
+ Returns True on success or False on failure (non-zero rc).
+ """
+ config_base = self.get_var("openshift.common.config_base")
+ args = {
+ "config_file": os.path.join(config_base, "master", "admin.kubeconfig"),
+ "cmd": "adm diagnostics",
+ "extra_args": [diagnostic],
+ }
+
+ result = self.execute_module("ocutil", args, save_as_name=diagnostic + ".failure.json")
+ self.register_file(diagnostic + ".txt", result['result'])
+ if result.get("failed"):
+ if result['result'] == '[Errno 2] No such file or directory':
+ raise OpenShiftCheckException(
+ "OcNotFound",
+ "This host is supposed to be a master but does not have the `oc` command where expected.\n"
+ "Has an installation been run on this host yet?"
+ )
+
+ self.register_failure(OpenShiftCheckException(
+ 'OcDiagFailed',
+ 'The {diag} diagnostic reported an error:\n'
+ '{error}'.format(diag=diagnostic, error=result['result'])
+ ))
+ return False
+ return True
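The diagnostics to run come from the openshift_check_diagnostics variable, falling back to DIAGNOSTIC_LIST; normalize() accepts either a sequence or a comma-separated string, trimming whitespace and dropping empty items. For example:

    from openshift_checks import OpenShiftCheck

    assert OpenShiftCheck.normalize("ClusterRegistry, DiagnosticPod,,NetworkCheck") == [
        "ClusterRegistry", "DiagnosticPod", "NetworkCheck"]
    assert OpenShiftCheck.normalize(["  ClusterRouter  "]) == ["ClusterRouter"]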
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
index f302fd14b..cdf56e959 100644
--- a/roles/openshift_health_checker/openshift_checks/disk_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -70,6 +70,10 @@ class DiskAvailability(OpenShiftCheck):
# If it is not a number, then it should be a nested dict.
pass
+ self.register_log("recommended thresholds", self.recommended_disk_space_bytes)
+ if user_config:
+ self.register_log("user-configured thresholds", user_config)
+
# TODO: as suggested in
# https://github.com/openshift/openshift-ansible/pull/4436#discussion_r122180021,
# maybe we could support checking disk availability in paths that are
@@ -113,10 +117,7 @@ class DiskAvailability(OpenShiftCheck):
'in your Ansible inventory, and lower the recommended disk space availability\n'
'if necessary for this upgrade.').format(config_bytes)
- return {
- 'failed': True,
- 'msg': msg,
- }
+ self.register_failure(msg)
return {}
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 9c35f0f92..98372d979 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -109,8 +109,6 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# containerized etcd may not have openshift_image_tag, see bz 1466622
image_tag = self.get_var("openshift_image_tag", default="latest")
image_info = DEPLOYMENT_IMAGE_INFO[deployment_type]
- if not image_info:
- return required
# template for images that run on top of OpenShift
image_url = "{}/{}-{}:{}".format(image_info["namespace"], image_info["name"], "${component}", "${version}")
@@ -160,7 +158,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
deployment_type = self.get_var("openshift_deployment_type")
if deployment_type == "origin" and "docker.io" not in regs:
regs.append("docker.io")
- elif "enterprise" in deployment_type and "registry.access.redhat.com" not in regs:
+ elif deployment_type == 'openshift-enterprise' and "registry.access.redhat.com" not in regs:
regs.append("registry.access.redhat.com")
return regs
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_volume.py b/roles/openshift_health_checker/openshift_checks/etcd_volume.py
index e5d93ff3f..79955cb2f 100644
--- a/roles/openshift_health_checker/openshift_checks/etcd_volume.py
+++ b/roles/openshift_health_checker/openshift_checks/etcd_volume.py
@@ -16,7 +16,7 @@ class EtcdVolume(OpenShiftCheck):
def is_active(self):
etcd_hosts = self.get_var("groups", "etcd", default=[]) or self.get_var("groups", "masters", default=[]) or []
- is_etcd_host = self.get_var("ansible_ssh_host") in etcd_hosts
+ is_etcd_host = self.get_var("ansible_host") in etcd_hosts
return super(EtcdVolume, self).is_active() and is_etcd_host
def run(self):
diff --git a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
index 7fc843fd7..986a01f38 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
@@ -72,7 +72,7 @@ class Elasticsearch(LoggingCheck):
for pod_name in pods_by_name.keys():
# Compare what each ES node reports as master and compare for split brain
get_master_cmd = self._build_es_curl_cmd(pod_name, "https://localhost:9200/_cat/master")
- master_name_str = self.exec_oc(get_master_cmd, [])
+ master_name_str = self.exec_oc(get_master_cmd, [], save_as_name="get_master_names.json")
master_names = (master_name_str or '').split(' ')
if len(master_names) > 1:
es_master_names.add(master_names[1])
@@ -113,7 +113,7 @@ class Elasticsearch(LoggingCheck):
# get ES cluster nodes
node_cmd = self._build_es_curl_cmd(list(pods_by_name.keys())[0], 'https://localhost:9200/_nodes')
- cluster_node_data = self.exec_oc(node_cmd, [])
+ cluster_node_data = self.exec_oc(node_cmd, [], save_as_name="get_es_nodes.json")
try:
cluster_nodes = json.loads(cluster_node_data)['nodes']
except (ValueError, KeyError):
@@ -142,7 +142,7 @@ class Elasticsearch(LoggingCheck):
errors = []
for pod_name in pods_by_name.keys():
cluster_health_cmd = self._build_es_curl_cmd(pod_name, 'https://localhost:9200/_cluster/health?pretty=true')
- cluster_health_data = self.exec_oc(cluster_health_cmd, [])
+ cluster_health_data = self.exec_oc(cluster_health_cmd, [], save_as_name='get_es_health.json')
try:
health_res = json.loads(cluster_health_data)
if not health_res or not health_res.get('status'):
@@ -171,7 +171,7 @@ class Elasticsearch(LoggingCheck):
errors = []
for pod_name in pods_by_name.keys():
df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
- disk_output = self.exec_oc(df_cmd, [])
+ disk_output = self.exec_oc(df_cmd, [], save_as_name='get_pv_diskspace.json')
lines = disk_output.splitlines()
# expecting one header looking like 'IUse% Use%' and one body line
body_re = r'\s*(\d+)%?\s+(\d+)%?\s*$'
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging.py b/roles/openshift_health_checker/openshift_checks/logging/logging.py
index ecd8adb64..05ba73ca1 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/logging.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging.py
@@ -30,14 +30,6 @@ class LoggingCheck(OpenShiftCheck):
logging_deployed = self.get_var("openshift_hosted_logging_deploy", convert=bool, default=False)
return logging_deployed and super(LoggingCheck, self).is_active() and self.is_first_master()
- def is_first_master(self):
- """Determine if running on first master. Returns: bool"""
- # Note: It would be nice to use membership in oo_first_master group, however for now it
- # seems best to avoid requiring that setup and just check this is the first master.
- hostname = self.get_var("ansible_ssh_host") or [None]
- masters = self.get_var("groups", "masters", default=None) or [None]
- return masters[0] == hostname
-
def run(self):
return {}
@@ -78,7 +70,7 @@ class LoggingCheck(OpenShiftCheck):
"""Returns the namespace in which logging is configured to deploy."""
return self.get_var("openshift_logging_namespace", default="logging")
- def exec_oc(self, cmd_str="", extra_args=None):
+ def exec_oc(self, cmd_str="", extra_args=None, save_as_name=None):
"""
Execute an 'oc' command in the remote host.
Returns: output of command and namespace,
@@ -92,7 +84,7 @@ class LoggingCheck(OpenShiftCheck):
"extra_args": list(extra_args) if extra_args else [],
}
- result = self.execute_module("ocutil", args)
+ result = self.execute_module("ocutil", args, save_as_name=save_as_name)
if result.get("failed"):
if result['result'] == '[Errno 2] No such file or directory':
raise CouldNotUseOc(
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
index d781db649..cacdf4213 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
@@ -104,7 +104,7 @@ class LoggingIndexTime(LoggingCheck):
"https://logging-es:9200/project.{namespace}*/_count?q=message:{uuid}"
)
exec_cmd = exec_cmd.format(pod_name=pod_name, namespace=self.logging_namespace(), uuid=uuid)
- result = self.exec_oc(exec_cmd, [])
+ result = self.exec_oc(exec_cmd, [], save_as_name="query_for_uuid.json")
try:
count = json.loads(result)["count"]
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index 24f1d938a..b90ebf6dd 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -49,5 +49,4 @@ class DockerHostMixin(object):
" {deps}\n{msg}"
).format(deps=',\n '.join(self.dependencies), msg=msg)
failed = result.get("failed", False) or result.get("rc", 0) != 0
- self.changed = result.get("changed", False)
return msg, failed
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
index 58864da21..f14887303 100644
--- a/roles/openshift_health_checker/test/action_plugin_test.py
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -3,10 +3,12 @@ import pytest
from ansible.playbook.play_context import PlayContext
from openshift_health_check import ActionModule, resolve_checks
-from openshift_checks import OpenShiftCheckException
+from openshift_health_check import copy_remote_file_to_dir, write_result_to_output_dir, write_to_output_file
+from openshift_checks import OpenShiftCheckException, FileToSave
-def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None, changed=False):
+def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None,
+ run_logs=None, run_files=None, changed=False, get_var_return=None):
"""Returns a new class that is compatible with OpenShiftCheck for testing."""
_name, _tags = name, tags
@@ -14,12 +16,16 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
class FakeCheck(object):
name = _name
tags = _tags or []
- changed = False
- def __init__(self, execute_module=None, task_vars=None, tmp=None):
- pass
+ def __init__(self, **_):
+ self.changed = False
+ self.failures = []
+ self.logs = run_logs or []
+ self.files_to_save = run_files or []
def is_active(self):
+ if isinstance(is_active, Exception):
+ raise is_active
return is_active
def run(self):
@@ -28,6 +34,13 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
raise run_exception
return run_return
+ def get_var(*args, **_):
+ return get_var_return
+
+ def register_failure(self, exc):
+ self.failures.append(OpenShiftCheckException(str(exc)))
+ return
+
return FakeCheck
@@ -98,13 +111,18 @@ def test_action_plugin_cannot_load_checks_with_the_same_name(plugin, task_vars,
assert failed(result, msg_has=['duplicate', 'duplicate_name', 'FakeCheck'])
-def test_action_plugin_skip_non_active_checks(plugin, task_vars, monkeypatch):
- checks = [fake_check(is_active=False)]
+@pytest.mark.parametrize('is_active, skipped_reason', [
+ (False, "Not active for this host"),
+ (Exception("borked"), "exception"),
+])
+def test_action_plugin_skip_non_active_checks(is_active, skipped_reason, plugin, task_vars, monkeypatch):
+ checks = [fake_check(is_active=is_active)]
monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
result = plugin.run(tmp=None, task_vars=task_vars)
- assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Not active for this host")
+ assert result['checks']['fake_check'].get('skipped')
+ assert skipped_reason in result['checks']['fake_check'].get('skipped_reason')
assert not failed(result)
assert not changed(result)
assert not skipped(result)
@@ -128,10 +146,21 @@ def test_action_plugin_skip_disabled_checks(to_disable, plugin, task_vars, monke
assert not skipped(result)
+def test_action_plugin_run_list_checks(monkeypatch):
+ task = FakeTask('openshift_health_check', {'checks': []})
+ plugin = ActionModule(task, None, PlayContext(), None, None, None)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
+ result = plugin.run()
+
+ assert failed(result, msg_has="Available checks")
+ assert not changed(result)
+ assert not skipped(result)
+
+
def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
check_return_value = {'ok': 'test'}
- check_class = fake_check(run_return=check_return_value)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ check_class = fake_check(run_return=check_return_value, run_files=[None])
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -145,7 +174,7 @@ def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
check_return_value = {'ok': 'test'}
check_class = fake_check(run_return=check_return_value, changed=True)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -158,9 +187,9 @@ def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
- check_return_value = {'failed': True}
+ check_return_value = {'failed': True, 'msg': 'this is a failure'}
check_class = fake_check(run_return=check_return_value)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -171,24 +200,51 @@ def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
assert not skipped(result)
-def test_action_plugin_run_check_exception(plugin, task_vars, monkeypatch):
+@pytest.mark.parametrize('exc_class, expect_traceback', [
+ (OpenShiftCheckException, False),
+ (Exception, True),
+])
+def test_action_plugin_run_check_exception(plugin, task_vars, exc_class, expect_traceback, monkeypatch):
exception_msg = 'fake check has an exception'
- run_exception = OpenShiftCheckException(exception_msg)
+ run_exception = exc_class(exception_msg)
check_class = fake_check(run_exception=run_exception, changed=True)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
assert failed(result['checks']['fake_check'], msg_has=exception_msg)
+ assert expect_traceback == ("Traceback" in result['checks']['fake_check']['msg'])
assert failed(result, msg_has=['failed'])
assert changed(result['checks']['fake_check'])
assert changed(result)
assert not skipped(result)
+def test_action_plugin_run_check_output_dir(plugin, task_vars, tmpdir, monkeypatch):
+ check_class = fake_check(
+ run_return={},
+ run_logs=[('thing', 'note')],
+ run_files=[
+ FileToSave('save.file', 'contents', None),
+ FileToSave('save.file', 'duplicate', None),
+ FileToSave('copy.file', None, 'foo'), # note: copy runs execute_module => exception
+ ],
+ )
+ task_vars['openshift_checks_output_dir'] = str(tmpdir)
+ check_class.get_var = lambda self, name, **_: task_vars.get(name)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
+ monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
+
+ plugin.run(tmp=None, task_vars=task_vars)
+ assert any(path.basename == task_vars['ansible_host'] for path in tmpdir.listdir())
+ assert any(path.basename == 'fake_check.log.json' for path in tmpdir.visit())
+ assert any(path.basename == 'save.file' for path in tmpdir.visit())
+ assert any(path.basename == 'save.file.2' for path in tmpdir.visit())
+
+
def test_action_plugin_resolve_checks_exception(plugin, task_vars, monkeypatch):
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -254,3 +310,38 @@ def test_resolve_checks_failure(names, all_checks, words_in_exception):
resolve_checks(names, all_checks)
for word in words_in_exception:
assert word in str(excinfo.value)
+
+
+@pytest.mark.parametrize('give_output_dir, result, expect_file', [
+ (False, None, False),
+ (True, dict(content="c3BhbQo=", encoding="base64"), True),
+ (True, dict(content="encoding error", encoding="base64"), False),
+ (True, dict(content="spam", no_encoding=None), True),
+ (True, dict(failed=True, msg="could not slurp"), False),
+])
+def test_copy_remote_file_to_dir(give_output_dir, result, expect_file, tmpdir):
+ check = fake_check()()
+ check.execute_module = lambda *args, **_: result
+ copy_remote_file_to_dir(check, "remote_file", str(tmpdir) if give_output_dir else "", "local_file")
+ assert expect_file == any(path.basename == "local_file" for path in tmpdir.listdir())
+
+
+def test_write_to_output_exceptions(tmpdir, monkeypatch, capsys):
+
+ class Spam(object):
+ def __str__(self):
+ raise Exception("break str")
+
+ test = {1: object(), 2: Spam()}
+ test[3] = test
+ write_result_to_output_dir(str(tmpdir), test)
+ assert "Error writing" in test["output_files"]
+
+ output_dir = tmpdir.join("eggs")
+ output_dir.write("spam") # so now it's not a dir
+ write_to_output_file(str(output_dir), "somefile", "somedata")
+ assert "Could not write" in capsys.readouterr()[1]
+
+ monkeypatch.setattr("openshift_health_check.prepare_output_dir", lambda *_: False)
+ write_result_to_output_dir(str(tmpdir), test)
+ assert "Error creating" in test["output_files"]
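The output-dir test above pins down two behaviors of the new plumbing: results are written under a per-host subdirectory named for ansible_host, and colliding FileToSave names get numeric suffixes (save.file, then save.file.2). A sketch of that suffixing convention, as an illustration inferred from the test rather than the plugin's actual code:

    import os

    def unique_filename(directory, filename):
        # Illustrative only: append .2, .3, ... until the name is free.
        candidate, counter = filename, 1
        while os.path.exists(os.path.join(directory, candidate)):
            counter += 1
            candidate = "{}.{}".format(filename, counter)
        return candidate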
diff --git a/roles/openshift_health_checker/test/diagnostics_test.py b/roles/openshift_health_checker/test/diagnostics_test.py
new file mode 100644
index 000000000..800889fa7
--- /dev/null
+++ b/roles/openshift_health_checker/test/diagnostics_test.py
@@ -0,0 +1,50 @@
+import pytest
+
+from openshift_checks.diagnostics import DiagnosticCheck, OpenShiftCheckException
+
+
+@pytest.fixture()
+def task_vars():
+ return dict(
+ openshift=dict(
+ common=dict(config_base="/etc/origin/")
+ )
+ )
+
+
+def test_module_succeeds(task_vars):
+ check = DiagnosticCheck(lambda *_: {"result": "success"}, task_vars)
+ check.is_first_master = lambda: True
+ assert check.is_active()
+ check.exec_diagnostic("spam")
+ assert not check.failures
+
+
+def test_oc_not_there(task_vars):
+ def exec_module(*_):
+ return {"failed": True, "result": "[Errno 2] No such file or directory"}
+
+ check = DiagnosticCheck(exec_module, task_vars)
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.exec_diagnostic("spam")
+ assert excinfo.value.name == "OcNotFound"
+
+
+def test_module_fails(task_vars):
+ def exec_module(*_):
+ return {"failed": True, "result": "something broke"}
+
+ check = DiagnosticCheck(exec_module, task_vars)
+ check.exec_diagnostic("spam")
+ assert check.failures and check.failures[0].name == "OcDiagFailed"
+
+
+def test_names_executed(task_vars):
+ task_vars["openshift_check_diagnostics"] = diagnostics = "ConfigContexts,spam,,eggs"
+
+ def exec_module(module, args, *_):
+ assert "extra_args" in args
+ assert args["extra_args"][0] in diagnostics
+ return {"result": "success"}
+
+ DiagnosticCheck(exec_module, task_vars).run()
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
index f4fd2dfed..9ae679b79 100644
--- a/roles/openshift_health_checker/test/disk_availability_test.py
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -183,11 +183,12 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a
ansible_mounts=ansible_mounts,
)
- result = DiskAvailability(fake_execute_module, task_vars).run()
+ check = DiskAvailability(fake_execute_module, task_vars)
+ check.run()
- assert result['failed']
+ assert check.failures
for chunk in 'below recommended'.split() + expect_chunks:
- assert chunk in result.get('msg', '')
+ assert chunk in str(check.failures[0])
@pytest.mark.parametrize('name,group_names,context,ansible_mounts,failed,extra_words', [
@@ -237,11 +238,11 @@ def test_min_required_space_changes_with_upgrade_context(name, group_names, cont
)
check = DiskAvailability(fake_execute_module, task_vars)
- result = check.run()
+ check.run()
- assert result.get("failed", False) == failed
+ assert bool(check.failures) == failed
for word in extra_words:
- assert word in result.get('msg', '')
+ assert word in str(check.failures[0])
def fake_execute_module(*args):
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index 6a7c16c7e..952fa9aa6 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -23,8 +23,6 @@ def task_vars():
@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
("origin", True, [], True),
("openshift-enterprise", True, [], True),
- ("enterprise", True, [], False),
- ("online", True, [], False),
("invalid", True, [], False),
("", True, [], False),
("origin", False, [], False),
@@ -103,6 +101,39 @@ def test_all_images_unavailable(task_vars):
assert "required Docker images are not available" in actual['msg']
+def test_no_known_registries():
+ def execute_module(module_name=None, *_):
+ if module_name == "command":
+ return {
+ 'failed': True,
+ }
+
+ return {
+ 'changed': False,
+ }
+
+ def mock_known_docker_registries():
+ return []
+
+ dia = DockerImageAvailability(execute_module, task_vars=dict(
+ openshift=dict(
+ common=dict(
+ service_type='origin',
+ is_containerized=False,
+ is_atomic=False,
+ ),
+ docker=dict(additional_registries=["docker.io"]),
+ ),
+ openshift_deployment_type="openshift-enterprise",
+ openshift_image_tag='latest',
+ group_names=['nodes', 'masters'],
+ ))
+ dia.known_docker_registries = mock_known_docker_registries
+ actual = dia.run()
+ assert actual['failed']
+ assert "Unable to retrieve any docker registries." in actual['msg']
+
+
@pytest.mark.parametrize("message,extra_words", [
(
"docker image update failure",
diff --git a/roles/openshift_health_checker/test/elasticsearch_test.py b/roles/openshift_health_checker/test/elasticsearch_test.py
index 09bacd9ac..3fa5e8929 100644
--- a/roles/openshift_health_checker/test/elasticsearch_test.py
+++ b/roles/openshift_health_checker/test/elasticsearch_test.py
@@ -72,7 +72,7 @@ def test_check_elasticsearch():
assert_error_in_list('NoRunningPods', excinfo.value)
# canned oc responses to match so all the checks pass
- def exec_oc(cmd, args):
+ def exec_oc(cmd, args, **_):
if '_cat/master' in cmd:
return 'name logging-es'
elif '/_nodes' in cmd:
@@ -97,7 +97,7 @@ def test_check_running_es_pods():
def test_check_elasticsearch_masters():
pods = [plain_es_pod]
- check = canned_elasticsearch(task_vars_config_base, lambda *_: plain_es_pod['_test_master_name_str'])
+ check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: plain_es_pod['_test_master_name_str'])
assert not check.check_elasticsearch_masters(pods_by_name(pods))
@@ -117,7 +117,7 @@ def test_check_elasticsearch_masters():
])
def test_check_elasticsearch_masters_error(pods, expect_error):
test_pods = list(pods)
- check = canned_elasticsearch(task_vars_config_base, lambda *_: test_pods.pop(0)['_test_master_name_str'])
+ check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: test_pods.pop(0)['_test_master_name_str'])
assert_error_in_list(expect_error, check.check_elasticsearch_masters(pods_by_name(pods)))
@@ -129,7 +129,7 @@ es_node_list = {
def test_check_elasticsearch_node_list():
- check = canned_elasticsearch(task_vars_config_base, lambda *_: json.dumps(es_node_list))
+ check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: json.dumps(es_node_list))
assert not check.check_elasticsearch_node_list(pods_by_name([plain_es_pod]))
@@ -151,13 +151,13 @@ def test_check_elasticsearch_node_list():
),
])
def test_check_elasticsearch_node_list_errors(pods, node_list, expect_error):
- check = canned_elasticsearch(task_vars_config_base, lambda cmd, args: json.dumps(node_list))
+ check = canned_elasticsearch(task_vars_config_base, lambda cmd, args, **_: json.dumps(node_list))
assert_error_in_list(expect_error, check.check_elasticsearch_node_list(pods_by_name(pods)))
def test_check_elasticsearch_cluster_health():
test_health_data = [{"status": "green"}]
- check = canned_elasticsearch(exec_oc=lambda *_: json.dumps(test_health_data.pop(0)))
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: json.dumps(test_health_data.pop(0)))
assert not check.check_es_cluster_health(pods_by_name([plain_es_pod]))
@@ -175,12 +175,12 @@ def test_check_elasticsearch_cluster_health():
])
def test_check_elasticsearch_cluster_health_errors(pods, health_data, expect_error):
test_health_data = list(health_data)
- check = canned_elasticsearch(exec_oc=lambda *_: json.dumps(test_health_data.pop(0)))
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: json.dumps(test_health_data.pop(0)))
assert_error_in_list(expect_error, check.check_es_cluster_health(pods_by_name(pods)))
def test_check_elasticsearch_diskspace():
- check = canned_elasticsearch(exec_oc=lambda *_: 'IUse% Use%\n 3% 4%\n')
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: 'IUse% Use%\n 3% 4%\n')
assert not check.check_elasticsearch_diskspace(pods_by_name([plain_es_pod]))
@@ -199,5 +199,5 @@ def test_check_elasticsearch_diskspace():
),
])
def test_check_elasticsearch_diskspace_errors(disk_data, expect_error):
- check = canned_elasticsearch(exec_oc=lambda *_: disk_data)
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: disk_data)
assert_error_in_list(expect_error, check.check_elasticsearch_diskspace(pods_by_name([plain_es_pod])))
diff --git a/roles/openshift_health_checker/test/logging_check_test.py b/roles/openshift_health_checker/test/logging_check_test.py
index 1a1c190f6..59c703214 100644
--- a/roles/openshift_health_checker/test/logging_check_test.py
+++ b/roles/openshift_health_checker/test/logging_check_test.py
@@ -98,21 +98,19 @@ def test_oc_failure(problem, expect):
assert expect in str(excinfo)
-groups_with_first_master = dict(masters=['this-host', 'other-host'])
-groups_with_second_master = dict(masters=['other-host', 'this-host'])
-groups_not_a_master = dict(masters=['other-host'])
+groups_with_first_master = dict(oo_first_master=['this-host'])
+groups_not_a_master = dict(oo_first_master=['other-host'], oo_masters=['other-host'])
@pytest.mark.parametrize('groups, logging_deployed, is_active', [
(groups_with_first_master, True, True),
(groups_with_first_master, False, False),
(groups_not_a_master, True, False),
- (groups_with_second_master, True, False),
(groups_not_a_master, True, False),
])
def test_is_active(groups, logging_deployed, is_active):
task_vars = dict(
- ansible_ssh_host='this-host',
+ ansible_host='this-host',
groups=groups,
openshift_hosted_logging_deploy=logging_deployed,
)
diff --git a/roles/openshift_health_checker/test/logging_index_time_test.py b/roles/openshift_health_checker/test/logging_index_time_test.py
index 22566b295..c48ade9b8 100644
--- a/roles/openshift_health_checker/test/logging_index_time_test.py
+++ b/roles/openshift_health_checker/test/logging_index_time_test.py
@@ -102,7 +102,7 @@ def test_with_running_pods():
),
], ids=lambda argval: argval[0])
def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout):
- check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response))
check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout)
@@ -131,7 +131,7 @@ def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout):
)
], ids=lambda argval: argval[0])
def test_wait_until_cmd_or_err(name, json_response, timeout, expect_error):
- check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response))
with pytest.raises(OpenShiftCheckException) as error:
check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, SAMPLE_UUID, timeout)
@@ -139,7 +139,7 @@ def test_wait_until_cmd_or_err(name, json_response, timeout, expect_error):
def test_curl_kibana_with_uuid():
- check = canned_loggingindextime(lambda *_: json.dumps({"statusCode": 404}))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps({"statusCode": 404}))
check.generate_uuid = lambda: SAMPLE_UUID
assert SAMPLE_UUID == check.curl_kibana_with_uuid(plain_running_kibana_pod)
@@ -161,7 +161,7 @@ def test_curl_kibana_with_uuid():
),
], ids=lambda argval: argval[0])
def test_failed_curl_kibana_with_uuid(name, json_response, expect_error):
- check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response))
check.generate_uuid = lambda: SAMPLE_UUID
with pytest.raises(OpenShiftCheckException) as error:
diff --git a/roles/openshift_health_checker/test/openshift_check_test.py b/roles/openshift_health_checker/test/openshift_check_test.py
index 789784c77..bc0c3b26c 100644
--- a/roles/openshift_health_checker/test/openshift_check_test.py
+++ b/roles/openshift_health_checker/test/openshift_check_test.py
@@ -106,13 +106,40 @@ def test_get_var_convert(task_vars, keys, convert, expected):
assert dummy_check(task_vars).get_var(*keys, convert=convert) == expected
-@pytest.mark.parametrize("keys, convert", [
- (("bar", "baz"), int),
- (("bar.baz"), float),
- (("foo"), "bogus"),
- (("foo"), lambda a, b: 1),
- (("foo"), lambda a: 1 / 0),
+def convert_oscexc(_):
+ raise OpenShiftCheckException("known failure")
+
+
+def convert_exc(_):
+ raise Exception("failure unknown")
+
+
+@pytest.mark.parametrize("keys, convert, expect_text", [
+ (("bar", "baz"), int, "Cannot convert"),
+ (("bar.baz",), float, "Cannot convert"),
+ (("foo",), "bogus", "TypeError"),
+ (("foo",), lambda a, b: 1, "TypeError"),
+ (("foo",), lambda a: 1 / 0, "ZeroDivisionError"),
+ (("foo",), convert_oscexc, "known failure"),
+ (("foo",), convert_exc, "failure unknown"),
])
-def test_get_var_convert_error(task_vars, keys, convert):
- with pytest.raises(OpenShiftCheckException):
+def test_get_var_convert_error(task_vars, keys, convert, expect_text):
+ with pytest.raises(OpenShiftCheckException) as excinfo:
dummy_check(task_vars).get_var(*keys, convert=convert)
+ assert expect_text in str(excinfo.value)
+
+
+def test_register(task_vars):
+ check = dummy_check(task_vars)
+
+ check.register_failure(OpenShiftCheckException("spam"))
+ assert "spam" in str(check.failures[0])
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.register_file("spam") # no file contents specified
+ assert "not specified" in str(excinfo.value)
+
+ # normally execute_module registers the result file; test disabling that
+ check._execute_module = lambda *args, **_: dict()
+ check.execute_module("eggs", module_args={}, register=False)
+ assert not check.files_to_save
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
index e1bf29d2a..602f32989 100644
--- a/roles/openshift_health_checker/test/ovs_version_test.py
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -50,7 +50,7 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
)
- return_value = object()
+ return_value = {} # note: check.execute_module modifies return hash contents
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'rpm_version'
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index 8aa87ca59..b34e8fbfc 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -49,7 +49,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
),
])
def test_package_availability(task_vars, must_have_packages, must_not_have_packages):
- return_value = object()
+ return_value = {}
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'check_yum_update'
diff --git a/roles/openshift_health_checker/test/package_update_test.py b/roles/openshift_health_checker/test/package_update_test.py
index 7d9035a36..85d3c9cab 100644
--- a/roles/openshift_health_checker/test/package_update_test.py
+++ b/roles/openshift_health_checker/test/package_update_test.py
@@ -2,7 +2,7 @@ from openshift_checks.package_update import PackageUpdate
def test_package_update():
- return_value = object()
+ return_value = {}
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'check_yum_update'
diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/registry/secure.yml
index a8a6f6fc8..434b679df 100644
--- a/roles/openshift_hosted/tasks/registry/secure.yml
+++ b/roles/openshift_hosted/tasks/registry/secure.yml
@@ -1,7 +1,7 @@
---
- name: Configure facts for docker-registry
set_fact:
- openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routecertificates, {}) }}"
+ openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift_hosted_registry_routecertificates, {}) }}"
openshift_hosted_registry_routehost: "{{ ('routehost' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routehost, False) }}"
openshift_hosted_registry_routetermination: "{{ ('routetermination' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routetermination, 'passthrough') }}"
diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml
index 631bf3e2a..53d1a8bc7 100644
--- a/roles/openshift_hosted_facts/tasks/main.yml
+++ b/roles/openshift_hosted_facts/tasks/main.yml
@@ -8,9 +8,10 @@
- name: Set hosted facts
openshift_facts:
- role: hosted
+ role: "{{ item }}"
openshift_env: "{{ hostvars
| oo_merge_hostvars(vars, inventory_hostname)
| oo_openshift_env }}"
openshift_env_structures:
- 'openshift.hosted.router.*'
+ with_items: [hosted, logging, loggingops, metrics]
diff --git a/roles/openshift_hosted_logging/README.md b/roles/openshift_hosted_logging/README.md
deleted file mode 100644
index 680303853..000000000
--- a/roles/openshift_hosted_logging/README.md
+++ /dev/null
@@ -1,40 +0,0 @@
-###Required vars:
-
-- openshift_hosted_logging_hostname: kibana.example.com
-- openshift_hosted_logging_elasticsearch_cluster_size: 1
-- openshift_hosted_logging_master_public_url: https://localhost:8443
-
-###Optional vars:
-- openshift_hosted_logging_image_prefix: logging image prefix. No default. Use this to specify an alternate image repository e.g. my.private.repo:5000/private_openshift/
-- target_registry: DEPRECATED - use openshift_hosted_logging_image_prefix instead
-- openshift_hosted_logging_image_version: logging image version suffix. Defaults to the current version of the deployed software.
-- openshift_hosted_logging_secret_vars: (defaults to nothing=/dev/null) kibana.crt=/etc/origin/master/ca.crt kibana.key=/etc/origin/master/ca.key ca.crt=/etc/origin/master/ca.crt ca.key=/etc/origin/master/ca.key
-- openshift_hosted_logging_fluentd_replicas: (defaults to 1) 3
-- openshift_hosted_logging_cleanup: (defaults to no) Set this to 'yes' in order to cleanup logging components instead of deploying.
-- openshift_hosted_logging_elasticsearch_instance_ram: Amount of RAM to reserve per ElasticSearch instance (e.g. 1024M, 2G). Defaults to 8GiB; must be at least 512M (Ref.: [ElasticSearch documentation](https://www.elastic.co/guide/en/elasticsearch/guide/current/hardware.html\#\_memory).
-- openshift_hosted_logging_elasticsearch_pvc_size: Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead.
-- openshift_hosted_logging_elasticsearch_pvc_prefix: Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size `openshift_hosted_logging_elasticsearch_pvc_size`.
-- openshift_hosted_logging_elasticsearch_pvc_dynamic: Set to `true` to have created PersistentVolumeClaims annotated such that their backing storage can be dynamically provisioned (if that is available for your cluster).
-- openshift_hosted_logging_elasticsearch_storage_group: Number of a supplemental group ID for access to Elasticsearch storage volumes; backing volumes should allow access by this group ID (defaults to 65534).
-- openshift_hosted_logging_elasticsearch_nodeselector: Specify the nodeSelector that Elasticsearch should be use (label=value)
-- openshift_hosted_logging_fluentd_nodeselector: The nodeSelector used to determine which nodes to apply the `openshift_hosted_logging_fluentd_nodeselector_label` label to.
-- openshift_hosted_logging_fluentd_nodeselector_label: The label applied to nodes included in the Fluentd DaemonSet. Defaults to "logging-infra-fluentd=true".
-- openshift_hosted_logging_kibana_nodeselector: Specify the nodeSelector that Kibana should be use (label=value)
-- openshift_hosted_logging_curator_nodeselector: Specify the nodeSelector that Curator should be use (label=value)
-- openshift_hosted_logging_enable_ops_cluster: If "true", configure a second ES cluster and Kibana for ops logs.
-- openshift_hosted_logging_use_journal: *DEPRECATED - DO NOT USE*
-- openshift_hosted_logging_journal_source: By default, if this param is unset or empty, logging will use `/var/log/journal` if it exists, or `/run/log/journal` if not. You can use this param to force logging to use a different location.
-- openshift_hosted_logging_journal_read_from_head: Set to `true` to have fluentd read from the beginning of the journal, to get historical log data. Default is `false`. *WARNING* Using `true` may take several minutes or even hours, depending on the size of the journal, until any new records show up in Elasticsearch, and will cause fluentd to consume a lot of CPU and RAM resources.
-
-When `openshift_hosted_logging_enable_ops_cluster` is `True`, there are some
-additional vars. These work the same as above for their non-ops counterparts,
-but apply to the OPS cluster instance:
-- openshift_hosted_logging_ops_hostname: kibana-ops.example.com
-- openshift_hosted_logging_elasticsearch_ops_cluster_size
-- openshift_hosted_logging_elasticsearch_ops_instance_ram
-- openshift_hosted_logging_elasticsearch_ops_pvc_size
-- openshift_hosted_logging_elasticsearch_ops_pvc_prefix
-- openshift_hosted_logging_elasticsearch_ops_pvc_dynamic
-- openshift_hosted_logging_elasticsearch_ops_nodeselector
-- openshift_hosted_logging_kibana_ops_nodeselector
-- openshift_hosted_logging_curator_ops_nodeselector
diff --git a/roles/openshift_hosted_logging/defaults/main.yml b/roles/openshift_hosted_logging/defaults/main.yml
deleted file mode 100644
index a01f24df8..000000000
--- a/roles/openshift_hosted_logging/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted"
diff --git a/roles/openshift_hosted_logging/handlers/main.yml b/roles/openshift_hosted_logging/handlers/main.yml
deleted file mode 100644
index d7e83fe9a..000000000
--- a/roles/openshift_hosted_logging/handlers/main.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Verify API Server
- # Using curl here since the uri module requires python-httplib2 and
- # wait_for port doesn't provide health information.
- command: >
- curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
- --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
- {{ openshift.master.api_url }}/healthz/ready
- args:
- # Disables the following warning:
- # Consider using get_url or uri module rather than running curl
- warn: no
- register: api_available_output
- until: api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- changed_when: false
diff --git a/roles/openshift_hosted_logging/meta/main.yaml b/roles/openshift_hosted_logging/meta/main.yaml
deleted file mode 100644
index ab07a77c1..000000000
--- a/roles/openshift_hosted_logging/meta/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: openshift_master_facts }
diff --git a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml b/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml
deleted file mode 100644
index 70b0d67a4..000000000
--- a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
----
-- name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
-
-- name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
- changed_when: False
-
-- name: "Checking for logging project"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging"
- register: logging_project
- failed_when: "'FAILED' in logging_project.stderr"
-
-- name: "Changing projects"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging"
-
-
-- name: "Cleanup any previous logging infrastructure"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all --selector logging-infra={{ item }}"
- with_items:
- - kibana
- - fluentd
- - elasticsearch
- ignore_errors: yes
-
-- name: "Cleanup existing support infrastructure"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all,sa,oauthclient --selector logging-infra=support"
- ignore_errors: yes
-
-- name: "Cleanup existing secrets"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete secret logging-fluentd logging-elasticsearch logging-es-proxy logging-kibana logging-kibana-proxy logging-kibana-ops-proxy"
- ignore_errors: yes
- register: clean_result
- failed_when: clean_result.rc == 1 and 'not found' not in clean_result.stderr
-
-- name: "Cleanup existing logging deployers"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete pods --all"
-
-
-- name: "Cleanup logging project"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete project logging"
-
-
-- name: "Remove deployer template"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete template logging-deployer-template -n openshift"
- register: delete_output
- failed_when: delete_output.rc == 1 and 'exists' not in delete_output.stderr
-
-
-- name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
-
-- debug: msg="Success!"
diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
deleted file mode 100644
index 78b624109..000000000
--- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
+++ /dev/null
@@ -1,177 +0,0 @@
----
-- debug: msg="WARNING target_registry is deprecated, use openshift_hosted_logging_image_prefix instead"
- when: target_registry is defined and target_registry
-
-- fail: msg="This role requires the following vars to be defined. openshift_hosted_logging_master_public_url, openshift_hosted_logging_hostname, openshift_hosted_logging_elasticsearch_cluster_size"
- when: "openshift_hosted_logging_hostname is not defined or
- openshift_hosted_logging_elasticsearch_cluster_size is not defined or
- openshift_hosted_logging_master_public_url is not defined"
-
-- name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
-
-- name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
- changed_when: False
-
-- name: "Check for logging project already exists"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging -o jsonpath='{.metadata.name}'
- register: logging_project_result
- ignore_errors: True
-
-- name: "Create logging project"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging
- when: logging_project_result.stdout == ""
-
-- name: "Changing projects"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging
-
-- name: "Creating logging deployer secret"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}
- register: secret_output
- failed_when: secret_output.rc == 1 and 'exists' not in secret_output.stderr
-
-- name: "Create templates for logging accounts and the deployer"
- command: >
- {{ openshift.common.client_binary }} create --config={{ mktemp.stdout }}/admin.kubeconfig
- -f {{ hosted_base }}/logging-deployer.yaml
- --config={{ mktemp.stdout }}/admin.kubeconfig
- -n logging
- register: logging_import_template
- failed_when: "'already exists' not in logging_import_template.stderr and logging_import_template.rc != 0"
- changed_when: "'created' in logging_import_template.stdout"
-
-- name: "Process the logging accounts template"
- shell: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- process logging-deployer-account-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -
- register: process_deployer_accounts
- failed_when: process_deployer_accounts.rc == 1 and 'already exists' not in process_deployer_accounts.stderr
-
-- name: "Set permissions for logging-deployer service account"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
- policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer
- register: permiss_output
- failed_when: permiss_output.rc == 1 and 'exists' not in permiss_output.stderr
-
-- name: "Set permissions for fluentd"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
- policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
- register: fluentd_output
- failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr
-
-- name: "Set additional permissions for fluentd"
- command: >
- {{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig
- add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
- register: fluentd2_output
- failed_when: fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr
-
-- name: "Add rolebinding-reader to aggregated-logging-elasticsearch"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
- policy add-cluster-role-to-user rolebinding-reader
- system:serviceaccount:logging:aggregated-logging-elasticsearch
- register: rolebinding_reader_output
- failed_when: rolebinding_reader_output.rc == 1 and 'exists' not in rolebinding_reader_output.stderr
-
-- name: "Create ConfigMap for deployer parameters"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }}
- register: deployer_configmap_output
- failed_when: deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr
-
-- name: "Process the deployer template"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}"
- register: process_deployer
- failed_when: process_deployer.rc == 1 and 'already exists' not in process_deployer.stderr
-
-- name: "Wait for image pull and deployer pod"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods | grep logging-deployer.*Completed"
- register: result
- until: result.rc == 0
- retries: 20
- delay: 15
-
-- name: "Process imagestream template"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-imagestream-template {{ oc_new_app_values }}"
- when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
- register: process_is
- failed_when: process_is.rc == 1 and 'already exists' not in process_is.stderr
-
-- name: "Set insecured registry"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig annotate is --all openshift.io/image.insecureRepository=true --overwrite"
- when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
-
-- name: "Wait for imagestreams to become available"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get is | grep logging-fluentd"
- when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 and 'not found' not in result.stderr
- retries: 20
- delay: 5
-
-- name: "Wait for component pods to be running"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
- with_items:
- - es
- - kibana
- - curator
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 15
-
-- name: "Wait for ops component pods to be running"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
- with_items:
- - es-ops
- - kibana-ops
- - curator-ops
- when: openshift_hosted_logging_enable_ops_cluster is defined and openshift_hosted_logging_enable_ops_cluster
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 15
-
-- name: "Wait for fluentd DaemonSet to exist"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get daemonset logging-fluentd"
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 5
-
-- name: "Deploy fluentd by labeling the node"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node --overwrite=true {{ '-l' ~ openshift_hosted_logging_fluentd_nodeselector if openshift_hosted_logging_fluentd_nodeselector is defined else '--all' }} {{ openshift_hosted_logging_fluentd_nodeselector_label if openshift_hosted_logging_fluentd_nodeselector_label is defined else 'logging-infra-fluentd=true' }}"
-
-- name: "Wait for fluentd to be running"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component=fluentd | grep Running"
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 15
-
-- include: update_master_config.yaml
-
-- debug:
- msg: "Logging components deployed. Note persistent volume for elasticsearch must be setup manually"
-
-- name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
diff --git a/roles/openshift_hosted_logging/tasks/main.yaml b/roles/openshift_hosted_logging/tasks/main.yaml
deleted file mode 100644
index 42568597a..000000000
--- a/roles/openshift_hosted_logging/tasks/main.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Cleanup logging deployment
- include: "{{ role_path }}/tasks/cleanup_logging.yaml"
- when: openshift_hosted_logging_cleanup | default(false) | bool
-
-- name: Deploy logging
- include: "{{ role_path }}/tasks/deploy_logging.yaml"
- when: not openshift_hosted_logging_cleanup | default(false) | bool
diff --git a/roles/openshift_hosted_logging/tasks/update_master_config.yaml b/roles/openshift_hosted_logging/tasks/update_master_config.yaml
deleted file mode 100644
index 1122e059c..000000000
--- a/roles/openshift_hosted_logging/tasks/update_master_config.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Adding Kibana route information to loggingPublicURL
- modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: assetConfig.loggingPublicURL
- yaml_value: "https://{{ logging_hostname }}"
- notify: restart master
diff --git a/roles/openshift_hosted_logging/vars/main.yaml b/roles/openshift_hosted_logging/vars/main.yaml
deleted file mode 100644
index 4b350b244..000000000
--- a/roles/openshift_hosted_logging/vars/main.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-tr_or_ohlip: "{{ openshift_hosted_logging_deployer_prefix | default(target_registry) | default(None) }}"
-ip_kv: "{{ '-p IMAGE_PREFIX=' ~ tr_or_ohlip | quote if tr_or_ohlip != '' else '' }}"
-iv_kv: "{{ '-p IMAGE_VERSION=' ~ openshift_hosted_logging_deployer_version | quote if openshift_hosted_logging_deployer_version | default(none) is not none else '' }}"
-oc_new_app_values: "{{ ip_kv }} {{ iv_kv }}"
-openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
-kh_cmap_param: "{{ '--from-literal kibana-hostname=' ~ openshift_hosted_logging_hostname | quote if openshift_hosted_logging_hostname | default(none) is not none else '' }}"
-kh_ops_cmap_param: "{{ '--from-literal kibana-ops-hostname=' ~ openshift_hosted_logging_ops_hostname | quote if openshift_hosted_logging_ops_hostname | default(none) is not none else '' }}"
-pmu_cmap_param: "{{ '--from-literal public-master-url=' ~ openshift_hosted_logging_master_public_url | quote if openshift_hosted_logging_master_public_url | default(none) is not none else '' }}"
-es_cs_cmap_param: "{{ '--from-literal es-cluster-size=' ~ openshift_hosted_logging_elasticsearch_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_cluster_size | default(none) is not none else '' }}"
-es_ops_cs_cmap_param: "{{ '--from-literal es-ops-cluster-size=' ~ openshift_hosted_logging_elasticsearch_ops_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_ops_cluster_size | default(none) is not none else '' }}"
-es_ir_cmap_param: "{{ '--from-literal es-instance-ram=' ~ openshift_hosted_logging_elasticsearch_instance_ram | quote if openshift_hosted_logging_elasticsearch_instance_ram | default(none) is not none else '' }}"
-es_ops_ir_cmap_param: "{{ '--from-literal es-ops-instance-ram=' ~ openshift_hosted_logging_elasticsearch_ops_instance_ram | quote if openshift_hosted_logging_elasticsearch_ops_instance_ram | default(none) is not none else '' }}"
-es_pvcs_cmap_param: "{{ '--from-literal es-pvc-size=' ~ openshift_hosted_logging_elasticsearch_pvc_size | quote if openshift_hosted_logging_elasticsearch_pvc_size | default(none) is not none else '' }}"
-es_ops_pvcs_cmap_param: "{{ '--from-literal es-ops-pvc-size=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_size | quote if openshift_hosted_logging_elasticsearch_ops_pvc_size | default(none) is not none else '' }}"
-es_pvcp_cmap_param: "{{ '--from-literal es-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_pvc_prefix | default(none) is not none else '' }}"
-es_ops_pvcp_cmap_param: "{{ '--from-literal es-ops-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default(none) is not none else '' }}"
-es_pvcd_cmap_param: "{{ '--from-literal es-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_pvc_dynamic | default(none) is not none else '' }}"
-es_ops_pvcd_cmap_param: "{{ '--from-literal es-ops-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(none) is not none else '' }}"
-es_sg_cmap_param: "{{ '--from-literal storage-group=' ~ openshift_hosted_logging_elasticsearch_storage_group | string | quote if openshift_hosted_logging_elasticsearch_storage_group | default(none) is not none else '' }}"
-es_ns_cmap_param: "{{ '--from-literal es-nodeselector=' ~ openshift_hosted_logging_elasticsearch_nodeselector | quote if openshift_hosted_logging_elasticsearch_nodeselector | default(none) is not none else '' }}"
-es_ops_ns_cmap_param: "{{ '--from-literal es-ops-nodeselector=' ~ openshift_hosted_logging_elasticsearch_ops_nodeselector | quote if openshift_hosted_logging_elasticsearch_ops_nodeselector | default(none) is not none else '' }}"
-fd_ns_cmap_param: "{{ '--from-literal fluentd-nodeselector=' ~ openshift_hosted_logging_fluentd_nodeselector_label | quote if openshift_hosted_logging_fluentd_nodeselector_label | default(none) is not none else 'logging-infra-fluentd=true' }}"
-kb_ns_cmap_param: "{{ '--from-literal kibana-nodeselector=' ~ openshift_hosted_logging_kibana_nodeselector | quote if openshift_hosted_logging_kibana_nodeselector | default(none) is not none else '' }}"
-kb_ops_ns_cmap_param: "{{ '--from-literal kibana-ops-nodeselector=' ~ openshift_hosted_logging_kibana_ops_nodeselector | quote if openshift_hosted_logging_kibana_ops_nodeselector | default(none) is not none else '' }}"
-cr_ns_cmap_param: "{{ '--from-literal curator-nodeselector=' ~ openshift_hosted_logging_curator_nodeselector | quote if openshift_hosted_logging_curator_nodeselector | default(none) is not none else '' }}"
-cr_ops_ns_cmap_param: "{{ '--from-literal curator-ops-nodeselector=' ~ openshift_hosted_logging_curator_ops_nodeselector | quote if openshift_hosted_logging_curator_ops_nodeselector | default(none) is not none else '' }}"
-ops_cmap_param: "{{ '--from-literal enable-ops-cluster=' ~ openshift_hosted_logging_enable_ops_cluster | string | lower | quote if openshift_hosted_logging_enable_ops_cluster | default(none) is not none else '' }}"
-journal_source_cmap_param: "{{ '--from-literal journal-source=' ~ openshift_hosted_logging_journal_source | quote if openshift_hosted_logging_journal_source | default(none) is not none else '' }}"
-journal_read_from_head_cmap_param: "{{ '--from-literal journal-read-from-head=' ~ openshift_hosted_logging_journal_read_from_head | string | lower | quote if openshift_hosted_logging_journal_read_from_head | default(none) is not none else '' }}"
-ips_cmap_param: "{{ '--from-literal image-pull-secret=' ~ openshift_hosted_logging_image_pull_secret | quote if openshift_hosted_logging_image_pull_secret | default(none) is not none else '' }}"
-deployer_cmap_params: "{{ kh_cmap_param }} {{ kh_ops_cmap_param }} {{ pmu_cmap_param }} {{ es_cs_cmap_param }} {{ es_ir_cmap_param }} {{ es_pvcs_cmap_param }} {{ es_pvcp_cmap_param }} {{ es_pvcd_cmap_param }} {{ es_ops_cs_cmap_param }} {{ es_ops_ir_cmap_param }} {{ es_ops_pvcs_cmap_param }} {{ es_ops_pvcp_cmap_param }} {{ es_ops_pvcd_cmap_param }} {{ es_sg_cmap_param }} {{ es_ns_cmap_param }} {{ es_ops_ns_cmap_param }} {{ fd_ns_cmap_param }} {{ kb_ns_cmap_param }} {{ kb_ops_ns_cmap_param }} {{ cr_ns_cmap_param }} {{ cr_ops_ns_cmap_param }} {{ ops_cmap_param }} {{ journal_source_cmap_param }} {{ journal_read_from_head_cmap_param }} {{ ips_cmap_param }}"
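For context, the vars deleted above were shell-argument fragments that the old role concatenated into a single `oc create configmap` call. A minimal sketch of how the "Create ConfigMap for deployer parameters" task rendered when only a couple of the inputs were set (`openshift.common.client_binary` rendering to `oc` on an RPM install; the hostname and size here are hypothetical):

```yaml
- name: Create ConfigMap for deployer parameters (rendered form)
  command: >
    oc --config={{ mktemp.stdout }}/admin.kubeconfig
    create configmap logging-deployer
    --from-literal kibana-hostname=kibana.example.com
    --from-literal es-cluster-size=2
```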
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 716f0e002..6699e2062 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -1,15 +1,17 @@
---
-openshift_logging_use_ops: "{{ openshift_hosted_logging_enable_ops_cluster | default('false') | bool }}"
+openshift_logging_use_ops: False
openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
-openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}"
+openshift_logging_master_public_url: "{{ 'https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true)) }}"
openshift_logging_namespace: logging
openshift_logging_nodeselector: null
openshift_logging_labels: {}
openshift_logging_label_key: ""
openshift_logging_label_value: ""
-openshift_logging_install_logging: True
+openshift_logging_install_logging: False
+openshift_logging_uninstall_logging: False
+
openshift_logging_purge_logging: False
-openshift_logging_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
+openshift_logging_image_pull_secret: ""
openshift_logging_curator_default_days: 30
openshift_logging_curator_run_hour: 0
@@ -19,13 +21,13 @@ openshift_logging_curator_script_log_level: INFO
openshift_logging_curator_log_level: ERROR
openshift_logging_curator_cpu_limit: 100m
openshift_logging_curator_memory_limit: null
-openshift_logging_curator_nodeselector: "{{ openshift_hosted_logging_curator_nodeselector | default('') | map_from_pairs }}"
+openshift_logging_curator_nodeselector: {}
openshift_logging_curator_ops_cpu_limit: 100m
openshift_logging_curator_ops_memory_limit: null
-openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator_ops_nodeselector | default('') | map_from_pairs }}"
+openshift_logging_curator_ops_nodeselector: {}
-openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+openshift_logging_kibana_hostname: "{{ 'kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_kibana_cpu_limit: null
openshift_logging_kibana_memory_limit: 736Mi
openshift_logging_kibana_proxy_debug: false
@@ -34,8 +36,8 @@ openshift_logging_kibana_proxy_memory_limit: 96Mi
openshift_logging_kibana_replica_count: 1
openshift_logging_kibana_edge_term_policy: Redirect
-openshift_logging_kibana_nodeselector: "{{ openshift_hosted_logging_kibana_nodeselector | default('') | map_from_pairs }}"
-openshift_logging_kibana_ops_nodeselector: "{{ openshift_hosted_logging_kibana_ops_nodeselector | default('') | map_from_pairs }}"
+openshift_logging_kibana_nodeselector: {}
+openshift_logging_kibana_ops_nodeselector: {}
#The absolute path on the control node to the cert file to use
#for the public facing kibana certs
@@ -49,7 +51,7 @@ openshift_logging_kibana_key: ""
#for the public facing kibana certs
openshift_logging_kibana_ca: ""
-openshift_logging_kibana_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_kibana_ops_cpu_limit: null
openshift_logging_kibana_ops_memory_limit: 736Mi
openshift_logging_kibana_ops_proxy_debug: false
@@ -69,12 +71,12 @@ openshift_logging_kibana_ops_key: ""
#for the public facing ops kibana certs
openshift_logging_kibana_ops_ca: ""
-openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
+openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'}
openshift_logging_fluentd_cpu_limit: 100m
openshift_logging_fluentd_memory_limit: 512Mi
openshift_logging_fluentd_es_copy: false
-openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}"
-openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}"
+openshift_logging_fluentd_journal_source: ""
+openshift_logging_fluentd_journal_read_from_head: ""
openshift_logging_fluentd_hosts: ['--all']
openshift_logging_fluentd_buffer_queue_limit: 1024
openshift_logging_fluentd_buffer_size_limit: 1m
@@ -84,18 +86,18 @@ openshift_logging_es_port: 9200
openshift_logging_es_ca: /etc/fluent/keys/ca
openshift_logging_es_client_cert: /etc/fluent/keys/cert
openshift_logging_es_client_key: /etc/fluent/keys/key
-openshift_logging_es_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
+openshift_logging_es_cluster_size: 1
openshift_logging_es_cpu_limit: 1000m
# the logging appenders for the root loggers to write ES logs. Valid values: 'file', 'console'
openshift_logging_es_log_appenders: ['file']
-openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}"
-openshift_logging_es_pv_selector: "{{ openshift_hosted_logging_storage_labels | default('') }}"
-openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}"
-openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}"
-openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
+openshift_logging_es_memory_limit: "8Gi"
+openshift_logging_es_pv_selector: "{{ openshift_logging_storage_labels | default('') }}"
+openshift_logging_es_pvc_dynamic: "{{ openshift_logging_elasticsearch_pvc_dynamic | default(False) }}"
+openshift_logging_es_pvc_size: "{{ openshift_logging_elasticsearch_pvc_size | default('') }}"
+openshift_logging_es_pvc_prefix: "{{ openshift_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
openshift_logging_es_recover_after_time: 5m
-openshift_logging_es_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}"
-openshift_logging_es_nodeselector: "{{ openshift_hosted_logging_elasticsearch_nodeselector | default('') | map_from_pairs }}"
+openshift_logging_es_storage_group: "{{ openshift_logging_elasticsearch_storage_group | default('65534') }}"
+openshift_logging_es_nodeselector: {}
# openshift_logging_es_config is a hash to be merged into the defaults for the elasticsearch.yaml
openshift_logging_es_config: {}
openshift_logging_es_number_of_shards: 1
@@ -125,16 +127,16 @@ openshift_logging_es_ops_port: 9200
openshift_logging_es_ops_ca: /etc/fluent/keys/ca
openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert
openshift_logging_es_ops_client_key: /etc/fluent/keys/key
-openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
+openshift_logging_es_ops_cluster_size: "{{ openshift_logging_elasticsearch_ops_cluster_size | default(1) }}"
openshift_logging_es_ops_cpu_limit: 1000m
-openshift_logging_es_ops_memory_limit: "{{ openshift_hosted_logging_elasticsearch_ops_instance_ram | default('8Gi') }}"
-openshift_logging_es_ops_pv_selector: "{{ openshift_hosted_loggingops_storage_labels | default('') }}"
-openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}"
-openshift_logging_es_ops_pvc_size: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_size | default('') }}"
-openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}"
+openshift_logging_es_ops_memory_limit: "8Gi"
+openshift_logging_es_ops_pv_selector: "{{ openshift_loggingops_storage_labels | default('') }}"
+openshift_logging_es_ops_pvc_dynamic: "{{ openshift_logging_elasticsearch_ops_pvc_dynamic | default(False) }}"
+openshift_logging_es_ops_pvc_size: "{{ openshift_logging_elasticsearch_ops_pvc_size | default('') }}"
+openshift_logging_es_ops_pvc_prefix: "{{ openshift_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}"
openshift_logging_es_ops_recover_after_time: 5m
-openshift_logging_es_ops_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}"
-openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}"
+openshift_logging_es_ops_storage_group: "{{ openshift_logging_elasticsearch_storage_group | default('65534') }}"
+openshift_logging_es_ops_nodeselector: {}
# for exposing es-ops to external (outside of the cluster) clients
openshift_logging_es_ops_allow_external: False
@@ -153,7 +155,7 @@ openshift_logging_es_ops_key: ""
openshift_logging_es_ops_ca_ext: ""
# storage related defaults
-openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default(['ReadWriteOnce']) }}"
+openshift_logging_storage_access_modes: ['ReadWriteOnce']
# mux - secure_forward listener service
openshift_logging_mux_allow_external: False
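With the `openshift_hosted_logging_*` indirections and their `map_from_pairs` conversions removed, the nodeselector defaults are now plain dictionaries rather than `name=value` strings. A sketch of overriding them from group_vars; the `region: infra` label is a site-specific assumption:

```yaml
openshift_logging_es_nodeselector:
  region: infra
openshift_logging_fluentd_nodeselector:
  logging-infra-fluentd: "true"
```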
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index a77df9986..de5e25061 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -134,6 +134,7 @@
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}"
@@ -165,6 +166,7 @@
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}"
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index f475024dd..0da9771c7 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -30,12 +30,13 @@
check_mode: no
become: no
-- include: "{{ role_path }}/tasks/install_logging.yaml"
- when: openshift_logging_install_logging | default(false) | bool
+- include: install_logging.yaml
+ when:
+ - openshift_logging_install_logging | default(false) | bool
-- include: "{{ role_path }}/tasks/delete_logging.yaml"
+- include: delete_logging.yaml
when:
- - not openshift_logging_install_logging | default(false) | bool
+ - openshift_logging_uninstall_logging | default(false) | bool
- name: Cleaning up local temp dir
local_action: file path="{{local_tmp.stdout}}" state=absent
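Install and uninstall are now opt-in via separate flags, both defaulting to False; a sketch of driving each path from the inventory:

```yaml
# First run: deploy the stack
openshift_logging_install_logging: true

# Later run: tear it down again
# openshift_logging_install_logging: false
# openshift_logging_uninstall_logging: true
```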
diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml
index 7ccc2fc3b..f142f89f0 100644
--- a/roles/openshift_manageiq/vars/main.yml
+++ b/roles/openshift_manageiq/vars/main.yml
@@ -3,6 +3,9 @@ manage_iq_tasks:
- resource_kind: role
resource_name: admin
user: management-admin
+- resource_kind: role
+ resource_name: admin
+ user: system:serviceaccount:management-infra:management-admin
- resource_kind: cluster-role
resource_name: management-infra-admin
user: system:serviceaccount:management-infra:management-admin
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 4c8d6fdad..73e935d3f 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -20,8 +20,8 @@ r_openshift_master_os_firewall_allow:
port: 4001/tcp
cond: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
-oreg_url: ''
-oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
+# oreg_url is defined by user input
+oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
oreg_auth_credentials_replace: False
l_bind_docker_reg_auth: False
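The reworked `oreg_host` expression only extracts a registry host when `oreg_url` is defined and its first path component contains a dot; a self-contained sketch (the registry hostname is hypothetical):

```yaml
- hosts: localhost
  gather_facts: no
  vars:
    oreg_url: "registry.example.com/openshift3/ose-${component}:${version}"
    oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
  tasks:
  - debug:
      msg: "oreg_host={{ oreg_host }}"  # -> registry.example.com
```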
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index b0237141b..a657668a9 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -14,19 +14,3 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_os_firewall
-- role: openshift_master_facts
-- role: openshift_hosted_facts
-- role: openshift_master_certificates
-- role: openshift_etcd_client_certificates
- etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
- etcd_cert_prefix: "master.etcd-"
- when: groups.oo_etcd_to_config | default([]) | length != 0
-- role: openshift_clock
-- role: openshift_cloud_provider
-- role: openshift_builddefaults
-- role: openshift_buildoverrides
-- role: nickhammond.logrotate
-- role: contiv
- contiv_role: netmaster
- when: openshift_use_contiv | default(False) | bool
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 894fe8e2b..82b4b420c 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -180,6 +180,28 @@
- name: Install the systemd units
include: systemd_units.yml
+- name: Checking for journald.conf
+ stat: path=/etc/systemd/journald.conf
+ register: journald_conf_file
+
+- name: Update journald setup
+ replace:
+ dest: /etc/systemd/journald.conf
+ regexp: '^(\#| )?{{ item.var }}=\s*.*?$'
+ replace: ' {{ item.var }}={{ item.val }}'
+ backup: yes
+ with_items: "{{ journald_vars_to_replace | default([]) }}"
+ when: journald_conf_file.stat.exists
+ register: journald_update
+
+# We need to restart journald immediately, otherwise it gets in the way of
+# subsequent steps in Ansible
+- name: Restart journald
+ systemd:
+ name: systemd-journald
+ state: restarted
+ when: journald_update | changed
+
- name: Install Master system container
include: system_container.yml
when:
@@ -200,7 +222,7 @@
- restart master api
- set_fact:
- translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1', openshift.common.version, openshift.common.deployment_type) }}"
+ translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1') }}"
# TODO: add the validate parameter when there is a validation command to run
- name: Create master config
@@ -229,8 +251,6 @@
- restart master controllers
when: openshift_master_bootstrap_enabled | default(False)
-- include: registry_auth.yml
-
- include: set_loopback_context.yml
when:
- openshift.common.version_gte_3_2_or_1_2
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
index 96b6c614e..2644f235e 100644
--- a/roles/openshift_master/tasks/registry_auth.yml
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -1,27 +1,35 @@
---
+# We need to set up some variables here because these tasks might be called
+# directly from outside the role.
+- set_fact:
+ oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
+ when: oreg_auth_credentials_path is not defined
+
+- set_fact:
+ oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
+ when: oreg_host is not defined
+
- name: Check for credentials file for registry auth
stat:
path: "{{ oreg_auth_credentials_path }}"
when: oreg_auth_user is defined
register: master_oreg_auth_credentials_stat
-# Container images may need the registry credentials
-- name: Setup ro mount of /root/.docker for containerized hosts
- set_fact:
- l_bind_docker_reg_auth: True
+- name: Create credentials for registry auth
+ command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
when:
- - openshift.common.is_containerized | bool
- oreg_auth_user is defined
- (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ register: master_oreg_auth_credentials_create
notify:
- restart master api
- restart master controllers
-- name: Create credentials for registry auth
- command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+# Container images may need the registry credentials
+- name: Setup ro mount of /root/.docker for containerized hosts
+ set_fact:
+ l_bind_docker_reg_auth: True
when:
+ - openshift.common.is_containerized | bool
- oreg_auth_user is defined
- - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- notify:
- - restart master api
- - restart master controllers
+ - (master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or master_oreg_auth_credentials_create.changed) | bool
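For reference, the inputs that exercise this path are plain inventory variables; a hypothetical set (registry host and credentials are placeholders):

```yaml
oreg_url: "registry.example.com/openshift3/ose-${component}:${version}"
oreg_auth_user: pull-user
oreg_auth_password: "{{ vault_oreg_password }}"  # assumed to be stored in a vault
oreg_auth_credentials_replace: false             # set true to force a fresh login
```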
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 7a918c57e..8de62c59a 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -17,6 +17,8 @@
r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}"
when: r_openshift_master_data_dir is not defined
+- include: registry_auth.yml
+
- name: Remove the legacy master service if it exists
include: clean_systemd_units.yml
diff --git a/roles/openshift_master/tasks/update_etcd_client_urls.yml b/roles/openshift_master/tasks/update_etcd_client_urls.yml
new file mode 100644
index 000000000..1ab105808
--- /dev/null
+++ b/roles/openshift_master/tasks/update_etcd_client_urls.yml
@@ -0,0 +1,8 @@
+---
+- yedit:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ key: 'etcdClientInfo.urls'
+ value: "{{ openshift.master.etcd_urls }}"
+ notify:
+ - restart master api
+ - restart master controllers
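`yedit` is the YAML-editing module bundled with this repo; its default key separator is `.`, so the task above rewrites `etcdClientInfo.urls` in place. A standalone sketch with hypothetical etcd member URLs:

```yaml
- yedit:
    src: /etc/origin/master/master-config.yaml
    key: etcdClientInfo.urls
    value:
    - https://etcd-0.example.com:2379
    - https://etcd-1.example.com:2379
```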
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index a184a59f6..5d4a99c97 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -20,7 +20,7 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \
-v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
{% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
-v /etc/pki:/etc/pki:ro \
- {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+ {% if l_bind_docker_reg_auth | default(False) %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
{{ openshift.master.master_image }}:${IMAGE_VERSION} start master api \
--config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index 2ded05f53..f93f3b565 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -19,7 +19,7 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \
-v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
{% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
-v /etc/pki:/etc/pki:ro \
- {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+ {% if l_bind_docker_reg_auth | default(False) %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
{{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers \
--config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index cf39b73f6..0c681c764 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -20,3 +20,22 @@ openshift_master_valid_grant_methods:
- deny
openshift_master_is_scaleup_host: False
+
+# These defaults assume forcing journald persistence, fsync to disk once
+# a second, rate-limiting to 10,000 logs a second, no forwarding to
+# syslog or wall, using 8GB of disk space maximum, using 10MB journal
+# files, keeping only a day's worth of logs per journal file, and
+# retaining journal files no longer than a month.
+journald_vars_to_replace:
+- { var: Storage, val: persistent }
+- { var: Compress, val: yes }
+- { var: SyncIntervalSec, val: 1s }
+- { var: RateLimitInterval, val: 1s }
+- { var: RateLimitBurst, val: 10000 }
+- { var: SystemMaxUse, val: 8G }
+- { var: SystemKeepFree, val: 20% }
+- { var: SystemMaxFileSize, val: 10M }
+- { var: MaxRetentionSec, val: 1month }
+- { var: MaxFileSec, val: 1day }
+- { var: ForwardToSyslog, val: no }
+- { var: ForwardToWall, val: no }
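Each entry above feeds the `replace` task added in `tasks/main.yml`; for a single variable the rendered task looks like the sketch below, turning e.g. `#Storage=auto` into ` Storage=persistent` while keeping a backup of the original journald.conf:

```yaml
- replace:
    dest: /etc/systemd/journald.conf
    regexp: '^(\#| )?Storage=\s*.*?$'
    replace: ' Storage=persistent'
    backup: yes
```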
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index 5558f55cb..f7f3ac2b1 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -6,10 +6,6 @@ Custom filters for use in openshift-master
import copy
import sys
-# pylint import-error disabled because pylint cannot find the package
-# when installed in a virtualenv
-from distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error
-
from ansible import errors
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.filter.core import to_bool as ansible_bool
@@ -82,23 +78,8 @@ class IdentityProviderBase(object):
self._allow_additional = True
@staticmethod
- def validate_idp_list(idp_list, openshift_version, deployment_type):
+ def validate_idp_list(idp_list):
''' validates a list of idps '''
- login_providers = [x.name for x in idp_list if x.login]
-
- multiple_logins_unsupported = False
- if len(login_providers) > 1:
- if deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']:
- if LooseVersion(openshift_version) < LooseVersion('3.2'):
- multiple_logins_unsupported = True
- if deployment_type in ['origin']:
- if LooseVersion(openshift_version) < LooseVersion('1.2'):
- multiple_logins_unsupported = True
- if multiple_logins_unsupported:
- raise errors.AnsibleFilterError("|failed multiple providers are "
- "not allowed for login. login "
- "providers: {0}".format(', '.join(login_providers)))
-
names = [x.name for x in idp_list]
if len(set(names)) != len(names):
raise errors.AnsibleFilterError("|failed more than one provider configured with the same name")
@@ -380,11 +361,6 @@ class OpenIDIdentityProvider(IdentityProviderOauthBase):
if 'extra_authorize_parameters' in self._idp:
self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters')
- if 'extraAuthorizeParameters' in self._idp:
- if 'include_granted_scopes' in self._idp['extraAuthorizeParameters']:
- val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes'))
- self._idp['extraAuthorizeParameters']['include_granted_scopes'] = '"true"' if val else '"false"'
-
def validate(self):
''' validate this idp instance '''
IdentityProviderOauthBase.validate(self)
@@ -476,7 +452,7 @@ class FilterModule(object):
''' Custom ansible filters for use by the openshift_master role'''
@staticmethod
- def translate_idps(idps, api_version, openshift_version, deployment_type):
+ def translate_idps(idps, api_version):
''' Translates a list of dictionaries into a valid identityProviders config '''
idp_list = []
@@ -492,7 +468,7 @@ class FilterModule(object):
idp_inst.set_provider_items()
idp_list.append(idp_inst)
- IdentityProviderBase.validate_idp_list(idp_list, openshift_version, deployment_type)
+ IdentityProviderBase.validate_idp_list(idp_list)
return u(yaml.dump([idp.to_dict() for idp in idp_list],
allow_unicode=True,
default_flow_style=False,
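With the version and deployment-type checks gone, configurations carrying more than one login provider pass validation; only duplicate provider names are rejected. A sketch of an `openshift_master_identity_providers` inventory value that flows through `translate_idps` (paths and OAuth credentials are placeholders):

```yaml
openshift_master_identity_providers:
- name: htpasswd_auth
  kind: HTPasswdPasswordIdentityProvider
  login: true
  challenge: true
  filename: /etc/origin/master/htpasswd
- name: github_auth  # a second login provider is no longer rejected
  kind: GitHubIdentityProvider
  login: true
  challenge: false
  clientID: my_client_id
  clientSecret: my_client_secret
```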
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
index c45f255af..f27eb629d 100644
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
@@ -101,7 +101,7 @@ class LookupModule(LookupBase):
{'name': 'MatchInterPodAffinity'}
])
- if short_version in ['3.5', '3.6', '3.7']:
+ if short_version in ['3.5', '3.6']:
predicates.extend([
{'name': 'NoVolumeZoneConflict'},
{'name': 'MaxEBSVolumeCount'},
@@ -114,6 +114,21 @@ class LookupModule(LookupBase):
{'name': 'CheckNodeDiskPressure'},
])
+ if short_version in ['3.7']:
+ predicates.extend([
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'MaxAzureDiskVolumeCount'},
+ {'name': 'MatchInterPodAffinity'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'},
+ {'name': 'CheckNodeDiskPressure'},
+ {'name': 'NoVolumeNodeConflict'},
+ ])
+
if regions_enabled:
region_predicate = {
'name': 'Region',
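The 3.7 list is the 3.6 set plus `NoVolumeNodeConflict`, split out so future 3.6 changes do not leak into 3.7. Assuming the plugin's optional keyword arguments (which the unit tests below also exercise), the defaults can be inspected with:

```yaml
- debug:
    msg: "{{ lookup('openshift_master_facts_default_predicates', short_version='3.7', deployment_type='origin') }}"
```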
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
index 4a28fb8f8..38a918803 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
@@ -57,6 +57,20 @@ DEFAULT_PREDICATES_1_5 = [
DEFAULT_PREDICATES_3_6 = DEFAULT_PREDICATES_1_5
+DEFAULT_PREDICATES_3_7 = [
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'MaxAzureDiskVolumeCount'},
+ {'name': 'MatchInterPodAffinity'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'},
+ {'name': 'CheckNodeDiskPressure'},
+ {'name': 'NoVolumeNodeConflict'},
+]
+
REGION_PREDICATE = {
'name': 'Region',
'argument': {
@@ -79,6 +93,8 @@ TEST_VARS = [
('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
('3.6', 'origin', DEFAULT_PREDICATES_3_6),
('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_3_6),
+ ('3.7', 'origin', DEFAULT_PREDICATES_3_7),
+ ('3.7', 'openshift-enterprise', DEFAULT_PREDICATES_3_7),
]
diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md
index 1f10de4a2..ed698daca 100644
--- a/roles/openshift_metrics/README.md
+++ b/roles/openshift_metrics/README.md
@@ -39,6 +39,8 @@ For default values, see [`defaults/main.yaml`](defaults/main.yaml).
- `openshift_metrics_hawkular_replicas:` The number of replicas for Hawkular metrics.
+- `openshift_metrics_hawkular_route_annotations`: Dictionary with annotations for the Hawkular route.
+
- `openshift_metrics_cassandra_replicas`: The number of Cassandra nodes to deploy for the
initial cluster.
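A hypothetical value for the new variable; the annotation keys are illustrative, not defaults:

```yaml
openshift_metrics_hawkular_route_annotations:
  kubernetes.io/tls-acme: "true"
  haproxy.router.openshift.io/timeout: 5m
```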
diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml
index d9a17ae7f..ed0182ba8 100644
--- a/roles/openshift_metrics/defaults/main.yaml
+++ b/roles/openshift_metrics/defaults/main.yaml
@@ -1,6 +1,7 @@
---
openshift_metrics_start_cluster: True
-openshift_metrics_install_metrics: True
+openshift_metrics_install_metrics: False
+openshift_metrics_uninstall_metrics: False
openshift_metrics_startup_timeout: 500
openshift_metrics_hawkular_replicas: 1
@@ -12,11 +13,12 @@ openshift_metrics_hawkular_cert: ""
openshift_metrics_hawkular_key: ""
openshift_metrics_hawkular_ca: ""
openshift_metrics_hawkular_nodeselector: ""
+openshift_metrics_hawkular_route_annotations: {}
openshift_metrics_cassandra_replicas: 1
-openshift_metrics_cassandra_storage_type: "{{ openshift_hosted_metrics_storage_kind | default('emptydir') }}"
-openshift_metrics_cassandra_pvc_size: "{{ openshift_hosted_metrics_storage_volume_size | default('10Gi') }}"
-openshift_metrics_cassandra_pv_selector: "{{ openshift_hosted_metrics_storage_labels | default('') }}"
+openshift_metrics_cassandra_storage_type: "{{ openshift_metrics_storage_kind | default('emptydir') }}"
+openshift_metrics_cassandra_pvc_size: "{{ openshift_metrics_storage_volume_size | default('10Gi') }}"
+openshift_metrics_cassandra_pv_selector: "{{ openshift_metrics_storage_labels | default('') }}"
openshift_metrics_cassandra_limits_memory: 2G
openshift_metrics_cassandra_limits_cpu: null
openshift_metrics_cassandra_requests_memory: 1G
@@ -53,8 +55,8 @@ openshift_metrics_master_url: https://kubernetes.default.svc
openshift_metrics_node_id: nodename
openshift_metrics_project: openshift-infra
-openshift_metrics_cassandra_pvc_prefix: "{{ openshift_hosted_metrics_storage_volume_name | default('metrics-cassandra') }}"
-openshift_metrics_cassandra_pvc_access: "{{ openshift_hosted_metrics_storage_access_modes | default(['ReadWriteOnce']) }}"
+openshift_metrics_cassandra_pvc_prefix: "{{ openshift_metrics_storage_volume_name | default('metrics-cassandra') }}"
+openshift_metrics_cassandra_pvc_access: "{{ openshift_metrics_storage_access_modes | default(['ReadWriteOnce']) }}"
openshift_metrics_hawkular_user_write_access: False
diff --git a/roles/openshift_metrics/tasks/install_hawkular.yaml b/roles/openshift_metrics/tasks/install_hawkular.yaml
index 6b37f85ab..b63f5ca8c 100644
--- a/roles/openshift_metrics/tasks/install_hawkular.yaml
+++ b/roles/openshift_metrics/tasks/install_hawkular.yaml
@@ -40,6 +40,7 @@
dest: "{{ mktemp.stdout }}/templates/hawkular-metrics-route.yaml"
vars:
name: hawkular-metrics
+ annotations: "{{ openshift_metrics_hawkular_route_annotations }}"
labels:
metrics-infra: hawkular-metrics
host: "{{ openshift_metrics_hawkular_hostname }}"
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index eaabdd20f..0461039fc 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -43,7 +43,13 @@
check_mode: no
tags: metrics_init
-- include: "{{ (openshift_metrics_install_metrics | bool) | ternary('install_metrics.yaml','uninstall_metrics.yaml') }}"
+- include: install_metrics.yaml
+ when:
+ - openshift_metrics_install_metrics | default(false) | bool
+
+- include: uninstall_metrics.yaml
+ when:
+ - openshift_metrics_uninstall_metrics | default(false) | bool
- include: uninstall_hosa.yaml
when: not openshift_metrics_install_hawkular_agent | bool
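Mirroring the logging role, install and uninstall are now independent switches instead of a single ternary; both default to False, so an inventory enables exactly one per run:

```yaml
openshift_metrics_install_metrics: true
# openshift_metrics_uninstall_metrics: true  # flip on a later run to remove metrics
```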
diff --git a/roles/openshift_metrics/templates/route.j2 b/roles/openshift_metrics/templates/route.j2
index 423ab54a3..253d6ecf5 100644
--- a/roles/openshift_metrics/templates/route.j2
+++ b/roles/openshift_metrics/templates/route.j2
@@ -2,6 +2,9 @@ apiVersion: v1
kind: Route
metadata:
name: {{ name }}
+{% if annotations is defined %}
+ annotations: {{ annotations | to_yaml }}
+{% endif %}
{% if labels is defined and labels %}
labels:
{% for k, v in labels.iteritems() %}
diff --git a/roles/openshift_metrics/vars/default_images.yml b/roles/openshift_metrics/vars/default_images.yml
index 678c4104c..8704ddfa0 100644
--- a/roles/openshift_metrics/vars/default_images.yml
+++ b/roles/openshift_metrics/vars/default_images.yml
@@ -1,3 +1,3 @@
---
-__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('docker.io/openshift/origin-') }}"
-__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default('latest') }}"
+__openshift_metrics_image_prefix: "docker.io/openshift/origin-"
+__openshift_metrics_image_version: "latest"
diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml
index f0bdac7d2..68cdf06fe 100644
--- a/roles/openshift_metrics/vars/openshift-enterprise.yml
+++ b/roles/openshift_metrics/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
-__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default ('v3.6') }}"
+__openshift_metrics_image_prefix: "registry.access.redhat.com/openshift3/"
+__openshift_metrics_image_version: "v3.6"
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 5424a64d2..ed3516d04 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -60,7 +60,7 @@ openshift_deployment_type: origin
openshift_node_bootstrap: False
r_openshift_node_os_firewall_deny: []
-r_openshift_node_os_firewall_allow:
+default_r_openshift_node_os_firewall_allow:
- service: Kubernetes kubelet
port: 10250/tcp
- service: http
@@ -79,9 +79,11 @@ r_openshift_node_os_firewall_allow:
- service: Kubernetes service NodePort UDP
port: "{{ openshift_node_port_range | default('') }}/udp"
cond: "{{ openshift_node_port_range is defined }}"
+# Allow additional ports/rules from the inventory (openshift_node_open_ports) to be merged into the role defaults
+r_openshift_node_os_firewall_allow: "{{ default_r_openshift_node_os_firewall_allow | union(openshift_node_open_ports | default([])) }}"
-oreg_url: ''
-oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
+# oreg_url is defined by user input
+oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
oreg_auth_credentials_replace: False
l_bind_docker_reg_auth: False
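The union above lets an inventory append firewall rules without redefining the role's whole list; a hypothetical extra rule:

```yaml
openshift_node_open_ports:
- service: router stats  # free-form description
  port: 1936/tcp
```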
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 855b0a8d8..25a6fc721 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -29,8 +29,5 @@
- not (node_service_status_changed | default(false) | bool)
- not openshift_node_bootstrap
-- name: reload sysctl.conf
- command: /sbin/sysctl -p
-
- name: reload systemd units
command: systemctl daemon-reload
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index 1504d01af..2759188f3 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -2,10 +2,6 @@
- name: Install the systemd units
include: systemd_units.yml
-- name: Setup tuned
- include: tuned.yml
- static: yes
-
- name: Start and enable openvswitch service
systemd:
name: openvswitch.service
@@ -99,5 +95,9 @@
msg: Node failed to start, please inspect the logs and try again
when: node_start_result | failed
+- name: Setup tuned
+ include: tuned.yml
+ static: yes
+
- set_fact:
node_service_status_changed: "{{ node_start_result | changed }}"
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index ff8d1942c..e82fb42b8 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -2,7 +2,8 @@
- fail:
msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
when:
- - (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']
+ - (not ansible_selinux or ansible_selinux.status != 'enabled')
+ - deployment_type == 'openshift-enterprise'
- not openshift_use_crio | default(false)
- name: setup firewall
@@ -59,25 +60,22 @@
# The atomic-openshift-node service will set this parameter on
# startup, but if the network service is restarted this setting is
# lost. Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1372388
-#
-# Use lineinfile w/ a handler for this task until
-# https://github.com/ansible/ansible/pull/24277 is included in an
-# ansible release and we can use the sysctl module.
-- name: Persist net.ipv4.ip_forward sysctl entry
- lineinfile: dest=/etc/sysctl.conf regexp='^net.ipv4.ip_forward' line='net.ipv4.ip_forward=1'
- notify:
- - reload sysctl.conf
+- sysctl:
+ name: net.ipv4.ip_forward
+ value: 1
+ sysctl_file: "/etc/sysctl.d/99-openshift.conf"
+ reload: yes
- name: include bootstrap node config
include: bootstrap.yml
when: openshift_node_bootstrap
+- include: registry_auth.yml
+
- name: include standard node config
include: config.yml
when: not openshift_node_bootstrap
-- include: registry_auth.yml
-
- name: Configure AWS Cloud Provider Settings
lineinfile:
dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
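The `sysctl` module persists the key to its own drop-in and applies it immediately, replacing the removed lineinfile-plus-handler pair; a standalone equivalent:

```yaml
- sysctl:
    name: net.ipv4.ip_forward
    value: 1
    sysctl_file: /etc/sysctl.d/99-openshift.conf
    reload: yes
# resulting drop-in: net.ipv4.ip_forward=1
```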
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index b2dceedbe..0ca44c292 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -9,4 +9,6 @@
oc_atomic_container:
name: "{{ openshift.common.service_type }}-node"
image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}"
+ values:
+ - "DNS_DOMAIN={{ openshift.common.dns_domain }}"
state: latest
diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml
index f370bb260..3d2831742 100644
--- a/roles/openshift_node/tasks/registry_auth.yml
+++ b/roles/openshift_node/tasks/registry_auth.yml
@@ -5,21 +5,20 @@
when: oreg_auth_user is defined
register: node_oreg_auth_credentials_stat
-# Container images may need the registry credentials
-- name: Setup ro mount of /root/.docker for containerized hosts
- set_fact:
- l_bind_docker_reg_auth: True
+- name: Create credentials for registry auth
+ command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
when:
- - openshift.common.is_containerized | bool
- oreg_auth_user is defined
- (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ register: node_oreg_auth_credentials_create
notify:
- restart node
-- name: Create credentials for registry auth
- command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+# Container images may need the registry credentials
+- name: Setup ro mount of /root/.docker for containerized hosts
+ set_fact:
+ l_bind_docker_reg_auth: True
when:
+ - openshift.common.is_containerized | bool
- oreg_auth_user is defined
- - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- notify:
- - restart node
+ - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool
diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service
index 8734e7443..fa7238849 100644
--- a/roles/openshift_node/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node/templates/openshift.docker.node.dep.service
@@ -6,6 +6,6 @@ Before={{ openshift.common.service_type }}-node.service
{% if openshift_use_crio|default(false) %}Wants=cri-o.service{% endif %}
[Service]
-ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
+ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
ExecStop=
SyslogIdentifier={{ openshift.common.service_type }}-node-dep
diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
index 61d2a5b51..df02bcf0e 100755
--- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
+++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
@@ -114,6 +114,8 @@ EOF
echo "nameserver "${def_route_ip}"" >> ${NEW_RESOLV_CONF}
- if ! grep -q 'search.*cluster.local' ${NEW_RESOLV_CONF}; then
+ if ! grep -qw search ${NEW_RESOLV_CONF}; then
+ echo 'search cluster.local' >> ${NEW_RESOLV_CONF}
+ elif ! grep -q 'search.*cluster.local' ${NEW_RESOLV_CONF}; then
sed -i '/^search/ s/$/ cluster.local/' ${NEW_RESOLV_CONF}
fi
cp -Z ${NEW_RESOLV_CONF} /etc/resolv.conf
fi
diff --git a/filter_plugins/openshift_node.py b/roles/openshift_node_facts/filter_plugins/filters.py
index 50c360e97..69069f2dc 100644
--- a/filter_plugins/openshift_node.py
+++ b/roles/openshift_node_facts/filter_plugins/filters.py
@@ -7,10 +7,10 @@ from ansible import errors
class FilterModule(object):
- ''' Custom ansible filters for use by openshift_node role'''
+ ''' Custom ansible filters for use by openshift_node_facts role'''
@staticmethod
- def get_dns_ip(openshift_dns_ip, hostvars):
+ def node_get_dns_ip(openshift_dns_ip, hostvars):
''' Navigates the complicated logic of when to set dnsIP
In all situations if they've set openshift_dns_ip use that
@@ -29,4 +29,4 @@ class FilterModule(object):
def filters(self):
''' returns a mapping of filters to methods '''
- return {'get_dns_ip': self.get_dns_ip}
+ return {'node_get_dns_ip': self.node_get_dns_ip}
diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml
index c268c945e..fd4c49504 100644
--- a/roles/openshift_node_facts/tasks/main.yml
+++ b/roles/openshift_node_facts/tasks/main.yml
@@ -30,5 +30,5 @@
ovs_image: "{{ osn_ovs_image | default(None) }}"
proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}"
local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}"
- dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}"
+ dns_ip: "{{ openshift_dns_ip | default(none) | node_get_dns_ip(hostvars[inventory_hostname])}}"
env_vars: "{{ openshift_node_env_vars | default(None) }}"
diff --git a/roles/openshift_node_upgrade/defaults/main.yml b/roles/openshift_node_upgrade/defaults/main.yml
index 3d8704308..6507b015d 100644
--- a/roles/openshift_node_upgrade/defaults/main.yml
+++ b/roles/openshift_node_upgrade/defaults/main.yml
@@ -4,3 +4,9 @@ os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
+
+# oreg_url is defined by user input
+oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
+oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
+oreg_auth_credentials_replace: False
+l_bind_docker_reg_auth: False
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index e34319186..6bcf3072d 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -10,6 +10,8 @@
# tasks file for openshift_node_upgrade
+- include: registry_auth.yml
+
- name: Stop node and openvswitch services
service:
name: "{{ item }}"
diff --git a/roles/openshift_node_upgrade/tasks/registry_auth.yml b/roles/openshift_node_upgrade/tasks/registry_auth.yml
new file mode 100644
index 000000000..3d2831742
--- /dev/null
+++ b/roles/openshift_node_upgrade/tasks/registry_auth.yml
@@ -0,0 +1,24 @@
+---
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{ oreg_auth_credentials_path }}"
+ when: oreg_auth_user is defined
+ register: node_oreg_auth_credentials_stat
+
+- name: Create credentials for registry auth
+ command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ when:
+ - oreg_auth_user is defined
+ - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ register: node_oreg_auth_credentials_create
+ notify:
+ - restart node
+
+# Container images may need the registry credentials
+- name: Setup ro mount of /root/.docker for containerized hosts
+ set_fact:
+ l_bind_docker_reg_auth: True
+ when:
+ - openshift.common.is_containerized | bool
+ - oreg_auth_user is defined
+    - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool
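+
+# A minimal sketch of the inventory values (hypothetical) that drive this file:
+#   oreg_url: registry.example.com/openshift3/ose-${component}:${version}
+#   oreg_auth_user: pull-user
+#   oreg_auth_password: pull-password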
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
index 4c47f8c0d..aae35719c 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
@@ -6,6 +6,6 @@ Before={{ openshift.common.service_type }}-node.service
[Service]
-ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
+ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
ExecStop=
SyslogIdentifier={{ openshift.common.service_type }}-node-dep
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
index 451412ab0..864e4b5d6 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
@@ -21,7 +21,22 @@ EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
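+# The node container below bind-mounts host configuration, the docker client
+# binary and, when l_bind_docker_reg_auth is set, the registry credentials
+# directory (read-only) into the containerized node.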
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
+ --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \
+ -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \
+ -e HOST=/rootfs -e HOST_ETC=/host-etc \
+ -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} \
+ -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node \
+ {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
+ -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro \
+ -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw \
+ -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker \
+ -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch \
+ -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni \
+ -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log \
+ -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \
+ {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+ {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml
index 8d3d010e4..19e9a56b7 100644
--- a/roles/openshift_persistent_volumes/meta/main.yml
+++ b/roles/openshift_persistent_volumes/meta/main.yml
@@ -9,5 +9,4 @@ galaxy_info:
- name: EL
versions:
- 7
-dependencies:
-- role: openshift_hosted_facts
+dependencies: {}
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
index 18d6a1645..5aa8aecec 100644
--- a/roles/openshift_prometheus/defaults/main.yaml
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -11,7 +11,7 @@ openshift_prometheus_node_selector: {"region":"infra"}
openshift_prometheus_image_proxy: "openshift/oauth-proxy:v1.0.0"
openshift_prometheus_image_prometheus: "openshift/prometheus:v2.0.0-dev"
openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:dev"
-openshift_prometheus_image_alertbuffer: "ilackarms/message-buffer"
+openshift_prometheus_image_alertbuffer: "openshift/prometheus-alert-buffer:v0.0.1"
# additional prometheus rules file
openshift_prometheus_additional_rules_file: null
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index 93bdda3e8..a9bce2fb1 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -107,7 +107,10 @@
- name: annotate prometheus service
command: >
{{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
- service prometheus 'service.alpha.openshift.io/serving-cert-secret-name=prometheus-tls'
+ service prometheus
+ prometheus.io/scrape='true'
+ prometheus.io/scheme=https
+ service.alpha.openshift.io/serving-cert-secret-name=prometheus-tls
- name: annotate alerts service
command: >
diff --git a/roles/openshift_repos/README.md b/roles/openshift_repos/README.md
index abd1997dd..ce3b51454 100644
--- a/roles/openshift_repos/README.md
+++ b/roles/openshift_repos/README.md
@@ -1,4 +1,4 @@
-OpenShift Repos
+OpenShift Repos
================
Configures repositories for an OpenShift installation
@@ -12,10 +12,10 @@ rhel-7-server-extra-rpms, and rhel-7-server-ose-3.0-rpms repos.
Role Variables
--------------
-| Name | Default value | |
-|-------------------------------|---------------|------------------------------------|
-| openshift_deployment_type | None | Possible values enterprise, origin |
-| openshift_additional_repos | {} | TODO |
+| Name | Default value | |
+|-------------------------------|---------------|----------------------------------------------|
+| openshift_deployment_type | None | Possible values openshift-enterprise, origin |
+| openshift_additional_repos | {} | TODO |
Dependencies
------------
diff --git a/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py b/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py
new file mode 100644
index 000000000..d42c9bdb9
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py
@@ -0,0 +1,25 @@
+'''
+ OpenShift Logging class that provides useful filters used in Logging.
+
+ This should be removed after map_from_pairs is no longer used in __deprecations_logging.yml
+'''
+
+
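+# A quick sketch of the expected behavior (hypothetical selector string):
+#   map_from_pairs("region=infra,zone=east") returns {'region': 'infra', 'zone': 'east'}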
+def map_from_pairs(source, delim="="):
+    ''' Returns a dict built from a string of delim-separated key=value pairs '''
+ if source == '':
+ return dict()
+
+ return dict(item.split(delim) for item in source.split(","))
+
+
+# pylint: disable=too-few-public-methods
+class FilterModule(object):
+ ''' OpenShift Logging Filters '''
+
+ # pylint: disable=no-self-use, too-few-public-methods
+ def filters(self):
+ ''' Returns the names of the filters provided by this class '''
+ return {
+ 'map_from_pairs': map_from_pairs
+ }
diff --git a/roles/openshift_sanitize_inventory/library/conditional_set_fact.py b/roles/openshift_sanitize_inventory/library/conditional_set_fact.py
new file mode 100644
index 000000000..f61801714
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/library/conditional_set_fact.py
@@ -0,0 +1,68 @@
+#!/usr/bin/python
+
+""" Ansible module to help with setting facts conditionally based on other facts """
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+DOCUMENTATION = '''
+---
+module: conditional_set_fact
+
+short_description: Sets facts whose referenced source variables are defined
+
+description:
+    - "Sets many facts at once, avoiding a separate set_fact plus when condition for each variable"
+
+author:
+ - Eric Wolinetz ewolinet@redhat.com
+'''
+
+
+EXAMPLES = '''
+- name: Conditionally set fact
+ conditional_set_fact:
+ fact1: not_defined_variable
+
+- name: Conditionally set fact
+ conditional_set_fact:
+ fact1: not_defined_variable
+ fact2: defined_variable
+
+'''
+
+
+def run_module():
+    """ The body of the module: for each key in 'vars', we check whether the variable
+    named by its value is defined in 'facts'. If it is, that value is assigned to the key """
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ facts=dict(type='dict', required=True),
+            vars=dict(required=False, type='dict', default={})
+ ),
+ supports_check_mode=True
+ )
+
+ local_facts = dict()
+ is_changed = False
+
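+    # e.g. (hypothetical) facts={'openshift_hosted_logging_deploy': True} and
+    # vars={'openshift_logging_install_logging': 'openshift_hosted_logging_deploy'}
+    # yield ansible_facts={'openshift_logging_install_logging': True}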
+ for param in module.params['vars']:
+ other_var = module.params['vars'][param]
+
+ if other_var in module.params['facts']:
+ local_facts[param] = module.params['facts'][other_var]
+ if not is_changed:
+ is_changed = True
+
+    module.exit_json(changed=is_changed,
+                     ansible_facts=local_facts)
+
+
+def main():
+ """ main """
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml
new file mode 100644
index 000000000..e52ab5f6d
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml
@@ -0,0 +1,48 @@
+---
+# this is used to set the logging variables from deprecated values to the current variable names
+# this file should be deleted once the deprecated variables are no longer honored
+
+- conditional_set_fact:
+ facts: "{{ hostvars[inventory_hostname] }}"
+ vars:
+ logging_hostname: openshift_hosted_logging_hostname
+ logging_ops_hostname: openshift_hosted_logging_ops_hostname
+ logging_elasticsearch_cluster_size: openshift_hosted_logging_elasticsearch_cluster_size
+ logging_elasticsearch_ops_cluster_size: openshift_hosted_logging_elasticsearch_ops_cluster_size
+ openshift_logging_storage_kind: openshift_hosted_logging_storage_kind
+ openshift_logging_storage_host: openshift_hosted_logging_storage_host
+ openshift_logging_storage_labels: openshift_hosted_logging_storage_labels
+ openshift_logging_storage_volume_size: openshift_hosted_logging_storage_volume_size
+ openshift_loggingops_storage_kind: openshift_hosted_loggingops_storage_kind
+ openshift_loggingops_storage_host: openshift_hosted_loggingops_storage_host
+ openshift_loggingops_storage_labels: openshift_hosted_loggingops_storage_labels
+ openshift_loggingops_storage_volume_size: openshift_hosted_loggingops_storage_volume_size
+ openshift_logging_use_ops: openshift_hosted_logging_enable_ops_cluster
+ openshift_logging_image_pull_secret: openshift_hosted_logging_image_pull_secret
+ openshift_logging_kibana_hostname: openshift_hosted_logging_hostname
+ openshift_logging_kibana_ops_hostname: openshift_hosted_logging_ops_hostname
+ openshift_logging_fluentd_journal_source: openshift_hosted_logging_journal_source
+ openshift_logging_fluentd_journal_read_from_head: openshift_hosted_logging_journal_read_from_head
+ openshift_logging_es_memory_limit: openshift_hosted_logging_elasticsearch_instance_ram
+ openshift_logging_es_nodeselector: openshift_hosted_logging_elasticsearch_nodeselector
+ openshift_logging_es_ops_memory_limit: openshift_hosted_logging_elasticsearch_ops_instance_ram
+ openshift_logging_storage_access_modes: openshift_hosted_logging_storage_access_modes
+ openshift_logging_master_public_url: openshift_hosted_logging_master_public_url
+ openshift_logging_image_prefix: openshift_hosted_logging_deployer_prefix
+ openshift_logging_image_version: openshift_hosted_logging_deployer_version
+ openshift_logging_install_logging: openshift_hosted_logging_deploy
+
+
+- set_fact:
+ openshift_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_logging_storage_kind | default(none) == 'dynamic' else '' }}"
+ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_storage_volume_size if openshift_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}"
+ openshift_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_logging_storage_kind | default(none) == 'dynamic' else '' }}"
+ openshift_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}"
+ openshift_logging_elasticsearch_ops_pvc_size: "{{ openshift_loggingops_storage_volume_size if openshift_loggingops_storage_kind | default(none) in ['dynamic','nfs'] else '' }}"
+ openshift_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es-ops' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}"
+ openshift_logging_curator_nodeselector: "{{ openshift_hosted_logging_curator_nodeselector | default('') | map_from_pairs }}"
+ openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator_ops_nodeselector | default('') | map_from_pairs }}"
+ openshift_logging_kibana_nodeselector: "{{ openshift_hosted_logging_kibana_nodeselector | default('') | map_from_pairs }}"
+ openshift_logging_kibana_ops_nodeselector: "{{ openshift_hosted_logging_kibana_ops_nodeselector | default('') | map_from_pairs }}"
+ openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
+ openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}"
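+
+# map_from_pairs above turns a selector string such as the default
+# 'logging-infra-fluentd=true' into the dict {'logging-infra-fluentd': 'true'}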
diff --git a/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml b/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml
new file mode 100644
index 000000000..279646981
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml
@@ -0,0 +1,17 @@
+---
+# this is used to set the metrics variables from deprecated values to the current variable names
+# this file should be deleted once the deprecated variables are no longer honored
+
+- conditional_set_fact:
+ facts: "{{ hostvars[inventory_hostname] }}"
+ vars:
+ openshift_metrics_storage_access_modes: openshift_hosted_metrics_storage_access_modes
+ openshift_metrics_storage_host: openshift_hosted_metrics_storage_host
+ openshift_metrics_storage_nfs_directory: openshift_hosted_metrics_storage_nfs_directory
+ openshift_metrics_storage_volume_name: openshift_hosted_metrics_storage_volume_name
+ openshift_metrics_storage_volume_size: openshift_hosted_metrics_storage_volume_size
+ openshift_metrics_storage_labels: openshift_hosted_metrics_storage_labels
+ openshift_metrics_image_prefix: openshift_hosted_metrics_deployer_prefix
+ openshift_metrics_image_version: openshift_hosted_metrics_deployer_version
+ openshift_metrics_install_metrics: openshift_hosted_metrics_deploy
+ openshift_metrics_storage_kind: openshift_hosted_metrics_storage_kind
diff --git a/roles/openshift_sanitize_inventory/tasks/deprecations.yml b/roles/openshift_sanitize_inventory/tasks/deprecations.yml
new file mode 100644
index 000000000..94d3acffc
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/tasks/deprecations.yml
@@ -0,0 +1,21 @@
+---
+
+- name: Check for usage of deprecated variables
+ set_fact:
+    __deprecation_message: "{{ __deprecation_message | default([]) }} + ['{{ __deprecation_header }} {{ item }} is a deprecated variable and will no longer be used in the next minor release. Please update your inventory accordingly.']"
+ when:
+ - hostvars[inventory_hostname][item] is defined
+ with_items: "{{ __warn_deprecated_vars }}"
+
+- block:
+  - debug: msg="{{ __deprecation_message }}"
+  - pause:
+      seconds: 10
+ when:
+  - __deprecation_message | default([]) | length > 0
+
+# for with_fileglob Ansible resolves the path relative to the roles/<rolename>/files directory
+- name: Assign deprecated variables to correct counterparts
+ include: "{{ item }}"
+ with_fileglob:
+ - "../tasks/__deprecations_*.yml"
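+#   here that glob resolves to __deprecations_logging.yml and __deprecations_metrics.yml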
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index 59ce505d3..e327ee9f5 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -1,4 +1,8 @@
---
+# Print deprecation warnings before any failure can occur, so that even if a play
+# later fails for other reasons the user is still told which deprecated variables to adjust
+- include: deprecations.yml
+
- name: Abort when conflicting deployment type variables are set
when:
- deployment_type is defined
diff --git a/roles/openshift_sanitize_inventory/vars/main.yml b/roles/openshift_sanitize_inventory/vars/main.yml
index da48e42c1..0fc2372d2 100644
--- a/roles/openshift_sanitize_inventory/vars/main.yml
+++ b/roles/openshift_sanitize_inventory/vars/main.yml
@@ -1,7 +1,78 @@
---
# origin uses community packages named 'origin'
-# online currently uses 'openshift' packages
-# enterprise is used for OSE 3.0 < 3.1 which uses packages named 'openshift'
-# atomic-enterprise uses Red Hat packages named 'atomic-openshift'
-# openshift-enterprise uses Red Hat packages named 'atomic-openshift' starting with OSE 3.1
-known_openshift_deployment_types: ['origin', 'online', 'enterprise', 'atomic-enterprise', 'openshift-enterprise']
+# openshift-enterprise uses Red Hat packages named 'atomic-openshift'
+known_openshift_deployment_types: ['origin', 'openshift-enterprise']
+
+__deprecation_header: "[DEPRECATION WARNING]:"
+
+# this is a list of variables that we will be deprecating within the next minor release; expect it to change from release to release
+__warn_deprecated_vars:
+ # logging
+ - 'openshift_hosted_logging_deploy'
+ - 'openshift_hosted_logging_hostname'
+ - 'openshift_hosted_logging_ops_hostname'
+ - 'openshift_hosted_logging_master_public_url'
+ - 'openshift_hosted_logging_elasticsearch_cluster_size'
+ - 'openshift_hosted_logging_elasticsearch_ops_cluster_size'
+ - 'openshift_hosted_logging_image_pull_secret'
+ - 'openshift_hosted_logging_enable_ops_cluster'
+ - 'openshift_hosted_logging_curator_nodeselector'
+ - 'openshift_hosted_logging_curator_ops_nodeselector'
+ - 'openshift_hosted_logging_kibana_nodeselector'
+ - 'openshift_hosted_logging_kibana_ops_nodeselector'
+ - 'openshift_hosted_logging_fluentd_nodeselector_label'
+ - 'openshift_hosted_logging_journal_source'
+ - 'openshift_hosted_logging_journal_read_from_head'
+ - 'openshift_hosted_logging_elasticsearch_instance_ram'
+ - 'openshift_hosted_logging_storage_labels'
+ - 'openshift_hosted_logging_elasticsearch_pvc_dynamic'
+ - 'openshift_hosted_logging_elasticsearch_pvc_size'
+ - 'openshift_hosted_logging_elasticsearch_pvc_prefix'
+ - 'openshift_hosted_logging_elasticsearch_storage_group'
+ - 'openshift_hosted_logging_elasticsearch_nodeselector'
+ - 'openshift_hosted_logging_elasticsearch_ops_instance_ram'
+ - 'openshift_hosted_loggingops_storage_labels'
+ - 'openshift_hosted_logging_elasticsearch_ops_pvc_dynamic'
+ - 'openshift_hosted_logging_elasticsearch_ops_pvc_size'
+ - 'openshift_hosted_logging_elasticsearch_ops_pvc_prefix'
+ - 'openshift_hosted_logging_elasticsearch_ops_nodeselector'
+ - 'openshift_hosted_logging_storage_access_modes'
+ - 'openshift_hosted_logging_storage_kind'
+ - 'openshift_hosted_loggingops_storage_kind'
+ - 'openshift_hosted_logging_storage_host'
+ - 'openshift_hosted_loggingops_storage_host'
+ - 'openshift_hosted_logging_storage_nfs_directory'
+ - 'openshift_hosted_loggingops_storage_nfs_directory'
+ - 'openshift_hosted_logging_storage_volume_name'
+ - 'openshift_hosted_loggingops_storage_volume_name'
+ - 'openshift_hosted_logging_storage_volume_size'
+ - 'openshift_hosted_loggingops_storage_volume_size'
+ - 'openshift_hosted_logging_deployer_prefix'
+ - 'openshift_hosted_logging_deployer_version'
+ # metrics
+ - 'openshift_hosted_metrics_deploy'
+ - 'openshift_hosted_metrics_storage_kind'
+ - 'openshift_hosted_metrics_storage_access_modes'
+ - 'openshift_hosted_metrics_storage_host'
+ - 'openshift_hosted_metrics_storage_nfs_directory'
+ - 'openshift_hosted_metrics_storage_volume_name'
+ - 'openshift_hosted_metrics_storage_volume_size'
+ - 'openshift_hosted_metrics_storage_labels'
+ - 'openshift_hosted_metrics_deployer_prefix'
+ - 'openshift_hosted_metrics_deployer_version'
diff --git a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
index 16a307c06..d0a9f11dc 100644
--- a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
+++ b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
@@ -1 +1,2 @@
-window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.service_catalog_landing_page = true;
+// empty file so that the master-config can still point to a file that exists
+// this file will be replaced by the template service broker role if enabled
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index 746c73eaf..faf1aea97 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -6,8 +6,6 @@
register: mktemp
changed_when: False
-- include: wire_aggregator.yml
-
- name: Set default image variables based on deployment_type
include_vars: "{{ item }}"
with_first_found:
@@ -112,15 +110,6 @@
when:
- not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
-- shell: >
- oc get policybindings/kube-system:default -n kube-system || echo "not found"
- register: get_kube_system
- changed_when: no
-
-- command: >
- oc create policybinding kube-system -n kube-system
- when: "'not found' in get_kube_system.stdout"
-
- oc_adm_policy_user:
namespace: kube-service-catalog
resource_kind: scc
diff --git a/roles/openshift_service_catalog/tasks/wire_aggregator.yml b/roles/openshift_service_catalog/tasks/wire_aggregator.yml
index 1c788470a..6431c6d3f 100644
--- a/roles/openshift_service_catalog/tasks/wire_aggregator.yml
+++ b/roles/openshift_service_catalog/tasks/wire_aggregator.yml
@@ -18,11 +18,10 @@
changed_when: false
delegate_to: "{{ first_master }}"
-
# TODO: this currently has a bug where hostnames are required
- name: Creating First Master Aggregator signer certs
command: >
- oc adm ca create-signer-cert
+ {{ hostvars[first_master].openshift.common.client_binary }} adm ca create-signer-cert
--cert=/etc/origin/master/front-proxy-ca.crt
--key=/etc/origin/master/front-proxy-ca.key
--serial=/etc/origin/master/ca.serial.txt
@@ -79,7 +78,7 @@
- name: Create first master api-client config for Aggregator
command: >
- oc adm create-api-client-config
+ {{ hostvars[first_master].openshift.common.client_binary }} adm create-api-client-config
--certificate-authority=/etc/origin/master/front-proxy-ca.crt
--signer-cert=/etc/origin/master/front-proxy-ca.crt
--signer-key=/etc/origin/master/front-proxy-ca.key
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
index 9ebb0d5ec..7b705c2d4 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
@@ -85,8 +85,6 @@ objects:
volumeMounts:
- name: db
mountPath: /var/lib/heketi
- - name: topology
- mountPath: ${TOPOLOGY_PATH}
- name: config
mountPath: /etc/heketi
readinessProbe:
@@ -103,9 +101,6 @@ objects:
port: 8080
volumes:
- name: db
- - name: topology
- secret:
- secretName: heketi-${CLUSTER_NAME}-topology-secret
- name: config
secret:
secretName: heketi-${CLUSTER_NAME}-config-secret
@@ -138,6 +133,3 @@ parameters:
displayName: GlusterFS cluster name
description: A unique name to identify this heketi service, useful for running multiple heketi instances
value: glusterfs
-- name: TOPOLOGY_PATH
- displayName: heketi topology file location
- required: True
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index bc0dde17d..3f6dab78b 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -51,8 +51,8 @@
kind: pod
state: list
selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
- register: heketi_pod
- until: "heketi_pod.results.results[0]['items'] | count == 0"
+ register: deploy_heketi_pod
+ until: "deploy_heketi_pod.results.results[0]['items'] | count == 0"
delay: 10
retries: "{{ (glusterfs_timeout | int / 10) | int }}"
when: glusterfs_heketi_wipe
@@ -103,7 +103,7 @@
state: list
kind: pod
selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
- register: heketi_pod
+ register: deploy_heketi_pod
when: glusterfs_heketi_is_native
- name: Check if need to deploy deploy-heketi
@@ -111,9 +111,9 @@
glusterfs_heketi_deploy_is_missing: False
when:
- "glusterfs_heketi_is_native"
- - "heketi_pod.results.results[0]['items'] | count > 0"
+ - "deploy_heketi_pod.results.results[0]['items'] | count > 0"
# deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
- - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+ - "deploy_heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
- name: Check for existing heketi pod
oc_obj:
@@ -147,6 +147,21 @@
when:
- glusterfs_heketi_is_native
+- name: Get heketi admin secret
+ oc_secret:
+ state: list
+ namespace: "{{ glusterfs_namespace }}"
+ name: "heketi-{{ glusterfs_name }}-admin-secret"
+ decode: True
+ register: glusterfs_heketi_admin_secret
+
+- name: Set heketi admin key
+ set_fact:
+ glusterfs_heketi_admin_key: "{{ glusterfs_heketi_admin_secret.results.decoded.key }}"
+ when:
+ - glusterfs_is_native
+ - glusterfs_heketi_admin_secret.results.results[0]
+
- name: Generate heketi admin key
set_fact:
glusterfs_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
@@ -190,14 +205,37 @@
- glusterfs_heketi_deploy_is_missing
- glusterfs_heketi_is_missing
+- name: Wait for deploy-heketi pod
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
+ register: deploy_heketi_pod
+ until:
+ - "deploy_heketi_pod.results.results[0]['items'] | count > 0"
+ # Pod's 'Ready' status must be True
+ - "deploy_heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ delay: 10
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+ when:
+ - glusterfs_heketi_is_native
+ - not glusterfs_heketi_deploy_is_missing
+ - glusterfs_heketi_is_missing
+
- name: Set heketi-cli command
set_fact:
- glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}"
+ glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}"
- name: Verify heketi service
command: "{{ glusterfs_heketi_client }} cluster list"
changed_when: False
+- name: Place heketi topology on heketi Pod
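+  # streams the local topology.json over stdin into the (deploy-)heketi pod
+  # and writes it to the same temporary path inside the container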
+ shell: "{{ openshift.common.client_binary }} exec --namespace={{ glusterfs_namespace }} -i {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} -- bash -c 'mkdir -p {{ mktemp.stdout }} && cat > {{ mktemp.stdout }}/topology.json' < {{ mktemp.stdout }}/topology.json"
+ when:
+ - glusterfs_heketi_is_native
+
- name: Load heketi topology
command: "{{ glusterfs_heketi_client }} topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
register: topology_load
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
index 3ba1eb2d2..73396c9af 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
@@ -6,16 +6,6 @@
with_items:
- "deploy-heketi-template.yml"
-- name: Create heketi topology secret
- oc_secret:
- namespace: "{{ glusterfs_namespace }}"
- state: present
- name: "heketi-{{ glusterfs_name }}-topology-secret"
- force: True
- files:
- - name: topology.json
- path: "{{ mktemp.stdout }}/topology.json"
-
- name: Create deploy-heketi template
oc_obj:
namespace: "{{ glusterfs_namespace }}"
@@ -39,18 +29,7 @@
HEKETI_EXECUTOR: "{{ glusterfs_heketi_executor }}"
HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
CLUSTER_NAME: "{{ glusterfs_name }}"
- TOPOLOGY_PATH: "{{ mktemp.stdout }}"
-- name: Wait for deploy-heketi pod
- oc_obj:
- namespace: "{{ glusterfs_namespace }}"
- kind: pod
- state: list
- selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
- register: heketi_pod
- until:
- - "heketi_pod.results.results[0]['items'] | count > 0"
- # Pod's 'Ready' status must be True
- - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
- delay: 10
- retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+- name: Set heketi Deployed fact
+ set_fact:
+ glusterfs_heketi_deploy_is_missing: False
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index afc04a537..54a6dd7c3 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -4,7 +4,7 @@
register: setup_storage
- name: Copy heketi-storage list
- shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
+ shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
# This is used in the subsequent task
- name: Copy the admin client config
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index 51f8f4e0e..3047fbaf9 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -31,9 +31,9 @@
group: nfsnobody
with_items:
- "{{ openshift.hosted.registry }}"
- - "{{ openshift.hosted.metrics }}"
- - "{{ openshift.hosted.logging }}"
- - "{{ openshift.hosted.loggingops }}"
+ - "{{ openshift.metrics }}"
+ - "{{ openshift.logging }}"
+ - "{{ openshift.loggingops }}"
- "{{ openshift.hosted.etcd }}"
- name: Configure exports
diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2
index 7e8f70b23..0141e0d25 100644
--- a/roles/openshift_storage_nfs/templates/exports.j2
+++ b/roles/openshift_storage_nfs/templates/exports.j2
@@ -1,5 +1,5 @@
{{ openshift.hosted.registry.storage.nfs.directory }}/{{ openshift.hosted.registry.storage.volume.name }} {{ openshift.hosted.registry.storage.nfs.options }}
-{{ openshift.hosted.metrics.storage.nfs.directory }}/{{ openshift.hosted.metrics.storage.volume.name }} {{ openshift.hosted.metrics.storage.nfs.options }}
-{{ openshift.hosted.logging.storage.nfs.directory }}/{{ openshift.hosted.logging.storage.volume.name }} {{ openshift.hosted.logging.storage.nfs.options }}
-{{ openshift.hosted.loggingops.storage.nfs.directory }}/{{ openshift.hosted.loggingops.storage.volume.name }} {{ openshift.hosted.loggingops.storage.nfs.options }}
+{{ openshift.metrics.storage.nfs.directory }}/{{ openshift.metrics.storage.volume.name }} {{ openshift.metrics.storage.nfs.options }}
+{{ openshift.logging.storage.nfs.directory }}/{{ openshift.logging.storage.volume.name }} {{ openshift.logging.storage.nfs.options }}
+{{ openshift.loggingops.storage.nfs.directory }}/{{ openshift.loggingops.storage.volume.name }} {{ openshift.loggingops.storage.nfs.options }}
{{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }}
diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml
index 01a1a7472..53d10f1f8 100644
--- a/roles/openshift_version/defaults/main.yml
+++ b/roles/openshift_version/defaults/main.yml
@@ -1,2 +1,3 @@
---
openshift_protect_installed_version: True
+version_install_base_package: False
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index 204abe27e..f4e9ff43a 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -5,11 +5,15 @@
is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}"
is_atomic: "{{ openshift.common.is_atomic | default(False) | bool }}"
+# This is only needed on masters and nodes; version_install_base_package
+# should be set by a play externally.
- name: Install the base package for versioning
package:
name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
- when: not is_containerized | bool
+ when:
+ - not is_containerized | bool
+ - version_install_base_package | bool
# Block attempts to install origin without specifying some kind of version information.
# This is because the latest tags for origin are usually alpha builds, which should not
@@ -162,7 +166,9 @@
- set_fact:
openshift_pkg_version: -{{ openshift_version }}
- when: openshift_pkg_version is not defined
+ when:
+ - openshift_pkg_version is not defined
+ - openshift_upgrade_target is not defined
- fail:
msg: openshift_version role was unable to set openshift_version
@@ -177,7 +183,10 @@
- fail:
msg: openshift_version role was unable to set openshift_pkg_version
name: Abort if openshift_pkg_version was not set
- when: openshift_pkg_version is not defined
+ when:
+ - openshift_pkg_version is not defined
+ - openshift_upgrade_target is not defined
+
- fail:
msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
diff --git a/roles/os_firewall/tasks/iptables.yml b/roles/os_firewall/tasks/iptables.yml
index 0af5abf38..2d74f2e48 100644
--- a/roles/os_firewall/tasks/iptables.yml
+++ b/roles/os_firewall/tasks/iptables.yml
@@ -33,7 +33,7 @@
register: result
delegate_to: "{{item}}"
run_once: true
- with_items: "{{ ansible_play_hosts }}"
+ with_items: "{{ ansible_play_batch }}"
- name: need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail
pause:
diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml
index 39d59db70..9738929d2 100644
--- a/roles/rhel_subscribe/tasks/enterprise.yml
+++ b/roles/rhel_subscribe/tasks/enterprise.yml
@@ -3,20 +3,17 @@
command: subscription-manager repos --disable="*"
- set_fact:
- default_ose_version: '3.0'
- when: deployment_type == 'enterprise'
-
-- set_fact:
default_ose_version: '3.6'
- when: deployment_type in ['atomic-enterprise', 'openshift-enterprise']
+ when: deployment_type == 'openshift-enterprise'
- set_fact:
ose_version: "{{ lookup('oo_option', 'ose_version') | default(default_ose_version, True) }}"
- fail:
msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type"
- when: ( deployment_type == 'enterprise' and ose_version not in ['3.0'] ) or
- ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6'] )
+ when:
+ - deployment_type == 'openshift-enterprise'
+    - ose_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6']
- name: Enable RHEL repositories
command: subscription-manager repos \
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index 453044a6e..c43e5513d 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -41,15 +41,19 @@
redhat_subscription:
username: "{{ rhel_subscription_user }}"
password: "{{ rhel_subscription_pass }}"
+ register: rh_subscription
+ until: rh_subscription | succeeded
- name: Retrieve the OpenShift Pool ID
command: subscription-manager list --available --matches="{{ rhel_subscription_pool }}" --pool-only
register: openshift_pool_id
+ until: openshift_pool_id | succeeded
changed_when: False
- name: Determine if OpenShift Pool Already Attached
command: subscription-manager list --consumed --matches="{{ rhel_subscription_pool }}" --pool-only
register: openshift_pool_attached
+ until: openshift_pool_attached | succeeded
changed_when: False
when: openshift_pool_id.stdout == ''
@@ -58,10 +62,12 @@
when: openshift_pool_id.stdout == '' and openshift_pool_attached is defined and openshift_pool_attached.stdout == ''
- name: Attach to OpenShift Pool
- command: subscription-manager subscribe --pool {{ openshift_pool_id.stdout_lines[0] }}
+ command: subscription-manager attach --pool {{ openshift_pool_id.stdout_lines[0] }}
+ register: subscribe_pool
+ until: subscribe_pool | succeeded
when: openshift_pool_id.stdout != ''
- include: enterprise.yml
when:
- - deployment_type in [ 'enterprise', 'atomic-enterprise', 'openshift-enterprise' ]
+ - deployment_type == 'openshift-enterprise'
- not ostree_booted.stat.exists | bool
diff --git a/roles/template_service_broker/defaults/main.yml b/roles/template_service_broker/defaults/main.yml
new file mode 100644
index 000000000..fb407c4a2
--- /dev/null
+++ b/roles/template_service_broker/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# defaults that control whether the template service broker is installed or removed
+template_service_broker_remove: False
+template_service_broker_install: False
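+# opt in from a play or inventory, e.g. (hypothetical) template_service_broker_install=True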
diff --git a/roles/template_service_broker/files/openshift-ansible-catalog-console.js b/roles/template_service_broker/files/openshift-ansible-catalog-console.js
new file mode 100644
index 000000000..b3a3d3428
--- /dev/null
+++ b/roles/template_service_broker/files/openshift-ansible-catalog-console.js
@@ -0,0 +1 @@
+window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.template_service_broker = true;
diff --git a/roles/template_service_broker/files/remove-openshift-ansible-catalog-console.js b/roles/template_service_broker/files/remove-openshift-ansible-catalog-console.js
new file mode 100644
index 000000000..d0a9f11dc
--- /dev/null
+++ b/roles/template_service_broker/files/remove-openshift-ansible-catalog-console.js
@@ -0,0 +1,2 @@
+// empty file so that the master-config can still point to a file that exists
+// this file will be replaced by the template service broker role if enabled
diff --git a/roles/etcd_ca/meta/main.yml b/roles/template_service_broker/meta/main.yml
index e3e2f7781..ab5a0cf08 100644
--- a/roles/etcd_ca/meta/main.yml
+++ b/roles/template_service_broker/meta/main.yml
@@ -1,7 +1,7 @@
---
galaxy_info:
- author: Jason DeTiberus
- description: Etcd CA
+ author: OpenShift Red Hat
+ description: OpenShift Template Service Broker
company: Red Hat, Inc.
license: Apache License, Version 2.0
min_ansible_version: 2.1
@@ -11,6 +11,3 @@ galaxy_info:
- 7
categories:
- cloud
- - system
-dependencies:
-- role: etcd_common
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
new file mode 100644
index 000000000..199df83c2
--- /dev/null
+++ b/roles/template_service_broker/tasks/install.yml
@@ -0,0 +1,47 @@
+---
+# Fact setting
+- name: Set default image variables based on deployment type
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "default_images.yml"
+
+- name: set template_service_broker facts
+ set_fact:
+ template_service_broker_image: "{{ template_service_broker_image | default(__template_service_broker_image) }}"
+
+- oc_project:
+ name: openshift-template-service-broker
+ state: present
+
+- command: mktemp -d /tmp/tsb-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+ become: no
+
+- copy:
+ src: "{{ __tsb_files_location }}/{{ item }}"
+ dest: "{{ mktemp.stdout }}/{{ item }}"
+ with_items:
+ - "{{ __tsb_template_file }}"
+ - "{{ __tsb_rbac_file }}"
+
+- name: Apply template file
+ shell: >
+ oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" --param API_SERVER_CONFIG="{{ lookup('file', __tsb_files_location ~ '/' ~ __tsb_config_file) }}" | kubectl apply -f -
+
+# reconcile with rbac
+- name: Reconcile with RBAC file
+ shell: >
+ oc process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | oc auth reconcile -f -
+
+- name: copy tech preview extension file for service console UI
+ copy:
+ src: openshift-ansible-catalog-console.js
+ dest: /etc/origin/master/openshift-ansible-catalog-console.js
+
+- file:
+ state: absent
+ name: "{{ mktemp.stdout }}"
+ changed_when: False
+ become: no
diff --git a/roles/template_service_broker/tasks/main.yml b/roles/template_service_broker/tasks/main.yml
new file mode 100644
index 000000000..d7ca970c7
--- /dev/null
+++ b/roles/template_service_broker/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+# do any asserts here
+
+- include: install.yml
+ when: template_service_broker_install | default(false) | bool
+
+- include: remove.yml
+ when: template_service_broker_remove | default(false) | bool
diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml
new file mode 100644
index 000000000..207dd9bdb
--- /dev/null
+++ b/roles/template_service_broker/tasks/remove.yml
@@ -0,0 +1,28 @@
+---
+- command: mktemp -d /tmp/tsb-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+ become: no
+
+- copy:
+    src: "{{ __tsb_files_location }}/{{ __tsb_template_file }}"
+    dest: "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
+
+- name: Delete TSB objects
+ shell: >
+    oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | kubectl delete -f -
+
+- name: empty out tech preview extension file for service console UI
+ copy:
+ src: remove-openshift-ansible-catalog-console.js
+ dest: /etc/origin/master/openshift-ansible-catalog-console.js
+
+- oc_project:
+ name: openshift-template-service-broker
+ state: absent
+
+- file:
+ state: absent
+ name: "{{ mktemp.stdout }}"
+ changed_when: False
+ become: no
diff --git a/roles/template_service_broker/vars/default_images.yml b/roles/template_service_broker/vars/default_images.yml
new file mode 100644
index 000000000..807f2822c
--- /dev/null
+++ b/roles/template_service_broker/vars/default_images.yml
@@ -0,0 +1,2 @@
+---
+__template_service_broker_image: ""
diff --git a/roles/template_service_broker/vars/main.yml b/roles/template_service_broker/vars/main.yml
new file mode 100644
index 000000000..372ab8f6f
--- /dev/null
+++ b/roles/template_service_broker/vars/main.yml
@@ -0,0 +1,6 @@
+---
+__tsb_files_location: "../../../files/origin-components/"
+
+__tsb_template_file: "apiserver-template.yaml"
+__tsb_config_file: "apiserver-config.yaml"
+__tsb_rbac_file: "rbac-template.yaml"
diff --git a/roles/template_service_broker/vars/openshift-enterprise.yml b/roles/template_service_broker/vars/openshift-enterprise.yml
new file mode 100644
index 000000000..807f2822c
--- /dev/null
+++ b/roles/template_service_broker/vars/openshift-enterprise.yml
@@ -0,0 +1,2 @@
+---
+__template_service_broker_image: ""
diff --git a/test/integration/openshift_health_checker/common.go b/test/integration/openshift_health_checker/common.go
index a92d6861d..8b79c48cb 100644
--- a/test/integration/openshift_health_checker/common.go
+++ b/test/integration/openshift_health_checker/common.go
@@ -25,7 +25,7 @@ func (p PlaybookTest) Run(t *testing.T) {
// A PlaybookTest is intended to be run in parallel with other tests.
t.Parallel()
- cmd := exec.Command("ansible-playbook", "-i", "/dev/null", p.Path)
+ cmd := exec.Command("ansible-playbook", "-e", "testing_skip_some_requirements=1", "-i", "/dev/null", p.Path)
cmd.Env = append(os.Environ(), "ANSIBLE_FORCE_COLOR=1")
b, err := cmd.CombinedOutput()
diff --git a/test/openshift_version_tests.py b/test/openshift_version_tests.py
index 393a4d6ba..6095beb95 100644
--- a/test/openshift_version_tests.py
+++ b/test/openshift_version_tests.py
@@ -17,39 +17,39 @@ class OpenShiftVersionTests(unittest.TestCase):
# Static tests for legacy filters.
legacy_gte_tests = [{'name': 'oo_version_gte_3_1_or_1_1',
- 'positive_enterprise_version': '3.2.0',
- 'negative_enterprise_version': '3.0.0',
+ 'positive_openshift-enterprise_version': '3.2.0',
+ 'negative_openshift-enterprise_version': '3.0.0',
'positive_origin_version': '1.2.0',
'negative_origin_version': '1.0.0'},
{'name': 'oo_version_gte_3_1_1_or_1_1_1',
- 'positive_enterprise_version': '3.2.0',
- 'negative_enterprise_version': '3.1.0',
+ 'positive_openshift-enterprise_version': '3.2.0',
+ 'negative_openshift-enterprise_version': '3.1.0',
'positive_origin_version': '1.2.0',
'negative_origin_version': '1.1.0'},
{'name': 'oo_version_gte_3_2_or_1_2',
- 'positive_enterprise_version': '3.3.0',
- 'negative_enterprise_version': '3.1.0',
+ 'positive_openshift-enterprise_version': '3.3.0',
+ 'negative_openshift-enterprise_version': '3.1.0',
'positive_origin_version': '1.3.0',
'negative_origin_version': '1.1.0'},
{'name': 'oo_version_gte_3_3_or_1_3',
- 'positive_enterprise_version': '3.4.0',
- 'negative_enterprise_version': '3.2.0',
+ 'positive_openshift-enterprise_version': '3.4.0',
+ 'negative_openshift-enterprise_version': '3.2.0',
'positive_origin_version': '1.4.0',
'negative_origin_version': '1.2.0'},
{'name': 'oo_version_gte_3_4_or_1_4',
- 'positive_enterprise_version': '3.5.0',
- 'negative_enterprise_version': '3.3.0',
+ 'positive_openshift-enterprise_version': '3.5.0',
+ 'negative_openshift-enterprise_version': '3.3.0',
'positive_origin_version': '1.5.0',
'negative_origin_version': '1.3.0'},
{'name': 'oo_version_gte_3_5_or_1_5',
- 'positive_enterprise_version': '3.6.0',
- 'negative_enterprise_version': '3.4.0',
+ 'positive_openshift-enterprise_version': '3.6.0',
+ 'negative_openshift-enterprise_version': '3.4.0',
'positive_origin_version': '3.6.0',
'negative_origin_version': '1.4.0'}]
def test_legacy_gte_filters(self):
for test in self.legacy_gte_tests:
- for deployment_type in ['enterprise', 'origin']:
+ for deployment_type in ['openshift-enterprise', 'origin']:
# Test negative case per deployment_type
self.assertFalse(
self.openshift_version_filters._filters[test['name']](
@@ -70,3 +70,7 @@ class OpenShiftVersionTests(unittest.TestCase):
self.assertFalse(
self.openshift_version_filters._filters["oo_version_gte_{}_{}".format(major, minor)](
"{}.{}".format(major, minor)))
+
+ def test_get_filters(self):
+ self.assertTrue(
+ self.openshift_version_filters.filters() == self.openshift_version_filters._filters)
diff --git a/utils/docs/config.md b/utils/docs/config.md
index 3677ffe2e..6d0c6896e 100644
--- a/utils/docs/config.md
+++ b/utils/docs/config.md
@@ -52,7 +52,6 @@ Indicates the version of configuration this file was written with. Current imple
The OpenShift variant to install. Currently valid options are:
* openshift-enterprise
- * atomic-enterprise
### variant_version (optional)