-rw-r--r--  DEPLOYMENT_TYPES.md | 2
-rw-r--r--  docs/proposals/role_decomposition.md | 2
-rw-r--r--  playbooks/aws/openshift-cluster/build_ami.yml | 6
-rw-r--r--  playbooks/aws/openshift-cluster/hosted.yml | 12
-rw-r--r--  playbooks/aws/openshift-cluster/install.yml | 18
-rw-r--r--  playbooks/aws/openshift-cluster/prerequisites.yml | 6
-rw-r--r--  playbooks/aws/openshift-cluster/provision_install.yml | 10
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/byo/openshift_facts.yml | 2
-rw-r--r--  playbooks/byo/rhel_subscribe.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 18
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 34
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 30
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 24
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 44
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 40
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml | 24
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml | 44
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml | 40
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml | 24
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml | 36
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 32
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml | 24
-rw-r--r--  playbooks/gcp/provision.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/config.yml | 1
-rw-r--r--  playbooks/openshift-etcd/private/embedded2external.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/migrate.yml | 4
-rw-r--r--  playbooks/openshift-hosted/redeploy-registry-certificates.yml | 4
-rw-r--r--  playbooks/openshift-hosted/redeploy-router-certificates.yml | 4
-rw-r--r--  playbooks/openshift-loadbalancer/private/config.yml | 7
-rw-r--r--  playbooks/openshift-logging/config.yml | 4
-rw-r--r--  playbooks/openshift-master/private/config.yml | 5
-rw-r--r--  playbooks/openshift-master/private/redeploy-certificates.yml | 4
-rw-r--r--  playbooks/openshift-master/private/redeploy-openshift-ca.yml | 4
-rw-r--r--  playbooks/openshift-master/private/scaleup.yml | 4
-rw-r--r--  playbooks/openshift-master/private/tasks/wire_aggregator.yml | 4
-rw-r--r--  playbooks/openshift-master/redeploy-certificates.yml | 6
-rw-r--r--  playbooks/openshift-master/redeploy-openshift-ca.yml | 4
-rw-r--r--  playbooks/openshift-nfs/private/config.yml | 1
-rw-r--r--  playbooks/openshift-node/private/configure_nodes.yml | 1
-rw-r--r--  playbooks/openshift-node/private/containerized_nodes.yml | 1
-rw-r--r--  playbooks/openshift-node/private/enable_excluders.yml | 1
-rw-r--r--  playbooks/openshift-node/private/redeploy-certificates.yml | 4
-rw-r--r--  playbooks/openshift-node/private/restart.yml | 8
-rw-r--r--  playbooks/openshift-node/private/setup.yml | 1
-rw-r--r--  playbooks/openshift-node/redeploy-certificates.yml | 6
-rw-r--r--  playbooks/openstack/openshift-cluster/install.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/provision.yml | 4
-rw-r--r--  playbooks/openstack/openshift-cluster/provision_install.yml | 6
-rw-r--r--  playbooks/prerequisites.yml | 6
-rw-r--r--  playbooks/redeploy-certificates.yml | 22
-rw-r--r--  roles/etcd/tasks/migration/add_ttls.yml | 2
-rw-r--r--  roles/flannel/handlers/main.yml | 2
-rw-r--r--  roles/kuryr/tasks/node.yaml | 4
-rw-r--r--  roles/nuage_master/handlers/main.yaml | 4
-rw-r--r--  roles/nuage_node/handlers/main.yaml | 2
-rw-r--r--  roles/nuage_node/vars/main.yaml | 2
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 2
-rw-r--r--  roles/openshift_cli/library/openshift_container_binary_sync.py | 7
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 2
-rw-r--r--  roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 | 2
-rw-r--r--  roles/openshift_excluder/README.md | 5
-rw-r--r--  roles/openshift_excluder/defaults/main.yml | 2
-rw-r--r--  roles/openshift_excluder/meta/main.yml | 1
-rw-r--r--  roles/openshift_excluder/tasks/main.yml | 5
-rw-r--r--  roles/openshift_facts/defaults/main.yml | 6
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 20
-rw-r--r--  roles/openshift_health_checker/defaults/main.yml | 6
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_availability.py | 4
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 4
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py | 2
-rw-r--r--  roles/openshift_health_checker/test/etcd_traffic_test.py | 5
-rw-r--r--  roles/openshift_health_checker/test/ovs_version_test.py | 9
-rw-r--r--  roles/openshift_health_checker/test/package_availability_test.py | 8
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py | 7
-rw-r--r--  roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_logging/handlers/main.yml | 4
-rw-r--r--  roles/openshift_master/handlers/main.yml | 4
-rw-r--r--  roles/openshift_master/tasks/main.yml | 16
-rw-r--r--  roles/openshift_master/tasks/registry_auth.yml | 2
-rw-r--r--  roles/openshift_master/tasks/restart.yml | 4
-rw-r--r--  roles/openshift_master/tasks/system_container.yml | 6
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 28
-rw-r--r--  roles/openshift_master/tasks/upgrade/rpm_upgrade.yml | 12
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 | 16
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 | 16
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 | 6
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 | 8
-rw-r--r--  roles/openshift_metrics/handlers/main.yml | 4
-rw-r--r--  roles/openshift_node/README.md | 6
-rw-r--r--  roles/openshift_node/defaults/main.yml | 6
-rw-r--r--  roles/openshift_node/handlers/main.yml | 2
-rw-r--r--  roles/openshift_node/tasks/aws.yml | 2
-rw-r--r--  roles/openshift_node/tasks/config.yml | 8
-rw-r--r--  roles/openshift_node/tasks/config/configure-node-settings.yml | 2
-rw-r--r--  roles/openshift_node/tasks/config/configure-proxy-settings.yml | 2
-rw-r--r--  roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml | 2
-rw-r--r--  roles/openshift_node/tasks/config/install-node-docker-service-file.yml | 2
-rw-r--r--  roles/openshift_node/tasks/docker/upgrade.yml | 2
-rw-r--r--  roles/openshift_node/tasks/install.yml | 4
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 4
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 2
-rw-r--r--  roles/openshift_node/tasks/upgrade.yml | 14
-rw-r--r--  roles/openshift_node/tasks/upgrade/restart.yml | 8
-rw-r--r--  roles/openshift_node/tasks/upgrade/rpm_upgrade.yml | 6
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.dep.service | 8
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 22
-rw-r--r--  roles/openshift_provisioners/tasks/install_efs.yaml | 2
-rw-r--r--  roles/openshift_version/defaults/main.yml | 6
-rw-r--r--  roles/openshift_version/tasks/main.yml | 8
-rw-r--r--  roles/openshift_version/tasks/set_version_rpm.yml | 6
-rw-r--r--  test/integration/openshift_health_checker/setup_container.yml | 1
140 files changed, 572 insertions(+), 516 deletions(-)
diff --git a/DEPLOYMENT_TYPES.md b/DEPLOYMENT_TYPES.md
index e52e47202..3788e9bfb 100644
--- a/DEPLOYMENT_TYPES.md
+++ b/DEPLOYMENT_TYPES.md
@@ -10,7 +10,7 @@ The table below outlines the defaults per `openshift_deployment_type`:
| openshift_deployment_type | origin | openshift-enterprise |
|-----------------------------------------------------------------|------------------------------------------|----------------------------------------|
-| **openshift.common.service_type** (also used for package names) | origin | atomic-openshift |
+| **openshift_service_type** (also used for package names) | origin | atomic-openshift |
| **openshift.common.config_base** | /etc/origin | /etc/origin |
| **openshift_data_dir** | /var/lib/origin | /var/lib/origin |
| **openshift.master.registry_url openshift.node.registry_url** | openshift/origin-${component}:${version} | openshift3/ose-${component}:${version} |
diff --git a/docs/proposals/role_decomposition.md b/docs/proposals/role_decomposition.md
index 6434e24e7..37d080d5c 100644
--- a/docs/proposals/role_decomposition.md
+++ b/docs/proposals/role_decomposition.md
@@ -262,7 +262,7 @@ dependencies:
- name: "Create logging project"
command: >
- {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
when: not ansible_check_mode and "not found" in logging_project_result.stderr
- name: Create logging cert directory
diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml
index 5815c4975..5bf4f652a 100644
--- a/playbooks/aws/openshift-cluster/build_ami.yml
+++ b/playbooks/aws/openshift-cluster/build_ami.yml
@@ -17,7 +17,7 @@
- name: openshift_aws_region
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
-- include: provision_instance.yml
+- import_playbook: provision_instance.yml
vars:
openshift_aws_node_group_type: compute
@@ -33,8 +33,8 @@
# This is the part that installs all of the software and configs for the instance
# to become a node.
-- include: ../../openshift-node/private/image_prep.yml
+- import_playbook: ../../openshift-node/private/image_prep.yml
-- include: seal_ami.yml
+- import_playbook: seal_ami.yml
vars:
openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml
index 1dabae357..9d9ed29de 100644
--- a/playbooks/aws/openshift-cluster/hosted.yml
+++ b/playbooks/aws/openshift-cluster/hosted.yml
@@ -1,19 +1,19 @@
---
-- include: ../../openshift-hosted/private/config.yml
+- import_playbook: ../../openshift-hosted/private/config.yml
-- include: ../../openshift-metrics/private/config.yml
+- import_playbook: ../../openshift-metrics/private/config.yml
when: openshift_metrics_install_metrics | default(false) | bool
-- include: ../../openshift-logging/private/config.yml
+- import_playbook: ../../openshift-logging/private/config.yml
when: openshift_logging_install_logging | default(false) | bool
-- include: ../../openshift-prometheus/private/config.yml
+- import_playbook: ../../openshift-prometheus/private/config.yml
when: openshift_hosted_prometheus_deploy | default(false) | bool
-- include: ../../openshift-service-catalog/private/config.yml
+- import_playbook: ../../openshift-service-catalog/private/config.yml
when: openshift_enable_service_catalog | default(false) | bool
-- include: ../../openshift-management/private/config.yml
+- import_playbook: ../../openshift-management/private/config.yml
when: openshift_management_install_management | default(false) | bool
- name: Print deprecated variable warning message if necessary
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
index f8206529a..b03fb0b7f 100644
--- a/playbooks/aws/openshift-cluster/install.yml
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -16,31 +16,31 @@
tasks_from: master_facts.yml
- name: run the init
- include: ../../init/main.yml
+ import_playbook: ../../init/main.yml
- name: perform the installer openshift-checks
- include: ../../openshift-checks/private/install.yml
+ import_playbook: ../../openshift-checks/private/install.yml
- name: etcd install
- include: ../../openshift-etcd/private/config.yml
+ import_playbook: ../../openshift-etcd/private/config.yml
- name: include nfs
- include: ../../openshift-nfs/private/config.yml
+ import_playbook: ../../openshift-nfs/private/config.yml
when: groups.oo_nfs_to_config | default([]) | count > 0
- name: include loadbalancer
- include: ../../openshift-loadbalancer/private/config.yml
+ import_playbook: ../../openshift-loadbalancer/private/config.yml
when: groups.oo_lb_to_config | default([]) | count > 0
- name: include openshift-master config
- include: ../../openshift-master/private/config.yml
+ import_playbook: ../../openshift-master/private/config.yml
- name: include master additional config
- include: ../../openshift-master/private/additional_config.yml
+ import_playbook: ../../openshift-master/private/additional_config.yml
- name: include master additional config
- include: ../../openshift-node/private/config.yml
+ import_playbook: ../../openshift-node/private/config.yml
- name: include openshift-glusterfs
- include: ../../openshift-glusterfs/private/config.yml
+ import_playbook: ../../openshift-glusterfs/private/config.yml
when: groups.oo_glusterfs_to_config | default([]) | count > 0
diff --git a/playbooks/aws/openshift-cluster/prerequisites.yml b/playbooks/aws/openshift-cluster/prerequisites.yml
index f5eb01b14..0afcce331 100644
--- a/playbooks/aws/openshift-cluster/prerequisites.yml
+++ b/playbooks/aws/openshift-cluster/prerequisites.yml
@@ -1,6 +1,6 @@
---
-- include: provision_vpc.yml
+- import_playbook: provision_vpc.yml
-- include: provision_ssh_keypair.yml
+- import_playbook: provision_ssh_keypair.yml
-- include: provision_sec_group.yml
+- import_playbook: provision_sec_group.yml
diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml
index 78dd6a49b..f98f5be9a 100644
--- a/playbooks/aws/openshift-cluster/provision_install.yml
+++ b/playbooks/aws/openshift-cluster/provision_install.yml
@@ -4,16 +4,16 @@
# this playbook is run with the following parameters:
# ansible-playbook -i openshift-ansible-inventory provision_install.yml
- name: Include the provision.yml playbook to create cluster
- include: provision.yml
+ import_playbook: provision.yml
- name: Include the install.yml playbook to install cluster on masters
- include: install.yml
+ import_playbook: install.yml
- name: provision the infra/compute playbook to install node resources
- include: provision_nodes.yml
+ import_playbook: provision_nodes.yml
- name: Include the accept.yml playbook to accept nodes into the cluster
- include: accept.yml
+ import_playbook: accept.yml
- name: Include the hosted.yml playbook to finish the hosted configuration
- include: hosted.yml
+ import_playbook: hosted.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
index c46b22331..76308465c 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
@@ -1,5 +1,5 @@
---
# Playbook to upgrade Docker to the max allowable version for an OpenShift cluster.
-- include: ../../../../init/evaluate_groups.yml
+- import_playbook: ../../../../init/evaluate_groups.yml
-- include: ../../../../common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/docker/docker_upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
index c880fe7f7..0effc68bf 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -2,4 +2,4 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index aeec5f5cc..ebced5413 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -11,4 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 4664a9a2b..f2e97fc01 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -4,4 +4,4 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
index cbb89bc4d..f6fedfdff 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -2,4 +2,4 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 1adfbdec0..b8b5f5762 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -11,4 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index b4da18281..c63f11b30 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -4,4 +4,4 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
index 14b0f85d4..23a3fcbb5 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
@@ -4,4 +4,4 @@
#
# Upgrades scale group nodes only.
#
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_scale_group.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/upgrade_scale_group.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml
index f7e5dd1d2..c4094aa7e 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -2,4 +2,4 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index cc04d81c1..5a3aa6288 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -11,4 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
index 37a9f69bb..74981cc31 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
@@ -4,4 +4,4 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
index e8f9d94e2..a2a9d59f2 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -2,4 +2,4 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index acb4195e3..869e185af 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -11,4 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
index df19097e1..a5867434b 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -4,4 +4,4 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index 322f527ae..85a65b7e1 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -1,5 +1,5 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
- name: Gather Cluster facts
hosts: oo_all_hosts
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index 261143080..5a877809a 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -1,5 +1,5 @@
---
-- include: ../init/evaluate_groups.yml
+- import_playbook: ../init/evaluate_groups.yml
- name: Subscribe hosts, update repos and update OS packages
hosts: oo_all_hosts
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
index 800621857..33ed6a283 100644
--- a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
+++ b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
@@ -5,7 +5,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
r_openshift_excluder_verify_upgrade: true
r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
r_openshift_excluder_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
index a66301c0d..ab3171c9a 100644
--- a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
+++ b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
@@ -5,7 +5,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
r_openshift_excluder_verify_upgrade: true
r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
r_openshift_excluder_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 6d4ddf011..5c6def484 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -1,11 +1,11 @@
---
-- include: ../../../../init/evaluate_groups.yml
+- import_playbook: ../../../../init/evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
- name: Check for appropriate Docker versions
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
@@ -19,7 +19,7 @@
msg: Cannot upgrade Docker on Atomic operating systems.
when: openshift.common.is_atomic | bool
- - include: upgrade_check.yml
+ - include_tasks: upgrade_check.yml
when: docker_upgrade is not defined or docker_upgrade | bool
@@ -51,7 +51,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ openshift.common.client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
register: l_docker_upgrade_drain_result
@@ -59,7 +59,7 @@
retries: 60
delay: 60
- - include: tasks/upgrade.yml
+ - include_tasks: tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index 3b779becb..dbc4f39c7 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
@@ -11,9 +11,9 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
when: openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index 83be290e6..4856a4b51 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
@@ -4,9 +4,9 @@
- name: Stop containerized services
service: name={{ item }} state=stopped
with_items:
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
- etcd_container
- openvswitch
failed_when: false
@@ -44,5 +44,5 @@
register: result
until: result | success
-- include: restart.yml
+- include_tasks: restart.yml
when: not skip_docker_restart | default(False) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 9981d905b..5454a6680 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -1,11 +1,11 @@
---
-- include: ../../../init/evaluate_groups.yml
+- import_playbook: ../../../init/evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
-- include: ../../../init/facts.yml
+- import_playbook: ../../../init/facts.yml
- name: Ensure firewall is not switched during upgrade
hosts: oo_all_hosts
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index c458184c9..344ddea3c 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -114,7 +114,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
post_tasks:
# Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond:
- name: grep pluginOrderOverride
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
index 6d8503879..18a08eb99 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
@@ -1,7 +1,7 @@
---
# Only check if docker upgrade is required if docker_upgrade is not
# already set to False.
-- include: ../../docker/upgrade_check.yml
+- include_tasks: ../../docker/upgrade_check.yml
when:
- docker_upgrade is not defined or (docker_upgrade | bool)
- not (openshift.common.is_atomic | bool)
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
index 6a5bc24f7..bef95546d 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
@@ -13,21 +13,21 @@
block:
- set_fact:
master_services:
- - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift_service_type }}-master"
# In case of the non-ha to ha upgrade.
- - name: Check if the {{ openshift.common.service_type }}-master-api.service exists
+ - name: Check if the {{ openshift_service_type }}-master-api.service exists
command: >
- systemctl list-units {{ openshift.common.service_type }}-master-api.service --no-legend
+ systemctl list-units {{ openshift_service_type }}-master-api.service --no-legend
register: master_api_service_status
- set_fact:
master_services:
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
when:
- master_api_service_status.stdout_lines | length > 0
- - (openshift.common.service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
+ - (openshift_service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
- name: Ensure Master is running
service:
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 84b740227..96f970506 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -21,7 +21,7 @@
block:
- name: Check latest available OpenShift RPM version
repoquery:
- name: "{{ openshift.common.service_type }}"
+ name: "{{ openshift_service_type }}"
ignore_excluders: true
register: repoquery_out
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 0d3fed212..37fc8a0f6 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -17,7 +17,7 @@
embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- name: Backup and upgrade etcd
- include: ../../../openshift-etcd/private/upgrade_main.yml
+ import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
# Create service signer cert when missing. Service signer certificate
# is added to master config in the master_config_upgrade hook.
@@ -30,7 +30,7 @@
register: service_signer_cert_stat
changed_when: false
-- include: create_service_signer_cert.yml
+- import_playbook: create_service_signer_cert.yml
# oc adm migrate storage should be run prior to etcd v3 upgrade
# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
@@ -71,7 +71,7 @@
- debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
- - include: "{{ openshift_master_upgrade_pre_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
- include_role:
@@ -82,20 +82,20 @@
- debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
- - include: "{{ openshift_master_upgrade_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
- - include: ../../../openshift-master/private/tasks/restart_hosts.yml
+ - include_tasks: ../../../openshift-master/private/tasks/restart_hosts.yml
when: openshift.common.rolling_restart_mode == 'system'
- - include: ../../../openshift-master/private/tasks/restart_services.yml
+ - include_tasks: ../../../openshift-master/private/tasks/restart_services.yml
when: openshift.common.rolling_restart_mode == 'services'
# Run the post-upgrade hook if defined:
- debug: msg="Running master post-upgrade hook {{ openshift_master_upgrade_post_hook }}"
when: openshift_master_upgrade_post_hook is defined
- - include: "{{ openshift_master_upgrade_post_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_post_hook }}"
when: openshift_master_upgrade_post_hook is defined
- name: Post master upgrade - Upgrade clusterpolicies storage
@@ -275,7 +275,7 @@
roles:
- openshift_facts
tasks:
- - include: docker/tasks/upgrade.yml
+ - include_tasks: docker/tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- name: Drain and upgrade master nodes
@@ -305,7 +305,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_control_plane_drain_result
until: not l_upgrade_control_plane_drain_result | failed
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 75ffd3fe9..f7a85545b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -26,7 +26,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not l_upgrade_nodes_drain_result | failed
@@ -45,7 +45,6 @@
name: openshift_excluder
vars:
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Set node schedulability
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index d9ce3a7e3..47410dff3 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -13,7 +13,7 @@
- "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0"
- name: initialize upgrade bits
- include: init.yml
+ import_playbook: init.yml
- name: Drain and upgrade nodes
hosts: oo_sg_current_nodes
@@ -42,7 +42,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not l_upgrade_nodes_drain_result | failed
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 5f9c56867..9f9399ff9 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,7 +17,7 @@
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -43,27 +43,27 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -73,29 +73,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -107,12 +107,12 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_6/master_config_upgrade.yml"
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 1aac3d014..7374160d6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -11,7 +11,7 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -25,7 +25,7 @@
openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -51,23 +51,23 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -77,29 +77,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -111,10 +111,10 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_6/master_config_upgrade.yml"
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 306b76422..de9bf098e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -18,7 +18,7 @@
openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -44,19 +44,19 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -74,25 +74,25 @@
- fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
when: openshift.common.version != openshift_version
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -104,6 +104,6 @@
- name: Cleanup unused Docker images
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 6d4949542..0c1a99272 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,11 +17,11 @@
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
-- include: ../pre/verify_etcd3_backend.yml
+- import_playbook: ../pre/verify_etcd3_backend.yml
tags:
- pre_upgrade
@@ -47,27 +47,27 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -77,29 +77,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -111,9 +111,9 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -122,15 +122,15 @@
hosts: oo_masters_to_config
gather_facts: no
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 0a592896b..9dcad352c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -11,7 +11,7 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -25,11 +25,11 @@
openshift_upgrade_min: '3.6'
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
-- include: ../pre/verify_etcd3_backend.yml
+- import_playbook: ../pre/verify_etcd3_backend.yml
tags:
- pre_upgrade
@@ -55,23 +55,23 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -81,29 +81,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -115,9 +115,9 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -126,13 +126,13 @@
hosts: oo_masters_to_config
gather_facts: no
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index b381d606a..27a7f67ea 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -18,7 +18,7 @@
openshift_upgrade_min: '3.6'
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -44,19 +44,19 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -74,25 +74,25 @@
- fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
when: openshift.common.version != openshift_version
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -104,6 +104,6 @@
- name: Cleanup unused Docker images
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
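The same conversion repeats for each version stream in this patch: playbook-level "include:" becomes the static "import_playbook:", and task-level includes become "include_tasks:". A minimal sketch of the distinction, using hypothetical file names that are not part of this patch:

    # --- illustration only, not part of the patch ---
    # import_playbook is static: resolved when the playbook is parsed,
    # and tags set on it apply to everything it pulls in.
    - import_playbook: pre_checks.yml
      tags:
      - pre_upgrade

    # include_tasks is dynamic: evaluated at runtime inside a play's task list.
    - name: Run shared verification tasks
      hosts: all
      tasks:
      - include_tasks: shared_checks.yml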
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index e7d7756d1..ead2efbd0 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,11 +17,11 @@
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
-- include: ../pre/verify_etcd3_backend.yml
+- import_playbook: ../pre/verify_etcd3_backend.yml
tags:
- pre_upgrade
@@ -47,27 +47,27 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -77,29 +77,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -111,9 +111,9 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -122,15 +122,15 @@
hosts: oo_masters_to_config
gather_facts: no
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index be362e3ff..ae37b1359 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -11,7 +11,7 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -25,11 +25,11 @@
openshift_upgrade_min: '3.7'
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
-- include: ../pre/verify_etcd3_backend.yml
+- import_playbook: ../pre/verify_etcd3_backend.yml
tags:
- pre_upgrade
@@ -55,23 +55,23 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -81,29 +81,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -115,9 +115,9 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -126,13 +126,13 @@
hosts: oo_masters_to_config
gather_facts: no
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
index 6e68116b0..dd716b241 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -18,7 +18,7 @@
openshift_upgrade_min: '3.7'
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -44,19 +44,19 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -74,25 +74,25 @@
- fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
when: openshift.common.version != openshift_version
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -104,6 +104,6 @@
- name: Cleanup unused Docker images
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index 94c16cae0..eb688f189 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,11 +17,11 @@
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
-- include: ../pre/verify_etcd3_backend.yml
+- import_playbook: ../pre/verify_etcd3_backend.yml
tags:
- pre_upgrade
@@ -47,27 +47,27 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -83,29 +83,29 @@
# docker is configured and running.
skip_docker_role: True
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - import_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -117,9 +117,9 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -137,6 +137,6 @@
name: "{{ openshift.common.service_type }}-master-controllers"
state: started
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 2045f6379..983bb4a63 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -11,7 +11,7 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -25,11 +25,11 @@
openshift_upgrade_min: '3.7'
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
-- include: ../pre/verify_etcd3_backend.yml
+- import_playbook: ../pre/verify_etcd3_backend.yml
tags:
- pre_upgrade
@@ -55,23 +55,23 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -87,29 +87,29 @@
# docker is configured and running.
skip_docker_role: True
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -121,9 +121,9 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -141,4 +141,4 @@
name: "{{ openshift.common.service_type }}-master-controllers"
state: started
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
index 6134f8653..d95cfa4e1 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -18,7 +18,7 @@
openshift_upgrade_min: '3.7'
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -44,19 +44,19 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -80,25 +80,25 @@
- fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
when: openshift.common.version != openshift_version
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -110,6 +110,6 @@
- name: Cleanup unused Docker images
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/gcp/provision.yml b/playbooks/gcp/provision.yml
index 9887f09f2..6016e6a78 100644
--- a/playbooks/gcp/provision.yml
+++ b/playbooks/gcp/provision.yml
@@ -10,4 +10,4 @@
name: openshift_gcp
- name: run the cluster deploy
- include: ../deploy_cluster.yml
+ import_playbook: ../deploy_cluster.yml
diff --git a/playbooks/openshift-etcd/private/config.yml b/playbooks/openshift-etcd/private/config.yml
index 3d6c79834..35407969e 100644
--- a/playbooks/openshift-etcd/private/config.yml
+++ b/playbooks/openshift-etcd/private/config.yml
@@ -19,7 +19,6 @@
hosts: oo_etcd_to_config
any_errors_fatal: true
roles:
- - role: os_firewall
- role: openshift_clock
- role: openshift_etcd
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
diff --git a/playbooks/openshift-etcd/private/embedded2external.yml b/playbooks/openshift-etcd/private/embedded2external.yml
index c7a532622..be177b714 100644
--- a/playbooks/openshift-etcd/private/embedded2external.yml
+++ b/playbooks/openshift-etcd/private/embedded2external.yml
@@ -22,7 +22,7 @@
name: openshift_master
tasks_from: check_master_api_is_ready.yml
- set_fact:
- master_service: "{{ openshift.common.service_type + '-master' }}"
+ master_service: "{{ openshift_service_type + '-master' }}"
embedded_etcd_backup_suffix: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- debug:
msg: "master service name: {{ master_service }}"
diff --git a/playbooks/openshift-etcd/private/migrate.yml b/playbooks/openshift-etcd/private/migrate.yml
index 834bd242d..9ddb4afe2 100644
--- a/playbooks/openshift-etcd/private/migrate.yml
+++ b/playbooks/openshift-etcd/private/migrate.yml
@@ -28,8 +28,8 @@
tasks:
- set_fact:
master_services:
- - "{{ openshift.common.service_type + '-master-controllers' }}"
- - "{{ openshift.common.service_type + '-master-api' }}"
+ - "{{ openshift_service_type + '-master-controllers' }}"
+ - "{{ openshift_service_type + '-master-api' }}"
- debug:
msg: "master service name: {{ master_services }}"
- name: Stop masters
diff --git a/playbooks/openshift-hosted/redeploy-registry-certificates.yml b/playbooks/openshift-hosted/redeploy-registry-certificates.yml
index 65fb0abda..518a1d624 100644
--- a/playbooks/openshift-hosted/redeploy-registry-certificates.yml
+++ b/playbooks/openshift-hosted/redeploy-registry-certificates.yml
@@ -1,4 +1,4 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-registry-certificates.yml
+- import_playbook: private/redeploy-registry-certificates.yml
diff --git a/playbooks/openshift-hosted/redeploy-router-certificates.yml b/playbooks/openshift-hosted/redeploy-router-certificates.yml
index 8dc052751..a74dd8c79 100644
--- a/playbooks/openshift-hosted/redeploy-router-certificates.yml
+++ b/playbooks/openshift-hosted/redeploy-router-certificates.yml
@@ -1,4 +1,4 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-router-certificates.yml
+- import_playbook: private/redeploy-router-certificates.yml
diff --git a/playbooks/openshift-loadbalancer/private/config.yml b/playbooks/openshift-loadbalancer/private/config.yml
index 78fe663db..2636d857e 100644
--- a/playbooks/openshift-loadbalancer/private/config.yml
+++ b/playbooks/openshift-loadbalancer/private/config.yml
@@ -11,13 +11,6 @@
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
-- name: Configure firewall load balancers
- hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config
- vars:
- openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
- roles:
- - role: os_firewall
-
- name: Configure load balancers
hosts: oo_lb_to_config
vars:
diff --git a/playbooks/openshift-logging/config.yml b/playbooks/openshift-logging/config.yml
index 8837a2d32..d71b4f1c5 100644
--- a/playbooks/openshift-logging/config.yml
+++ b/playbooks/openshift-logging/config.yml
@@ -4,6 +4,6 @@
# Hosted logging on. See inventory/byo/hosts.*.example for the
# currently supported method.
#
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/config.yml
+- import_playbook: private/config.yml
diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml
index afb8d6bd1..9f6d5afcc 100644
--- a/playbooks/openshift-master/private/config.yml
+++ b/playbooks/openshift-master/private/config.yml
@@ -19,7 +19,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Gather and set facts for master hosts
hosts: oo_masters_to_config
@@ -180,7 +179,6 @@
| oo_collect('openshift.common.ip') | default([]) | join(',')
}}"
roles:
- - role: os_firewall
- role: openshift_master_facts
- role: openshift_hosted_facts
- role: openshift_clock
@@ -228,6 +226,8 @@
- name: Configure API Aggregation on masters
hosts: oo_masters
serial: 1
+ roles:
+ - role: openshift_facts
tasks:
- include_tasks: tasks/wire_aggregator.yml
@@ -237,7 +237,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Master Install Checkpoint End
hosts: all
diff --git a/playbooks/openshift-master/private/redeploy-certificates.yml b/playbooks/openshift-master/private/redeploy-certificates.yml
index 3bd38a61d..c0f75ae80 100644
--- a/playbooks/openshift-master/private/redeploy-certificates.yml
+++ b/playbooks/openshift-master/private/redeploy-certificates.yml
@@ -1,6 +1,6 @@
---
-- include: certificates-backup.yml
+- import_playbook: certificates-backup.yml
-- include: certificates.yml
+- import_playbook: certificates.yml
vars:
openshift_certificates_redeploy: true
diff --git a/playbooks/openshift-master/private/redeploy-openshift-ca.yml b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
index 59657574a..2a190935e 100644
--- a/playbooks/openshift-master/private/redeploy-openshift-ca.yml
+++ b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
@@ -207,7 +207,7 @@
group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
with_items: "{{ client_users }}"
-- include: restart.yml
+- import_playbook: restart.yml
# Do not restart masters when master or etcd certificates were previously expired.
when:
# masters
@@ -272,7 +272,7 @@
state: absent
changed_when: false
-- include: ../../openshift-node/private/restart.yml
+- import_playbook: ../../openshift-node/private/restart.yml
# Do not restart nodes when node, master or etcd certificates were previously expired.
when:
# nodes
diff --git a/playbooks/openshift-master/private/scaleup.yml b/playbooks/openshift-master/private/scaleup.yml
index 8229eccfa..007b23ea3 100644
--- a/playbooks/openshift-master/private/scaleup.yml
+++ b/playbooks/openshift-master/private/scaleup.yml
@@ -20,11 +20,11 @@
- restart master controllers
handlers:
- name: restart master api
- service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ service: name={{ openshift_service_type }}-master-controllers state=restarted
notify: verify api server
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
index ecf8f15d9..4f55d5c82 100644
--- a/playbooks/openshift-master/private/tasks/wire_aggregator.yml
+++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
@@ -180,13 +180,13 @@
#restart master serially here
- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
when:
- yedit_output.changed
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
diff --git a/playbooks/openshift-master/redeploy-certificates.yml b/playbooks/openshift-master/redeploy-certificates.yml
index df727247b..8b7272485 100644
--- a/playbooks/openshift-master/redeploy-certificates.yml
+++ b/playbooks/openshift-master/redeploy-certificates.yml
@@ -1,6 +1,6 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-certificates.yml
+- import_playbook: private/redeploy-certificates.yml
-- include: private/restart.yml
+- import_playbook: private/restart.yml
diff --git a/playbooks/openshift-master/redeploy-openshift-ca.yml b/playbooks/openshift-master/redeploy-openshift-ca.yml
index 3ae74c7a0..27f4e6b7d 100644
--- a/playbooks/openshift-master/redeploy-openshift-ca.yml
+++ b/playbooks/openshift-master/redeploy-openshift-ca.yml
@@ -1,4 +1,4 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-openshift-ca.yml
+- import_playbook: private/redeploy-openshift-ca.yml
diff --git a/playbooks/openshift-nfs/private/config.yml b/playbooks/openshift-nfs/private/config.yml
index 6ea77e00b..3625efcc6 100644
--- a/playbooks/openshift-nfs/private/config.yml
+++ b/playbooks/openshift-nfs/private/config.yml
@@ -14,7 +14,6 @@
- name: Configure nfs
hosts: oo_nfs_to_config
roles:
- - role: os_firewall
- role: openshift_storage_nfs
- name: NFS Install Checkpoint End
diff --git a/playbooks/openshift-node/private/configure_nodes.yml b/playbooks/openshift-node/private/configure_nodes.yml
index dc5d7a57e..32b288c8b 100644
--- a/playbooks/openshift-node/private/configure_nodes.yml
+++ b/playbooks/openshift-node/private/configure_nodes.yml
@@ -10,7 +10,6 @@
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
roles:
- - role: os_firewall
- role: openshift_clock
- role: openshift_node
- role: tuned
diff --git a/playbooks/openshift-node/private/containerized_nodes.yml b/playbooks/openshift-node/private/containerized_nodes.yml
index 5afa83be7..ef07669cb 100644
--- a/playbooks/openshift-node/private/containerized_nodes.yml
+++ b/playbooks/openshift-node/private/containerized_nodes.yml
@@ -12,7 +12,6 @@
}}"
roles:
- - role: os_firewall
- role: openshift_clock
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
diff --git a/playbooks/openshift-node/private/enable_excluders.yml b/playbooks/openshift-node/private/enable_excluders.yml
index 5288b14f9..30713e694 100644
--- a/playbooks/openshift-node/private/enable_excluders.yml
+++ b/playbooks/openshift-node/private/enable_excluders.yml
@@ -5,4 +5,3 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/openshift-node/private/redeploy-certificates.yml b/playbooks/openshift-node/private/redeploy-certificates.yml
index 3bd38a61d..c0f75ae80 100644
--- a/playbooks/openshift-node/private/redeploy-certificates.yml
+++ b/playbooks/openshift-node/private/redeploy-certificates.yml
@@ -1,6 +1,6 @@
---
-- include: certificates-backup.yml
+- import_playbook: certificates-backup.yml
-- include: certificates.yml
+- import_playbook: certificates.yml
vars:
openshift_certificates_redeploy: true
diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml
index 41eb00f99..0786bd7d3 100644
--- a/playbooks/openshift-node/private/restart.yml
+++ b/playbooks/openshift-node/private/restart.yml
@@ -23,9 +23,9 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
when: openshift.common.is_containerized | bool
@@ -40,7 +40,7 @@
- name: restart node
service:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
state: restarted
- name: Wait for node to be ready
diff --git a/playbooks/openshift-node/private/setup.yml b/playbooks/openshift-node/private/setup.yml
index 794c03a67..541913aef 100644
--- a/playbooks/openshift-node/private/setup.yml
+++ b/playbooks/openshift-node/private/setup.yml
@@ -5,7 +5,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Evaluate node groups
hosts: localhost
diff --git a/playbooks/openshift-node/redeploy-certificates.yml b/playbooks/openshift-node/redeploy-certificates.yml
index df727247b..8b7272485 100644
--- a/playbooks/openshift-node/redeploy-certificates.yml
+++ b/playbooks/openshift-node/redeploy-certificates.yml
@@ -1,6 +1,6 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-certificates.yml
+- import_playbook: private/redeploy-certificates.yml
-- include: private/restart.yml
+- import_playbook: private/restart.yml
diff --git a/playbooks/openstack/openshift-cluster/install.yml b/playbooks/openstack/openshift-cluster/install.yml
index 8ed01b192..3211f619a 100644
--- a/playbooks/openstack/openshift-cluster/install.yml
+++ b/playbooks/openstack/openshift-cluster/install.yml
@@ -9,4 +9,4 @@
# some logic here?
- name: run the cluster deploy
- include: ../../deploy_cluster.yml
+ import_playbook: ../../deploy_cluster.yml
diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml
index 3e295b2c8..583e72b51 100644
--- a/playbooks/openstack/openshift-cluster/provision.yml
+++ b/playbooks/openstack/openshift-cluster/provision.yml
@@ -10,7 +10,7 @@
# NOTE(shadower): Bring in the host groups:
- name: evaluate groups
- include: ../../init/evaluate_groups.yml
+ import_playbook: ../../init/evaluate_groups.yml
- name: Wait for the nodes and gather their facts
@@ -27,7 +27,7 @@
setup:
- name: set common facts
- include: ../../init/facts.yml
+ import_playbook: ../../init/facts.yml
# TODO(shadower): consider splitting this up so people can stop here
diff --git a/playbooks/openstack/openshift-cluster/provision_install.yml b/playbooks/openstack/openshift-cluster/provision_install.yml
index 5d88c105f..fc2854605 100644
--- a/playbooks/openstack/openshift-cluster/provision_install.yml
+++ b/playbooks/openstack/openshift-cluster/provision_install.yml
@@ -1,9 +1,9 @@
---
- name: Check the prerequisites for cluster provisioning in OpenStack
- include: prerequisites.yml
+ import_playbook: prerequisites.yml
- name: Include the provision.yml playbook to create cluster
- include: provision.yml
+ import_playbook: provision.yml
- name: Include the install.yml playbook to install cluster
- include: install.yml
+ import_playbook: install.yml
diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml
index 0cc5fcef8..7b7868cfe 100644
--- a/playbooks/prerequisites.yml
+++ b/playbooks/prerequisites.yml
@@ -3,4 +3,10 @@
vars:
skip_verison: True
+# This is required for container runtime for crio, only needs to run once.
+- name: Configure os_firewall
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config
+ roles:
+ - role: os_firewall
+
- import_playbook: container-runtime/private/config.yml
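This new play is the counterpart of the os_firewall role removals in the per-component config playbooks above (etcd, load balancer, master, NFS, node): the firewall is now configured once, up front, for all of those groups. Conceptually the flow becomes the following; a sketch of the ordering only, not literal repo content:

    # --- illustration only ---
    - import_playbook: prerequisites.yml    # firewall and container runtime setup, run once
    - import_playbook: deploy_cluster.yml   # the actual install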
diff --git a/playbooks/redeploy-certificates.yml b/playbooks/redeploy-certificates.yml
index 45135c10e..b5fcb951d 100644
--- a/playbooks/redeploy-certificates.yml
+++ b/playbooks/redeploy-certificates.yml
@@ -1,26 +1,26 @@
---
-- include: init/main.yml
+- import_playbook: init/main.yml
-- include: openshift-etcd/private/redeploy-certificates.yml
+- import_playbook: openshift-etcd/private/redeploy-certificates.yml
-- include: openshift-master/private/redeploy-certificates.yml
+- import_playbook: openshift-master/private/redeploy-certificates.yml
-- include: openshift-node/private/redeploy-certificates.yml
+- import_playbook: openshift-node/private/redeploy-certificates.yml
-- include: openshift-etcd/private/restart.yml
+- import_playbook: openshift-etcd/private/restart.yml
vars:
g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}"
-- include: openshift-master/private/restart.yml
+- import_playbook: openshift-master/private/restart.yml
-- include: openshift-node/private/restart.yml
+- import_playbook: openshift-node/private/restart.yml
-- include: openshift-hosted/private/redeploy-router-certificates.yml
+- import_playbook: openshift-hosted/private/redeploy-router-certificates.yml
when: openshift_hosted_manage_router | default(true) | bool
-- include: openshift-hosted/private/redeploy-registry-certificates.yml
+- import_playbook: openshift-hosted/private/redeploy-registry-certificates.yml
when: openshift_hosted_manage_registry | default(true) | bool
-- include: openshift-master/private/revert-client-ca.yml
+- import_playbook: openshift-master/private/revert-client-ca.yml
-- include: openshift-master/private/restart.yml
+- import_playbook: openshift-master/private/restart.yml
diff --git a/roles/etcd/tasks/migration/add_ttls.yml b/roles/etcd/tasks/migration/add_ttls.yml
index 4bdc6bcc3..a4b0ff31d 100644
--- a/roles/etcd/tasks/migration/add_ttls.yml
+++ b/roles/etcd/tasks/migration/add_ttls.yml
@@ -11,7 +11,7 @@
- name: Re-introduce leases (as a replacement for key TTLs)
command: >
- oadm migrate etcd-ttl \
+ {{ openshift.common.client_binary }} adm migrate etcd-ttl \
--cert {{ r_etcd_common_master_peer_cert_file }} \
--key {{ r_etcd_common_master_peer_key_file }} \
--cacert {{ r_etcd_common_master_peer_ca_file }} \
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
index 80e4d391d..705d39f9a 100644
--- a/roles/flannel/handlers/main.yml
+++ b/roles/flannel/handlers/main.yml
@@ -15,7 +15,7 @@
- name: restart node
systemd:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
state: restarted
register: l_restart_node_result
until: not l_restart_node_result | failed
diff --git a/roles/kuryr/tasks/node.yaml b/roles/kuryr/tasks/node.yaml
index ffe814713..08f2d5adc 100644
--- a/roles/kuryr/tasks/node.yaml
+++ b/roles/kuryr/tasks/node.yaml
@@ -36,7 +36,7 @@
- name: Configure OpenShift node with disabled service proxy
lineinfile:
- dest: "/etc/sysconfig/{{ openshift.common.service_type }}-node"
+ dest: "/etc/sysconfig/{{ openshift_service_type }}-node"
regexp: '^OPTIONS="?(.*?)"?$'
backrefs: yes
backup: yes
@@ -44,5 +44,5 @@
- name: force node restart to disable the proxy
service:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
state: restarted
diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml
index cb83c8ead..7b55dda56 100644
--- a/roles/nuage_master/handlers/main.yaml
+++ b/roles/nuage_master/handlers/main.yaml
@@ -1,6 +1,6 @@
---
- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
when: >
(openshift_master_ha | bool) and
(not master_api_service_status_changed | default(false))
@@ -8,7 +8,7 @@
# TODO: need to fix up ignore_errors here
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
diff --git a/roles/nuage_node/handlers/main.yaml b/roles/nuage_node/handlers/main.yaml
index e68ae74bd..ede6f2125 100644
--- a/roles/nuage_node/handlers/main.yaml
+++ b/roles/nuage_node/handlers/main.yaml
@@ -1,7 +1,7 @@
---
- name: restart node
become: yes
- systemd: name={{ openshift.common.service_type }}-node daemon-reload=yes state=restarted
+ systemd: name={{ openshift_service_type }}-node daemon-reload=yes state=restarted
- name: save iptable rules
become: yes
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
index fdf01b7c2..88d62de49 100644
--- a/roles/nuage_node/vars/main.yaml
+++ b/roles/nuage_node/vars/main.yaml
@@ -23,5 +23,5 @@ cni_conf_dir: "/etc/cni/net.d/"
cni_bin_dir: "/opt/cni/bin/"
nuage_plugin_crt_dir: /usr/share/vsp-openshift
-openshift_atomic_node_config_file: /etc/sysconfig/{{ openshift.common.service_type }}-node
+openshift_atomic_node_config_file: /etc/sysconfig/{{ openshift_service_type }}-node
nuage_atomic_docker_additional_mounts: "NUAGE_ADDTL_BIND_MOUNTS=-v /var/usr/share/vsp-openshift:/var/usr/share/vsp-openshift -v /etc/default:/etc/default -v /var/run:/var/run -v /opt/cni/bin:/opt/cni/bin -v /etc/cni/net.d:/etc/cni/net.d"
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index 05e0a1352..eb00f13db 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -9,7 +9,7 @@
- name: Install the base package for admin tooling
package:
- name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when: not openshift.common.is_containerized | bool
register: install_result
diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py
index 08045794a..440b8ec28 100644
--- a/roles/openshift_cli/library/openshift_container_binary_sync.py
+++ b/roles/openshift_cli/library/openshift_container_binary_sync.py
@@ -27,7 +27,7 @@ class BinarySyncError(Exception):
# pylint: disable=too-few-public-methods,too-many-instance-attributes
class BinarySyncer(object):
"""
- Syncs the openshift, oc, oadm, and kubectl binaries/symlinks out of
+ Syncs the openshift, oc, and kubectl binaries/symlinks out of
a container onto the host system.
"""
@@ -108,7 +108,10 @@ class BinarySyncer(object):
# Ensure correct symlinks created:
self._sync_symlink('kubectl', 'openshift')
- self._sync_symlink('oadm', 'openshift')
+
+ # Remove old oadm binary
+ if os.path.exists(os.path.join(self.bin_dir, 'oadm')):
+ os.remove(os.path.join(self.bin_dir, 'oadm'))
def _sync_symlink(self, binary_name, link_to):
""" Ensure the given binary name exists and links to the expected binary. """
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 140c6ea26..a90143aa3 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Install clients
- package: name={{ openshift.common.service_type }}-clients state=present
+ package: name={{ openshift_service_type }}-clients state=present
when: not openshift.common.is_containerized | bool
register: result
until: result | success
diff --git a/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
index 53e8b448b..3d51abc52 100644
--- a/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
+++ b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
@@ -5,7 +5,7 @@ items:
kind: ServiceAccount
metadata:
name: dockergc
- # You must grant privileged via: oadm policy add-scc-to-user -z dockergc privileged
+ # You must grant privileged via: oc adm policy add-scc-to-user -z dockergc privileged
# in order for the dockergc to access the docker socket and root directory
- apiVersion: extensions/v1beta1
kind: DaemonSet
diff --git a/roles/openshift_excluder/README.md b/roles/openshift_excluder/README.md
index 80cb88d45..7b43d5adf 100644
--- a/roles/openshift_excluder/README.md
+++ b/roles/openshift_excluder/README.md
@@ -28,7 +28,7 @@ Role Variables
| r_openshift_excluder_verify_upgrade | false | true, false | When upgrading, this variable should be set to true when calling the role |
| r_openshift_excluder_package_state | present | present, latest | Use 'latest' to upgrade openshift_excluder package |
| r_openshift_excluder_docker_package_state | present | present, latest | Use 'latest' to upgrade docker_excluder package |
-| r_openshift_excluder_service_type | None | | (Required) Defined as openshift.common.service_type e.g. atomic-openshift |
+| r_openshift_excluder_service_type | None | | (Required) Defined as openshift_service_type e.g. atomic-openshift |
| r_openshift_excluder_upgrade_target | None | | Required when r_openshift_excluder_verify_upgrade is true, defined as openshift_upgrade_target by Upgrade playbooks e.g. '3.6'|
Dependencies
@@ -46,15 +46,12 @@ Example Playbook
# Disable all excluders
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
# Enable all excluders
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
# Disable all excluders and verify appropriate excluder packages are available for upgrade
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
r_openshift_excluder_verify_upgrade: true
r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
r_openshift_excluder_package_state: latest
diff --git a/roles/openshift_excluder/defaults/main.yml b/roles/openshift_excluder/defaults/main.yml
index d4f151142..3a910e490 100644
--- a/roles/openshift_excluder/defaults/main.yml
+++ b/roles/openshift_excluder/defaults/main.yml
@@ -2,7 +2,7 @@
# keep the 'current' package or update to 'latest' if available?
r_openshift_excluder_package_state: present
r_openshift_excluder_docker_package_state: present
-
+r_openshift_excluder_service_type: "{{ openshift_service_type }}"
# Legacy variables are included for backwards compatibility with v3.5
# Inventory variables Legacy
# openshift_enable_excluders enable_excluders
diff --git a/roles/openshift_excluder/meta/main.yml b/roles/openshift_excluder/meta/main.yml
index 871081c19..a9653edda 100644
--- a/roles/openshift_excluder/meta/main.yml
+++ b/roles/openshift_excluder/meta/main.yml
@@ -12,4 +12,5 @@ galaxy_info:
categories:
- cloud
dependencies:
+- role: openshift_facts
- role: lib_utils
diff --git a/roles/openshift_excluder/tasks/main.yml b/roles/openshift_excluder/tasks/main.yml
index 93d6ef149..f0e87ba25 100644
--- a/roles/openshift_excluder/tasks/main.yml
+++ b/roles/openshift_excluder/tasks/main.yml
@@ -19,11 +19,6 @@
msg: "openshift_excluder role can only be called with 'enable' or 'disable'"
when: r_openshift_excluder_action not in ['enable', 'disable']
- - name: Fail if r_openshift_excluder_service_type is not defined
- fail:
- msg: "r_openshift_excluder_service_type must be specified for this role"
- when: r_openshift_excluder_service_type is not defined
-
- name: Fail if r_openshift_excluder_upgrade_target is not defined
fail:
msg: "r_openshift_excluder_upgrade_target must be provided when using this role for upgrades"
diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml
index a182d23c5..53a3bc87e 100644
--- a/roles/openshift_facts/defaults/main.yml
+++ b/roles/openshift_facts/defaults/main.yml
@@ -98,3 +98,9 @@ openshift_prometheus_alertbuffer_storage_create_pvc: False
openshift_router_selector: "region=infra"
openshift_hosted_router_selector: "{{ openshift_router_selector }}"
openshift_hosted_registry_selector: "{{ openshift_router_selector }}"
+
+openshift_service_type_dict:
+ origin: origin
+ openshift-enterprise: atomic-openshift
+
+openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
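The service prefix is now derived from openshift_deployment_type via this dictionary, replacing the removed openshift.common.service_type fact (the same dictionary is added to the openshift_health_checker defaults below). For illustration, using only the mapping defined above:

    # --- illustration only ---
    # openshift_deployment_type: origin                -> openshift_service_type: origin
    # openshift_deployment_type: openshift-enterprise  -> openshift_service_type: atomic-openshift
    - debug:
        msg: "{{ openshift_service_type }}-node"   # renders as origin-node or atomic-openshift-node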
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 96305e899..520c00340 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -537,7 +537,7 @@ def set_aggregate_facts(facts):
def set_deployment_facts_if_unset(facts):
""" Set Facts that vary based on deployment_type. This currently
- includes common.service_type, master.registry_url, node.registry_url,
+ includes master.registry_url, node.registry_url,
node.storage_plugin_deps
Args:
@@ -549,14 +549,6 @@ def set_deployment_facts_if_unset(facts):
# disabled to avoid breaking up facts related to deployment type into
# multiple methods for now.
# pylint: disable=too-many-statements, too-many-branches
- if 'common' in facts:
- deployment_type = facts['common']['deployment_type']
- if 'service_type' not in facts['common']:
- service_type = 'atomic-openshift'
- if deployment_type == 'origin':
- service_type = 'origin'
- facts['common']['service_type'] = service_type
-
for role in ('master', 'node'):
if role in facts:
deployment_type = facts['common']['deployment_type']
@@ -1020,8 +1012,13 @@ def get_container_openshift_version(facts):
If containerized, see if we can determine the installed version via the
systemd environment files.
"""
+ deployment_type = facts['common']['deployment_type']
+ service_type_dict = {'origin': 'origin',
+ 'openshift-enterprise': 'atomic-openshift'}
+ service_type = service_type_dict[deployment_type]
+
for filename in ['/etc/sysconfig/%s-master-controllers', '/etc/sysconfig/%s-node']:
- env_path = filename % facts['common']['service_type']
+ env_path = filename % service_type
if not os.path.exists(env_path):
continue
@@ -1410,7 +1407,6 @@ def set_container_facts_if_unset(facts):
facts['node']['ovs_system_image'] = ovs_image
if safe_get_bool(facts['common']['is_containerized']):
- facts['common']['admin_binary'] = '/usr/local/bin/oadm'
facts['common']['client_binary'] = '/usr/local/bin/oc'
return facts
@@ -1587,7 +1583,7 @@ class OpenShiftFacts(object):
hostname=hostname,
public_hostname=hostname,
portal_net='172.30.0.0/16',
- client_binary='oc', admin_binary='oadm',
+ client_binary='oc',
dns_domain='cluster.local',
config_base='/etc/origin')
diff --git a/roles/openshift_health_checker/defaults/main.yml b/roles/openshift_health_checker/defaults/main.yml
new file mode 100644
index 000000000..f25a0dc79
--- /dev/null
+++ b/roles/openshift_health_checker/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+openshift_service_type_dict:
+ origin: origin
+ openshift-enterprise: atomic-openshift
+
+openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index 090e438ff..980e23f27 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -15,7 +15,9 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
return super(PackageAvailability, self).is_active() and self.get_var("ansible_pkg_mgr") == "yum"
def run(self):
- rpm_prefix = self.get_var("openshift", "common", "service_type")
+ rpm_prefix = self.get_var("openshift_service_type")
+ if self._templar is not None:
+ rpm_prefix = self._templar.template(rpm_prefix)
group_names = self.get_var("group_names", default=[])
packages = set()
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 13a91dadf..f3a628e28 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -41,7 +41,9 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
return super(PackageVersion, self).is_active() and master_or_node
def run(self):
- rpm_prefix = self.get_var("openshift", "common", "service_type")
+ rpm_prefix = self.get_var("openshift_service_type")
+ if self._templar is not None:
+ rpm_prefix = self._templar.template(rpm_prefix)
openshift_release = self.get_var("openshift_release", default='')
deployment_type = self.get_var("openshift_deployment_type")
check_multi_minor_release = deployment_type in ['openshift-enterprise']
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index ec46c3b4b..fc333dfd4 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -8,12 +8,12 @@ def task_vars():
return dict(
openshift=dict(
common=dict(
- service_type='origin',
is_containerized=False,
is_atomic=False,
),
docker=dict(),
),
+ openshift_service_type='origin',
openshift_deployment_type='origin',
openshift_image_tag='',
group_names=['oo_nodes_to_config', 'oo_masters_to_config'],
diff --git a/roles/openshift_health_checker/test/etcd_traffic_test.py b/roles/openshift_health_checker/test/etcd_traffic_test.py
index dd6f4ad81..a29dc166b 100644
--- a/roles/openshift_health_checker/test/etcd_traffic_test.py
+++ b/roles/openshift_health_checker/test/etcd_traffic_test.py
@@ -37,8 +37,9 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words)
task_vars = dict(
group_names=group_names,
openshift=dict(
- common=dict(service_type="origin", is_containerized=False),
- )
+ common=dict(is_containerized=False),
+ ),
+ openshift_service_type="origin"
)
result = EtcdTraffic(execute_module, task_vars).run()
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
index 6f0457549..dd98ff4d8 100644
--- a/roles/openshift_health_checker/test/ovs_version_test.py
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -10,10 +10,11 @@ def test_openshift_version_not_supported():
openshift_release = '111.7.0'
task_vars = dict(
- openshift=dict(common=dict(service_type='origin')),
+ openshift=dict(common=dict()),
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
openshift_deployment_type='origin',
+ openshift_service_type='origin'
)
with pytest.raises(OpenShiftCheckException) as excinfo:
@@ -27,9 +28,10 @@ def test_invalid_openshift_release_format():
return {}
task_vars = dict(
- openshift=dict(common=dict(service_type='origin')),
+ openshift=dict(common=dict()),
openshift_image_tag='v0',
openshift_deployment_type='origin',
+ openshift_service_type='origin'
)
with pytest.raises(OpenShiftCheckException) as excinfo:
@@ -47,9 +49,10 @@ def test_invalid_openshift_release_format():
])
def test_ovs_package_version(openshift_release, expected_ovs_version):
task_vars = dict(
- openshift=dict(common=dict(service_type='origin')),
+ openshift=dict(common=dict()),
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
+ openshift_service_type='origin'
)
return_value = {} # note: check.execute_module modifies return hash contents
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index 9815acb38..a1e6e0879 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -19,13 +19,13 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
@pytest.mark.parametrize('task_vars,must_have_packages,must_not_have_packages', [
(
- dict(openshift=dict(common=dict(service_type='openshift'))),
+ dict(openshift_service_type='origin'),
set(),
set(['openshift-master', 'openshift-node']),
),
(
dict(
- openshift=dict(common=dict(service_type='origin')),
+ openshift_service_type='origin',
group_names=['oo_masters_to_config'],
),
set(['origin-master']),
@@ -33,7 +33,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
),
(
dict(
- openshift=dict(common=dict(service_type='atomic-openshift')),
+ openshift_service_type='atomic-openshift',
group_names=['oo_nodes_to_config'],
),
set(['atomic-openshift-node']),
@@ -41,7 +41,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
),
(
dict(
- openshift=dict(common=dict(service_type='atomic-openshift')),
+ openshift_service_type='atomic-openshift',
group_names=['oo_masters_to_config', 'oo_nodes_to_config'],
),
set(['atomic-openshift-master', 'atomic-openshift-node']),
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index 3cf4ce033..ea8e02b97 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -4,9 +4,12 @@ from openshift_checks.package_version import PackageVersion, OpenShiftCheckExcep
def task_vars_for(openshift_release, deployment_type):
+ service_type_dict = {'origin': 'origin',
+ 'openshift-enterprise': 'atomic-openshift'}
+ service_type = service_type_dict[deployment_type]
return dict(
ansible_pkg_mgr='yum',
- openshift=dict(common=dict(service_type=deployment_type)),
+ openshift_service_type=service_type,
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
openshift_deployment_type=deployment_type,
@@ -29,7 +32,7 @@ def test_openshift_version_not_supported():
def test_invalid_openshift_release_format():
task_vars = dict(
ansible_pkg_mgr='yum',
- openshift=dict(common=dict(service_type='origin')),
+ openshift_service_type='origin',
openshift_image_tag='v0',
openshift_deployment_type='origin',
)
diff --git a/roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..3c874d910
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
+subsets:
+- addresses:
+{% for ip in openshift_hosted_registry_storage_glusterfs_ips %}
+ - ip: {{ ip }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..f18c94a4f
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..3c874d910
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
+subsets:
+- addresses:
+{% for ip in openshift_hosted_registry_storage_glusterfs_ips %}
+ - ip: {{ ip }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..f18c94a4f
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
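
The four templates above are identical for v3.8 and v3.9; together they materialize a selector-less Service plus manually specified Endpoints that point the hosted registry at the GlusterFS nodes. A minimal Python sketch (illustrative only; assumes the jinja2 package is available, and the endpoint name and IPs below are hypothetical) of what the endpoints template renders to:

from jinja2 import Template

# Same body as the new glusterfs-registry-endpoints.yml.j2 template above.
ENDPOINTS_J2 = """\
---
apiVersion: v1
kind: Endpoints
metadata:
  name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
subsets:
- addresses:
{% for ip in openshift_hosted_registry_storage_glusterfs_ips %}
  - ip: {{ ip }}
{% endfor %}
  ports:
  - port: 1
"""

# trim_blocks=True mirrors the default behavior of Ansible's template module,
# so the {% for %} / {% endfor %} tag lines leave no blank lines behind.
rendered = Template(ENDPOINTS_J2, trim_blocks=True).render(
    openshift_hosted_registry_storage_glusterfs_endpoints="glusterfs-registry",   # hypothetical name
    openshift_hosted_registry_storage_glusterfs_ips=["192.0.2.10", "192.0.2.11"],  # hypothetical IPs
)
print(rendered)
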
diff --git a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml
index e0329ee7c..1f4b5a116 100644
--- a/roles/openshift_logging/handlers/main.yml
+++ b/roles/openshift_logging/handlers/main.yml
@@ -1,12 +1,12 @@
---
- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
when: (not (master_api_service_status_changed | default(false) | bool))
notify: Verify API Server
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index e6b8b8ac8..557bfe022 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -1,7 +1,7 @@
---
- name: restart master api
systemd:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
state: restarted
when:
- not (master_api_service_status_changed | default(false) | bool)
@@ -10,7 +10,7 @@
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 5f4e6df71..9be5508aa 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -16,7 +16,7 @@
- name: Install Master package
package:
- name: "{{ openshift.common.service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when:
- not openshift.common.is_containerized | bool
@@ -141,7 +141,7 @@
# The template file will stomp any other settings made.
- block:
- name: check whether our docker-registry setting exists in the env file
- command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift.common.service_type }}-master"
+ command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift_service_type }}-master"
failed_when: false
changed_when: false
register: l_already_set
@@ -203,7 +203,7 @@
- name: Start and enable master api on first master
systemd:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
enabled: yes
state: started
when:
@@ -214,7 +214,7 @@
delay: 60
- name: Dump logs from master-api if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api
+ command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-api
when:
- l_start_result | failed
@@ -230,7 +230,7 @@
- name: Start and enable master api all masters
systemd:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
enabled: yes
state: started
when:
@@ -241,7 +241,7 @@
delay: 60
- name: Dump logs from master-api if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api
+ command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-api
when:
- l_start_result | failed
@@ -258,7 +258,7 @@
- name: Start and enable master controller service
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
enabled: yes
state: started
register: l_start_result
@@ -267,7 +267,7 @@
delay: 60
- name: Dump logs from master-controllers if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers
+ command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-controllers
when:
- l_start_result | failed
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
index ca04d2243..8b342a5b4 100644
--- a/roles/openshift_master/tasks/registry_auth.yml
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -32,7 +32,7 @@
when:
- openshift_docker_alternative_creds | default(False) | bool
- oreg_auth_user is defined
- - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
register: master_oreg_auth_credentials_create_alt
notify:
- restart master api
diff --git a/roles/openshift_master/tasks/restart.yml b/roles/openshift_master/tasks/restart.yml
index 4f8b758fd..715347101 100644
--- a/roles/openshift_master/tasks/restart.yml
+++ b/roles/openshift_master/tasks/restart.yml
@@ -1,7 +1,7 @@
---
- name: Restart master API
service:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
state: restarted
when: openshift_master_ha | bool
- name: Wait for master API to come back online
@@ -14,7 +14,7 @@
when: openshift_master_ha | bool
- name: Restart master controllers
service:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: restarted
# Ignore errrors since it is possible that type != simple for
# pre-3.1.1 installations.
diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml
index 450f6d803..f6c5ce0dd 100644
--- a/roles/openshift_master/tasks/system_container.yml
+++ b/roles/openshift_master/tasks/system_container.yml
@@ -8,12 +8,12 @@
- name: Check Master system container package
command: >
- atomic containers list --no-trunc -a -f container={{ openshift.common.service_type }}-master
+ atomic containers list --no-trunc -a -f container={{ openshift_service_type }}-master
# HA
- name: Install or Update HA api master system container
oc_atomic_container:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
state: latest
values:
@@ -21,7 +21,7 @@
- name: Install or Update HA controller master system container
oc_atomic_container:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
state: latest
values:
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index ee76413e3..76b6f46aa 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -13,7 +13,7 @@
- name: Disable the legacy master service if it exists
systemd:
- name: "{{ openshift.common.service_type }}-master"
+ name: "{{ openshift_service_type }}-master"
state: stopped
enabled: no
masked: yes
@@ -21,7 +21,7 @@
- name: Remove the legacy master service if it exists
file:
- path: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master.service"
+ path: "{{ containerized_svc_dir }}/{{ openshift_service_type }}-master.service"
state: absent
ignore_errors: true
when:
@@ -40,7 +40,7 @@
- name: Create the ha systemd unit files
template:
src: "{{ ha_svc_template_path }}/atomic-openshift-master-{{ item }}.service.j2"
- dest: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master-{{ item }}.service"
+ dest: "{{ containerized_svc_dir }}/{{ openshift_service_type }}-master-{{ item }}.service"
when:
- not l_is_master_system_container | bool
with_items:
@@ -55,7 +55,7 @@
- name: enable master services
systemd:
- name: "{{ openshift.common.service_type }}-master-{{ item }}"
+ name: "{{ openshift_service_type }}-master-{{ item }}"
enabled: yes
with_items:
- api
@@ -64,13 +64,13 @@
- not l_is_master_system_container | bool
- name: Preserve Master API Proxy Config options
- command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ command: grep PROXY /etc/sysconfig/{{ openshift_service_type }}-master-api
register: l_master_api_proxy
failed_when: false
changed_when: false
- name: Preserve Master API AWS options
- command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ command: grep AWS_ /etc/sysconfig/{{ openshift_service_type }}-master-api
register: master_api_aws
failed_when: false
changed_when: false
@@ -78,7 +78,7 @@
- name: Create the master api service env file
template:
src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2"
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-api
backup: true
notify:
- restart master api
@@ -89,7 +89,7 @@
- "'http_proxy' not in openshift.common"
- "'https_proxy' not in openshift.common"
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-api
line: "{{ item }}"
with_items: "{{ l_master_api_proxy.stdout_lines | default([]) }}"
@@ -98,19 +98,19 @@
- master_api_aws.rc == 0
- not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-api
line: "{{ item }}"
with_items: "{{ master_api_aws.stdout_lines | default([]) }}"
no_log: True
- name: Preserve Master Controllers Proxy Config options
- command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ command: grep PROXY /etc/sysconfig/{{ openshift_service_type }}-master-controllers
register: master_controllers_proxy
failed_when: false
changed_when: false
- name: Preserve Master Controllers AWS options
- command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ command: grep AWS_ /etc/sysconfig/{{ openshift_service_type }}-master-controllers
register: master_controllers_aws
failed_when: false
changed_when: false
@@ -118,14 +118,14 @@
- name: Create the master controllers service env file
template:
src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2"
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-controllers
backup: true
notify:
- restart master controllers
- name: Restore Master Controllers Proxy Config Options
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-controllers
line: "{{ item }}"
with_items: "{{ master_controllers_proxy.stdout_lines | default([]) }}"
when:
@@ -135,7 +135,7 @@
- name: Restore Master Controllers AWS Options
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-controllers
line: "{{ item }}"
with_items: "{{ master_controllers_aws.stdout_lines | default([]) }}"
when:
diff --git a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
index caab3045a..f50b91ff5 100644
--- a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
@@ -12,11 +12,11 @@
package: name={{ master_pkgs | join(',') }} state=present
vars:
master_pkgs:
- - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
- - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-master{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-clients{{ openshift_pkg_version }}"
+ - "tuned-profiles-{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
register: result
until: result | success
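
For reference, a small Python sketch (illustrative only; the service type and the dash-prefixed version suffix are hypothetical inputs) of the package list this task expands to once openshift_service_type replaces openshift.common.service_type:

def master_upgrade_packages(service_type, pkg_version):
    """Model the master_pkgs list built in rpm_upgrade.yml (sketch, not the playbook)."""
    suffixes = ["", "-master", "-node", "-sdn-ovs", "-clients"]
    pkgs = [f"{service_type}{suffix}{pkg_version}" for suffix in suffixes]
    pkgs.append(f"tuned-profiles-{service_type}-node{pkg_version}")
    return pkgs

# Hypothetical inputs:
print(master_upgrade_packages("atomic-openshift", "-3.7.0"))
# ['atomic-openshift-3.7.0', 'atomic-openshift-master-3.7.0', 'atomic-openshift-node-3.7.0',
#  'atomic-openshift-sdn-ovs-3.7.0', 'atomic-openshift-clients-3.7.0',
#  'tuned-profiles-atomic-openshift-node-3.7.0']
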
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index cec3d3fb1..5e46d9121 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -3,18 +3,18 @@ Description=Atomic OpenShift Master API
Documentation=https://github.com/openshift/origin
After=etcd_container.service
Wants=etcd_container.service
-Before={{ openshift.common.service_type }}-node.service
+Before={{ openshift_service_type }}-node.service
After={{ openshift_docker_service_name }}.service
PartOf={{ openshift_docker_service_name }}.service
Requires={{ openshift_docker_service_name }}.service
[Service]
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-api
Environment=GOTRACEBACK=crash
-ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api
+ExecStartPre=-/usr/bin/docker rm -f {{ openshift_service_type}}-master-api
ExecStart=/usr/bin/docker run --rm --privileged --net=host \
- --name {{ openshift.common.service_type }}-master-api \
- --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api \
+ --name {{ openshift_service_type }}-master-api \
+ --env-file=/etc/sysconfig/{{ openshift_service_type }}-master-api \
-v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \
-v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock \
-v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
@@ -24,14 +24,14 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \
{{ openshift.master.master_image }}:${IMAGE_VERSION} start master api \
--config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
-ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api
+ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-master-api
LimitNOFILE=131072
LimitCORE=infinity
WorkingDirectory={{ r_openshift_master_data_dir }}
-SyslogIdentifier={{ openshift.common.service_type }}-master-api
+SyslogIdentifier={{ openshift_service_type }}-master-api
Restart=always
RestartSec=5s
[Install]
WantedBy={{ openshift_docker_service_name }}.service
-WantedBy={{ openshift.common.service_type }}-node.service
+WantedBy={{ openshift_service_type }}-node.service
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index a0248151d..899575f1a 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -1,19 +1,19 @@
[Unit]
Description=Atomic OpenShift Master Controllers
Documentation=https://github.com/openshift/origin
-Wants={{ openshift.common.service_type }}-master-api.service
-After={{ openshift.common.service_type }}-master-api.service
+Wants={{ openshift_service_type }}-master-api.service
+After={{ openshift_service_type }}-master-api.service
After={{ openshift_docker_service_name }}.service
Requires={{ openshift_docker_service_name }}.service
PartOf={{ openshift_docker_service_name }}.service
[Service]
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-controllers
Environment=GOTRACEBACK=crash
-ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers
+ExecStartPre=-/usr/bin/docker rm -f {{ openshift_service_type}}-master-controllers
ExecStart=/usr/bin/docker run --rm --privileged --net=host \
- --name {{ openshift.common.service_type }}-master-controllers \
- --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers \
+ --name {{ openshift_service_type }}-master-controllers \
+ --env-file=/etc/sysconfig/{{ openshift_service_type }}-master-controllers \
-v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \
-v /var/run/docker.sock:/var/run/docker.sock \
-v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
@@ -23,11 +23,11 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \
{{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers \
--config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
-ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers
+ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-master-controllers
LimitNOFILE=131072
LimitCORE=infinity
WorkingDirectory={{ r_openshift_master_data_dir }}
-SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
+SyslogIdentifier={{ openshift_service_type }}-master-controllers
Restart=always
RestartSec=5s
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
index 02bfd6f62..ed8a47df8 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
@@ -3,12 +3,12 @@ Description=Atomic OpenShift Master API
Documentation=https://github.com/openshift/origin
After=network-online.target
After=etcd.service
-Before={{ openshift.common.service_type }}-node.service
+Before={{ openshift_service_type }}-node.service
Requires=network-online.target
[Service]
Type=notify
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-api
Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/openshift start master api --config=${CONFIG_FILE} $OPTIONS
LimitNOFILE=131072
@@ -20,4 +20,4 @@ RestartSec=5s
[Install]
WantedBy=multi-user.target
-WantedBy={{ openshift.common.service_type }}-node.service
+WantedBy={{ openshift_service_type }}-node.service
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
index fae021845..b36963f73 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
@@ -2,19 +2,19 @@
Description=Atomic OpenShift Master Controllers
Documentation=https://github.com/openshift/origin
After=network-online.target
-After={{ openshift.common.service_type }}-master-api.service
-Wants={{ openshift.common.service_type }}-master-api.service
+After={{ openshift_service_type }}-master-api.service
+Wants={{ openshift_service_type }}-master-api.service
Requires=network-online.target
[Service]
Type=notify
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-controllers
Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS
LimitNOFILE=131072
LimitCORE=infinity
WorkingDirectory={{ r_openshift_master_data_dir }}
-SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
+SyslogIdentifier={{ openshift_service_type }}-master-controllers
Restart=always
RestartSec=5s
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml
index e0329ee7c..1f4b5a116 100644
--- a/roles/openshift_metrics/handlers/main.yml
+++ b/roles/openshift_metrics/handlers/main.yml
@@ -1,12 +1,12 @@
---
- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
when: (not (master_api_service_status_changed | default(false) | bool))
notify: Verify API Server
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index 67f697924..87ceb8103 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -33,9 +33,9 @@ Notes
Currently we support re-labeling nodes but we don't re-schedule running pods nor remove existing labels. That means you will have to trigger the re-schedulling manually. To re-schedule your pods, just follow the steps below:
```
-oadm manage-node --schedulable=false ${NODE}
-oadm manage-node --drain ${NODE}
-oadm manage-node --schedulable=true ${NODE}
+oc adm manage-node --schedulable=false ${NODE}
+oc adm manage-node --drain ${NODE}
+oc adm manage-node --schedulable=true ${NODE}
````
> If you are using version less than 1.5/3.5 you must replace `--drain` with `--evacuate`.
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index f3867fe4a..fff927944 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -14,7 +14,11 @@ r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }
l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"
-openshift_service_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'atomic-openshift' }}"
+openshift_service_type_dict:
+ origin: origin
+ openshift-enterprise: atomic-openshift
+
+openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
system_images_registry_dict:
openshift-enterprise: "registry.access.redhat.com"
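
The dict-based default also changes failure behavior: the old inline conditional mapped any non-origin deployment type to atomic-openshift, while the new lookup fails on an unsupported value. A minimal Python model of that difference (illustrative only; 'online' stands in for a hypothetical unsupported deployment type):

OPENSHIFT_SERVICE_TYPE_DICT = {
    "origin": "origin",
    "openshift-enterprise": "atomic-openshift",
}

def old_service_type(deployment_type):
    # Old expression: anything that is not 'origin' silently became 'atomic-openshift'.
    return "origin" if deployment_type == "origin" else "atomic-openshift"

def new_service_type(deployment_type):
    # New expression: an unsupported deployment type now fails the lookup
    # instead of being quietly mapped to 'atomic-openshift'.
    return OPENSHIFT_SERVICE_TYPE_DICT[deployment_type]

assert old_service_type("online") == "atomic-openshift"  # accepted without complaint before
# new_service_type("online") raises KeyError, surfacing the bad inventory value early
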
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 229c6bbed..170a3dc6e 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -34,7 +34,7 @@
- name: restart node
systemd:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
state: restarted
register: l_openshift_node_restart_node_result
until: not l_openshift_node_restart_node_result | failed
diff --git a/roles/openshift_node/tasks/aws.yml b/roles/openshift_node/tasks/aws.yml
index 38c2b794d..a7f1fc116 100644
--- a/roles/openshift_node/tasks/aws.yml
+++ b/roles/openshift_node/tasks/aws.yml
@@ -1,7 +1,7 @@
---
- name: Configure AWS Cloud Provider Settings
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ dest: /etc/sysconfig/{{ openshift_service_type }}-node
regexp: "{{ item.regex }}"
line: "{{ item.line }}"
create: true
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index 741a2234f..e5c80bd09 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -38,7 +38,7 @@
- name: Configure Node Environment Variables
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ dest: /etc/sysconfig/{{ openshift_service_type }}-node
regexp: "^{{ item.key }}="
line: "{{ item.key }}={{ item.value }}"
create: true
@@ -76,7 +76,7 @@
- name: Start and enable node dep
systemd:
daemon_reload: yes
- name: "{{ openshift.common.service_type }}-node-dep"
+ name: "{{ openshift_service_type }}-node-dep"
enabled: yes
state: started
@@ -84,7 +84,7 @@
block:
- name: Start and enable node
systemd:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
enabled: yes
state: started
daemon_reload: yes
@@ -95,7 +95,7 @@
ignore_errors: true
- name: Dump logs from node service if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node
+ command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-node
when: node_start_result | failed
- name: Abort if node failed to start
diff --git a/roles/openshift_node/tasks/config/configure-node-settings.yml b/roles/openshift_node/tasks/config/configure-node-settings.yml
index 527580481..ebc1426d3 100644
--- a/roles/openshift_node/tasks/config/configure-node-settings.yml
+++ b/roles/openshift_node/tasks/config/configure-node-settings.yml
@@ -1,7 +1,7 @@
---
- name: Configure Node settings
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ dest: /etc/sysconfig/{{ openshift_service_type }}-node
regexp: "{{ item.regex }}"
line: "{{ item.line }}"
create: true
diff --git a/roles/openshift_node/tasks/config/configure-proxy-settings.yml b/roles/openshift_node/tasks/config/configure-proxy-settings.yml
index d60794305..7ddd319d2 100644
--- a/roles/openshift_node/tasks/config/configure-proxy-settings.yml
+++ b/roles/openshift_node/tasks/config/configure-proxy-settings.yml
@@ -1,7 +1,7 @@
---
- name: Configure Proxy Settings
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ dest: /etc/sysconfig/{{ openshift_service_type }}-node
regexp: "{{ item.regex }}"
line: "{{ item.line }}"
create: true
diff --git a/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml
index ee91a88ab..9f1145d12 100644
--- a/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml
+++ b/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml
@@ -1,7 +1,7 @@
---
- name: Install Node dependencies docker service file
template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service"
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node-dep.service"
src: openshift.docker.node.dep.service
notify:
- reload systemd units
diff --git a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
index f92ff79b5..649fc5f6b 100644
--- a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
+++ b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
@@ -1,7 +1,7 @@
---
- name: Install Node docker service file
template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
src: openshift.docker.node.service
notify:
- reload systemd units
diff --git a/roles/openshift_node/tasks/docker/upgrade.yml b/roles/openshift_node/tasks/docker/upgrade.yml
index d743d2188..c13a6cf6c 100644
--- a/roles/openshift_node/tasks/docker/upgrade.yml
+++ b/roles/openshift_node/tasks/docker/upgrade.yml
@@ -1,6 +1,6 @@
---
# input variables:
-# - openshift.common.service_type
+# - openshift_service_type
# - openshift.common.is_containerized
# - docker_upgrade_nuke_images
# - docker_version
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 1ed4a05c1..f93aed246 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -3,14 +3,14 @@
block:
- name: Install Node package
package:
- name: "{{ openshift.common.service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
register: result
until: result | success
- name: Install sdn-ovs package
package:
- name: "{{ openshift.common.service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when:
- openshift_node_use_openshift_sdn | bool
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index 98a391890..98978ec6f 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -8,10 +8,10 @@
- name: Install or Update node system container
oc_atomic_container:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}"
values:
- "DNS_DOMAIN={{ openshift.common.dns_domain }}"
- "DOCKER_SERVICE={{ openshift_docker_service_name }}.service"
- - "MASTER_SERVICE={{ openshift.common.service_type }}.service"
+ - "MASTER_SERVICE={{ openshift_service_type }}.service"
state: latest
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 397e1ba18..c532147b1 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -1,7 +1,7 @@
---
- name: Install Node service file
template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}"
when: not l_is_node_system_container | bool
notify:
diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml
index 561b56918..9f333645a 100644
--- a/roles/openshift_node/tasks/upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade.yml
@@ -17,7 +17,7 @@
name: "{{ item }}"
state: stopped
with_items:
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-node"
- openvswitch
failed_when: false
@@ -26,8 +26,8 @@
name: "{{ item }}"
state: stopped
with_items:
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-master-api"
- etcd_container
failed_when: false
when: openshift.common.is_containerized | bool
@@ -80,9 +80,9 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
when: openshift.common.is_containerized | bool
@@ -91,7 +91,7 @@
name: "{{ item }}"
state: stopped
with_items:
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-node"
- openvswitch
failed_when: false
when: not openshift.common.is_containerized | bool
diff --git a/roles/openshift_node/tasks/upgrade/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml
index 3f1abceab..65c301783 100644
--- a/roles/openshift_node/tasks/upgrade/restart.yml
+++ b/roles/openshift_node/tasks/upgrade/restart.yml
@@ -1,6 +1,6 @@
---
# input variables:
-# - openshift.common.service_type
+# - openshift_service_type
# - openshift.common.is_containerized
# - openshift.common.hostname
# - openshift.master.api_port
@@ -27,9 +27,9 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
- name: Wait for master API to come back online
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
index fcbe1a598..120b93bc3 100644
--- a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
@@ -1,13 +1,13 @@
---
# input variables:
-# - openshift.common.service_type
+# - openshift_service_type
# - component
# - openshift_pkg_version
# - openshift.common.is_atomic
# We verified latest rpm available is suitable, so just yum update.
- name: Upgrade packages
- package: "name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
+ package: "name={{ openshift_service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
register: result
until: result | success
@@ -19,7 +19,7 @@
- name: Install Node service file
template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
src: "node.service.j2"
register: l_node_unit
diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service
index 5964ac095..8b43beb07 100644
--- a/roles/openshift_node/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node/templates/openshift.docker.node.dep.service
@@ -1,11 +1,11 @@
[Unit]
Requires={{ openshift_docker_service_name }}.service
After={{ openshift_docker_service_name }}.service
-PartOf={{ openshift.common.service_type }}-node.service
-Before={{ openshift.common.service_type }}-node.service
+PartOf={{ openshift_service_type }}-node.service
+Before={{ openshift_service_type }}-node.service
{% if openshift_use_crio %}Wants=cri-o.service{% endif %}
[Service]
-ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
+ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; fi"
ExecStop=
-SyslogIdentifier={{ openshift.common.service_type }}-node-dep
+SyslogIdentifier={{ openshift_service_type }}-node-dep
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 3b33ca542..b174c7023 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -1,5 +1,5 @@
[Unit]
-After={{ openshift.common.service_type }}-master.service
+After={{ openshift_service_type }}-master.service
After={{ openshift_docker_service_name }}.service
After=openvswitch.service
PartOf={{ openshift_docker_service_name }}.service
@@ -10,20 +10,20 @@ PartOf=openvswitch.service
After=ovsdb-server.service
After=ovs-vswitchd.service
{% endif %}
-Wants={{ openshift.common.service_type }}-master.service
-Requires={{ openshift.common.service_type }}-node-dep.service
-After={{ openshift.common.service_type }}-node-dep.service
+Wants={{ openshift_service_type }}-master.service
+Requires={{ openshift_service_type }}-node-dep.service
+After={{ openshift_service_type }}-node-dep.service
Requires=dnsmasq.service
After=dnsmasq.service
[Service]
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
-ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-node
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-node-dep
+ExecStartPre=-/usr/bin/docker rm -f {{ openshift_service_type }}-node
ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
- --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \
+ExecStart=/usr/bin/docker run --name {{ openshift_service_type }}-node \
+ --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift_service_type }}-node \
-v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \
-e HOST=/rootfs -e HOST_ETC=/host-etc \
-v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}:rslave \
@@ -40,10 +40,10 @@ ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
{% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
{{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
-ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
+ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-node
ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:
-SyslogIdentifier={{ openshift.common.service_type }}-node
+SyslogIdentifier={{ openshift_service_type }}-node
Restart=always
RestartSec=5s
diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml
index 6e8792446..e543d753c 100644
--- a/roles/openshift_provisioners/tasks/install_efs.yaml
+++ b/roles/openshift_provisioners/tasks/install_efs.yaml
@@ -66,7 +66,7 @@
- name: "Set anyuid permissions for efs"
command: >
- {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ {{ openshift.common.client_binary}} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs
register: efs_output
failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr
diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml
index 01a1a7472..4adf04e90 100644
--- a/roles/openshift_version/defaults/main.yml
+++ b/roles/openshift_version/defaults/main.yml
@@ -1,2 +1,8 @@
---
openshift_protect_installed_version: True
+
+openshift_service_type_dict:
+ origin: origin
+ openshift-enterprise: atomic-openshift
+
+openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index 4f9158ade..ae0f68a5b 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -101,13 +101,13 @@
when: is_containerized | bool
- block:
- - name: Get available {{ openshift.common.service_type}} version
+ - name: Get available {{ openshift_service_type}} version
repoquery:
- name: "{{ openshift.common.service_type}}"
+ name: "{{ openshift_service_type}}"
ignore_excluders: true
register: rpm_results
- fail:
- msg: "Package {{ openshift.common.service_type}} not found"
+ msg: "Package {{ openshift_service_type}} not found"
when: not rpm_results.results.package_found
- set_fact:
openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
@@ -196,7 +196,7 @@
- openshift_version.startswith(openshift_release) | bool
msg: |-
You requested openshift_release {{ openshift_release }}, which is not matched by
- the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }}
+ the latest OpenShift RPM we detected as {{ openshift_service_type }}-{{ openshift_version }}
on host {{ inventory_hostname }}.
We will only install the latest RPMs, so please ensure you are getting the release
you expect. You may need to adjust your Ansible inventory, modify the repositories
diff --git a/roles/openshift_version/tasks/set_version_rpm.yml b/roles/openshift_version/tasks/set_version_rpm.yml
index c40777bf1..c7ca5ceae 100644
--- a/roles/openshift_version/tasks/set_version_rpm.yml
+++ b/roles/openshift_version/tasks/set_version_rpm.yml
@@ -8,14 +8,14 @@
- openshift_version is not defined
- block:
- - name: Get available {{ openshift.common.service_type}} version
+ - name: Get available {{ openshift_service_type}} version
repoquery:
- name: "{{ openshift.common.service_type}}"
+ name: "{{ openshift_service_type}}"
ignore_excluders: true
register: rpm_results
- fail:
- msg: "Package {{ openshift.common.service_type}} not found"
+ msg: "Package {{ openshift_service_type}} not found"
when: not rpm_results.results.package_found
- set_fact:
diff --git a/test/integration/openshift_health_checker/setup_container.yml b/test/integration/openshift_health_checker/setup_container.yml
index e3459b376..cda69408d 100644
--- a/test/integration/openshift_health_checker/setup_container.yml
+++ b/test/integration/openshift_health_checker/setup_container.yml
@@ -46,7 +46,6 @@
- hosts: all
tasks:
-
# run before openshift_version to prevent it breaking
- include: preflight/playbooks/tasks/enable_repo.yml
vars: { repo_name: "ose-3.2" }