Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/aws/README.md | 10
-rw-r--r--  playbooks/aws/openshift-cluster/uninstall_elb.yml | 9
-rw-r--r--  playbooks/aws/openshift-cluster/uninstall_s3.yml | 10
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_10/README.md | 20
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml | 5
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml | 16
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml | 7
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml (renamed from playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml) | 0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 19
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/config.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 38
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_10/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml | 58
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml | 35
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml | 52
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 27
-rw-r--r--  playbooks/container-runtime/private/config.yml | 6
-rw-r--r--  playbooks/gcp/openshift-cluster/build_image.yml | 6
-rw-r--r--  playbooks/init/base_packages.yml | 8
-rw-r--r--  playbooks/init/evaluate_groups.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_main.yml | 33
-rw-r--r--  playbooks/openshift-etcd/scaleup.yml | 1
-rw-r--r--  playbooks/openshift-hosted/deploy_registry.yml | 4
-rw-r--r--  playbooks/openshift-hosted/deploy_router.yml | 4
-rw-r--r--  playbooks/openshift-hosted/private/redeploy-registry-certificates.yml | 3
-rw-r--r--  playbooks/openshift-hosted/private/redeploy-router-certificates.yml | 3
-rw-r--r--  playbooks/openshift-logging/private/config.yml | 41
-rw-r--r--  playbooks/openshift-master/private/additional_config.yml | 1
-rw-r--r--  playbooks/openshift-master/private/config.yml | 3
-rw-r--r--  playbooks/openshift-master/private/restart.yml | 9
-rw-r--r--  playbooks/openshift-master/private/scaleup.yml | 1
-rw-r--r--  playbooks/openshift-master/scaleup.yml | 1
-rw-r--r--  playbooks/openshift-node/private/restart.yml | 1
-rw-r--r--  playbooks/openshift-node/redeploy-certificates.yml | 2
-rw-r--r--  playbooks/openshift-node/scaleup.yml | 1
-rw-r--r--  playbooks/openshift-prometheus/private/uninstall.yml | 2
41 files changed, 342 insertions(+), 121 deletions(-)
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index bdc98d1e0..cf811ca84 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -201,9 +201,7 @@ There are more enhancements that are arriving for provisioning. These will incl
## Uninstall / Deprovisioning
-At this time, only deprovisioning of the output of the prerequisites step is provided. You can/must manually remove things like ELBs and scale groups before attempting to undo the work by the preprovisiong step.
-
-To undo the work done by the prerequisites playbook, simply call the uninstall_prerequisites.yml playbook. You should use the same inventory file and provisioning_vars.yml file that was used during provisioning.
+To undo the work done by the prerequisites playbook, call the uninstall_prerequisites.yml playbook. You must manually remove any other objects (i.e. ELBs, instances, etc.) before attempting this. Use the same inventory file and provisioning_vars.yml file that were used during provisioning.
```
ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_prerequisites.yml
@@ -211,4 +209,10 @@ ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars fi
This should result in removal of the security groups and VPC that were created.
+Cleaning up the S3 bucket contents can be accomplished with:
+
+```
+ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_s3.yml
+```
+
NOTE: If you want to also remove the ssh keys that were uploaded (**these ssh keys would be shared if you are running multiple clusters in the same AWS account** so we don't remove these by default) then you should add 'openshift_aws_enable_uninstall_shared_objects: True' to your provisioning_vars.yml file.
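
For reference, a minimal sketch of the corresponding provisioning_vars.yml addition (only the variable named in the NOTE is shown; the rest of your vars file is unchanged):

```
# provisioning_vars.yml -- opt in to removing shared ssh keys on uninstall
openshift_aws_enable_uninstall_shared_objects: True
```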
diff --git a/playbooks/aws/openshift-cluster/uninstall_elb.yml b/playbooks/aws/openshift-cluster/uninstall_elb.yml
new file mode 100644
index 000000000..c1b724f0c
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_elb.yml
@@ -0,0 +1,9 @@
+---
+- name: Delete elb
+  hosts: localhost
+  connection: local
+  tasks:
+  - name: deprovision elb
+    include_role:
+      name: openshift_aws
+      tasks_from: uninstall_elb.yml
diff --git a/playbooks/aws/openshift-cluster/uninstall_s3.yml b/playbooks/aws/openshift-cluster/uninstall_s3.yml
new file mode 100644
index 000000000..448b47aee
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_s3.yml
@@ -0,0 +1,10 @@
+---
+- name: Empty/delete s3 bucket
+  hosts: localhost
+  connection: local
+  tasks:
+  - name: empty/delete s3 bucket
+    include_role:
+      name: openshift_aws
+      tasks_from: uninstall_s3.yml
+    when: openshift_aws_create_s3 | default(true) | bool
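
The new uninstall playbooks take the same arguments as uninstall_prerequisites.yml; a sketch of invoking them (same placeholder inventory and vars files as in the README above):

```
ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_elb.yml
ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_s3.yml
```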
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_10/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_10/README.md
new file mode 100644
index 000000000..7ede3a28c
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_10/README.md
@@ -0,0 +1,20 @@
+# v3.10 Major and Minor Upgrade Playbook
+
+## Overview
+This playbook currently performs the following steps.
+
+ * Upgrade and restart master services
+ * Unschedule node
+ * Upgrade and restart docker
+ * Upgrade and restart node services
+ * Modify the necessary subset of the configuration
+ * Apply the latest cluster policies
+ * Update the default router if one exists
+ * Update the default registry if one exists
+ * Update image streams and quickstarts
+
+## Usage
+
+```
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml
+```
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml
new file mode 100644
index 000000000..977b4f381
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml
@@ -0,0 +1,5 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_10/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml
new file mode 100644
index 000000000..8b76bf4ff
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml
@@ -0,0 +1,16 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml
+
+- import_playbook: ../../../../openshift-master/private/restart.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml
new file mode 100644
index 000000000..b4353edc2
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml
@@ -0,0 +1,7 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml
index 23a3fcbb5..23a3fcbb5 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index ba783638d..a9a35b028 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -6,6 +6,7 @@
g_new_node_hosts: []
- import_playbook: ../../../init/basic_facts.yml
+- import_playbook: ../../../init/base_packages.yml
- import_playbook: ../../../init/cluster_facts.yml
- name: Ensure firewall is not switched during upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index de612da21..9c927c0a1 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -113,6 +113,25 @@
registry_url: "{{ openshift.master.registry_url }}"
openshift_hosted_templates_import_command: replace
+  post_tasks:
+  # we need to migrate customers to the new pattern of pushing to the registry via DNS
+  # Step 1: verify the certificates have the docker registry service name
+  - shell: >
+      echo -n | openssl s_client -showcerts -servername docker-registry.default.svc -connect docker-registry.default.svc:5000 | openssl x509 -text | grep -A1 'X509v3 Subject Alternative Name:' | grep -Pq 'DNS:docker-registry\.default\.svc(,|$)'
+    register: cert_output
+    changed_when: false
+    failed_when:
+    - cert_output.rc not in [0, 1]
+
+  # Step 2: set a fact to be used to determine if we should run the redeploy of registry certs
+  - name: set a fact to include the registry certs playbook if needed
+    set_fact:
+      openshift_hosted_rollout_certs_and_registry: "{{ cert_output.rc == 0 }}"
+
+# Run the redeploy certs based upon the certificates
+- when: hostvars[groups.oo_first_master.0].openshift_hosted_rollout_certs_and_registry
+  import_playbook: ../../../openshift-hosted/redeploy-registry-certificates.yml
+
# Check for warnings to be printed at the end of the upgrade:
- name: Clean up and display warnings
hosts: oo_masters_to_config
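
The Step 1 pipeline above can be run by hand to see the two exit codes the task tolerates; a minimal sketch, assuming docker-registry.default.svc resolves from the first master:

```
echo -n | openssl s_client -showcerts \
    -servername docker-registry.default.svc \
    -connect docker-registry.default.svc:5000 \
  | openssl x509 -text \
  | grep -A1 'X509v3 Subject Alternative Name:' \
  | grep -Pq 'DNS:docker-registry\.default\.svc(,|$)'
echo $?  # 0: the serving cert already lists the service DNS name, 1: it does not
```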
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
index 2b27f8dd0..44af37b2d 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -5,8 +5,6 @@
# Pre-upgrade
- import_playbook: ../initialize_nodes_to_upgrade.yml
-- import_playbook: verify_cluster.yml
-
- name: Update repos on upgrade hosts
hosts: "{{ l_upgrade_repo_hosts }}"
roles:
@@ -53,6 +51,8 @@
# l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
# l_openshift_version_check_hosts is passed via upgrade_control_plane.yml
+- import_playbook: verify_cluster.yml
+
# If we're only upgrading nodes, we need to ensure masters are already upgraded
- name: Verify masters are already upgraded
hosts: oo_masters_to_config
@@ -60,7 +60,7 @@
- fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
when:
- l_upgrade_nodes_only | default(False) | bool
- - openshift.common.version != openshift_version
+ - not openshift.common.version | match(openshift_version)
# If we're only upgrading nodes, skip this.
- import_playbook: ../../../../openshift-master/private/validate_restart.yml
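
The `!=` → `match` change above relaxes the node-only guard from exact equality to a match anchored at the start of the version string, so a master reporting a point release no longer trips the failure. A minimal sketch of the behaviour, with illustrative values:

```
- debug:
    msg: "{{ '3.10.0' | match('3.10') }}"  # True: match() anchors at the start, so point releases pass
```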
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
index 693ab2d96..4902b9ecd 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -17,6 +17,7 @@
valid version for a {{ openshift_upgrade_target }} upgrade
when:
- openshift_pkg_version is defined
+ - openshift_pkg_version != ""
- openshift_pkg_version.split('-',1).1 is version_compare(openshift_upgrade_target ,'<')
- fail:
@@ -25,6 +26,7 @@
valid version for a {{ openshift_upgrade_target }} upgrade
when:
- openshift_image_tag is defined
+ - openshift_image_tag != ""
- openshift_image_tag.split('v',1).1 is version_compare(openshift_upgrade_target ,'<')
- set_fact:
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index a10fd4bee..3c0b72832 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -3,29 +3,6 @@
# Upgrade Masters
###############################################################################
-# Prior to 3.6, openshift-ansible created etcd serving certificates
-# without a SubjectAlternativeName entry for the system hostname. The
-# SAN list in Go 1.8 is now (correctly) authoritative and since
-# openshift-ansible configures masters to talk to etcd hostnames
-# rather than IP addresses, we must correct etcd certificates.
-#
-# This play examines the etcd serving certificate SANs on each etcd
-# host and records whether or not the system hostname is missing.
-- name: Examine etcd serving certificate SAN
- hosts: oo_etcd_to_config
- tasks:
- - slurp:
- src: /etc/etcd/server.crt
- register: etcd_serving_cert
- - set_fact:
- __etcd_cert_lacks_hostname: "{{ (openshift.common.hostname not in (etcd_serving_cert.content | b64decode | lib_utils_oo_parse_certificate_san)) | bool }}"
-
-# Redeploy etcd certificates when hostnames were missing from etcd
-# serving certificate SANs.
-- import_playbook: ../../../openshift-etcd/redeploy-certificates.yml
- when:
- - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false])
-
- name: Backup and upgrade etcd
import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
@@ -56,7 +33,6 @@
register: l_pb_upgrade_control_plane_pre_upgrade_storage
when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
failed_when:
- - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
- openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
@@ -72,8 +48,6 @@
# support for optional hooks to be defined.
- name: Upgrade master
hosts: oo_masters_to_config
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
roles:
- openshift_facts
@@ -96,6 +70,12 @@
- include_tasks: "{{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
+  - name: Disable master controller
+    service:
+      name: "{{ openshift_service_type }}-master-controllers"
+      enabled: false
+    when: openshift.common.rolling_restart_mode == 'system'
+
- include_tasks: ../../../openshift-master/private/tasks/restart_hosts.yml
when: openshift.common.rolling_restart_mode == 'system'
@@ -118,7 +98,6 @@
- openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- openshift_version is version_compare('3.7','<')
failed_when:
- - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
- openshift_upgrade_post_storage_migration_fatal | default(false) | bool
run_once: true
@@ -254,7 +233,6 @@
register: l_pb_upgrade_control_plane_post_upgrade_storage
when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
failed_when:
- - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
- openshift_upgrade_post_storage_migration_fatal | default(false) | bool
@@ -333,9 +311,13 @@
post_tasks:
- import_role:
name: openshift_node
+ tasks_from: upgrade_pre.yml
+ - import_role:
+ name: openshift_node
tasks_from: upgrade.yml
- import_role:
name: openshift_manage_node
tasks_from: config.yml
vars:
openshift_master_host: "{{ groups.oo_first_master.0 }}"
+ openshift_manage_node_is_master: true
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml
@@ -0,0 +1 @@
+---
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/roles b/playbooks/common/openshift-cluster/upgrades/v3_10/roles
new file mode 120000
index 000000000..415645be6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/roles
@@ -0,0 +1 @@
+../../../../../roles/
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml
new file mode 100644
index 000000000..ec1da6d39
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml
@@ -0,0 +1,7 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- import_playbook: upgrade_control_plane.yml
+
+- import_playbook: upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml
new file mode 100644
index 000000000..64ee03562
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml
@@ -0,0 +1,58 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- import_playbook: ../init.yml
+  vars:
+    l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+
+- name: Configure the upgrade target for the common upgrade tasks 3.10
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+  tasks:
+  - meta: clear_facts
+  - set_fact:
+      openshift_upgrade_target: '3.10'
+      openshift_upgrade_min: '3.9'
+      openshift_release: '3.10'
+
+- import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+  # them by default.
+  vars:
+    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
+    l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+    l_upgrade_excluder_hosts: "oo_masters_to_config"
+    openshift_protect_installed_version: False
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+  hosts: oo_masters_to_config:oo_etcd_to_config
+  tasks:
+  - set_fact:
+      pre_upgrade_complete: True
+
+- import_playbook: ../upgrade_control_plane.yml
+  vars:
+    openshift_release: '3.10'
+
+- import_playbook: ../post_control_plane.yml
+
+- hosts: oo_masters
+  tasks:
+  - import_role:
+      name: openshift_web_console
+      tasks_from: remove_old_asset_config
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml
new file mode 100644
index 000000000..eea1b250e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml
@@ -0,0 +1,35 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- import_playbook: ../init.yml
+
+- name: Configure the upgrade target for the common upgrade tasks
+  hosts: oo_all_hosts
+  tasks:
+  - set_fact:
+      openshift_upgrade_target: '3.10'
+      openshift_upgrade_min: '3.9'
+      openshift_release: '3.10'
+
+- import_playbook: ../pre/config.yml
+  vars:
+    l_upgrade_repo_hosts: "oo_nodes_to_config"
+    l_upgrade_no_proxy_hosts: "oo_all_hosts"
+    l_upgrade_health_check_hosts: "oo_nodes_to_config"
+    l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+    l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+    l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+    l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+  tasks:
+  - set_fact:
+      pre_upgrade_complete: True
+
+# Pre-upgrade completed
+
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml
new file mode 100644
index 000000000..d8540abfb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml
@@ -0,0 +1,7 @@
+---
+- name: Verify 3.10 specific upgrade checks
+  hosts: oo_first_master
+  roles:
+  - { role: lib_openshift }
+  tasks:
+  - debug: msg="noop"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index bf6e8605e..ec1da6d39 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -2,54 +2,6 @@
#
# Full Control Plane + Nodes Upgrade
#
-- import_playbook: ../init.yml
+- import_playbook: upgrade_control_plane.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tasks:
- - set_fact:
- openshift_upgrade_target: '3.9'
- openshift_upgrade_min: '3.7'
- openshift_release: '3.9'
-
-- import_playbook: ../pre/config.yml
- vars:
- l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
- l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
- l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
- l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
- l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
- openshift_protect_installed_version: False
-
-- import_playbook: validator.yml
-
-- name: Flag pre-upgrade checks complete for hosts without errors
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - set_fact:
- pre_upgrade_complete: True
-
-# Pre-upgrade completed
-
-- import_playbook: ../upgrade_control_plane.yml
-
-# All controllers must be stopped at the same time then restarted
-- name: Cycle all controller services to force new leader election mode
- hosts: oo_masters_to_config
- gather_facts: no
- roles:
- - role: openshift_facts
- tasks:
- - name: Stop {{ openshift_service_type }}-master-controllers
- systemd:
- name: "{{ openshift_service_type }}-master-controllers"
- state: stopped
- - name: Start {{ openshift_service_type }}-master-controllers
- systemd:
- name: "{{ openshift_service_type }}-master-controllers"
- state: started
-
-- import_playbook: ../upgrade_nodes.yml
-
-- import_playbook: ../post_control_plane.yml
+- import_playbook: upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index c8a42322d..9c7677f1b 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -15,6 +15,7 @@
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_base_packages_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
## Check to see if they're running 3.7 and if so upgrade them to 3.8 on control plane
## If they've specified pkg_version or image_tag preserve that for later use
@@ -26,6 +27,7 @@
openshift_upgrade_min: '3.7'
openshift_release: '3.8'
_requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}"
+ openshift_pkg_version: ''
_requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}"
l_double_upgrade_cp: True
when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
@@ -61,10 +63,8 @@
# Pre-upgrade completed
-- import_playbook: ../upgrade_control_plane.yml
- vars:
- openshift_release: '3.8'
- openshift_pkg_version: ''
+- name: Intermediate 3.8 Upgrade
+ import_playbook: ../upgrade_control_plane.yml
when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
## 3.8 upgrade complete we should now be able to upgrade to 3.9
@@ -77,7 +77,7 @@
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.8'
openshift_release: '3.9'
- openshift_pkg_version: "{{ _requested_pkg_version | default ('-3.9*') }}"
+ openshift_pkg_version: "{{ _requested_pkg_version if _requested_pkg_version is defined else '' }}"
# Set the user's specified image_tag for 3.9 upgrade if it was provided.
- set_fact:
openshift_image_tag: "{{ _requested_image_tag }}"
@@ -106,6 +106,7 @@
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
openshift_protect_installed_version: False
+ openshift_version_reinit: True
- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
@@ -114,8 +115,6 @@
pre_upgrade_complete: True
- import_playbook: ../upgrade_control_plane.yml
- vars:
- openshift_release: '3.9'
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
@@ -124,14 +123,16 @@
roles:
- role: openshift_facts
tasks:
- - name: Stop {{ openshift_service_type }}-master-controllers
- systemd:
+ - name: Restart master controllers to force new leader election mode
+ service:
name: "{{ openshift_service_type }}-master-controllers"
- state: stopped
- - name: Start {{ openshift_service_type }}-master-controllers
- systemd:
+ state: restarted
+ when: openshift.common.rolling_restart_mode == 'services'
+ - name: Re-enable master controllers to force new leader election mode
+ service:
name: "{{ openshift_service_type }}-master-controllers"
- state: started
+ enabled: true
+ when: openshift.common.rolling_restart_mode == 'system'
- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml
index 5396df20a..d5312de15 100644
--- a/playbooks/container-runtime/private/config.yml
+++ b/playbooks/container-runtime/private/config.yml
@@ -12,6 +12,12 @@
- role: container_runtime
tasks:
- import_role:
+      name: openshift_excluder
+      tasks_from: enable.yml
+    vars:
+      r_openshift_excluder_action: enable
+      r_openshift_excluder_enable_openshift_excluder: false
+  - import_role:
name: container_runtime
tasks_from: package_docker.yml
when:
diff --git a/playbooks/gcp/openshift-cluster/build_image.yml b/playbooks/gcp/openshift-cluster/build_image.yml
index 787de8ebc..0daf61122 100644
--- a/playbooks/gcp/openshift-cluster/build_image.yml
+++ b/playbooks/gcp/openshift-cluster/build_image.yml
@@ -62,6 +62,12 @@
timeout: 120
with_items: "{{ gce.instance_data }}"
+- name: Wait for full SSH connection
+  hosts: nodes
+  gather_facts: no
+  tasks:
+  - wait_for_connection:
+
- hosts: nodes
tasks:
- name: Set facts
diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml
index 0a730a88a..addb4f44d 100644
--- a/playbooks/init/base_packages.yml
+++ b/playbooks/init/base_packages.yml
@@ -1,8 +1,9 @@
---
-# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+# l_base_packages_hosts may be passed in via prerequisites.yml during scaleup plays
+# and upgrade_control_plane.yml upgrade plays.
- name: Install packages necessary for installer
- hosts: "{{ l_scale_up_hosts | default('oo_all_hosts') }}"
+ hosts: "{{ l_base_packages_hosts | default('oo_all_hosts') }}"
any_errors_fatal: true
tasks:
- when:
@@ -16,8 +17,9 @@
- iproute
- "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
- "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
- - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else omit }}"
+ - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else '' }}"
- yum-utils
+ when: item != ''
register: result
until: result is succeeded
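
The `omit` → `''` swap above works around an Ansible subtlety: `omit` only omits *module parameters*, so inside a `with_items` list it survives as a literal placeholder string and the task still iterates over it. The new pattern uses an empty-string sentinel plus a per-item `when`; a minimal sketch (package list abbreviated, task name illustrative):

```
- name: Install base packages
  package:
    name: "{{ item }}"
    state: present
  with_items:
  - iproute
  - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else '' }}"
  when: item != ''  # skip the empty sentinel instead of relying on omit
```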
diff --git a/playbooks/init/evaluate_groups.yml b/playbooks/init/evaluate_groups.yml
index e8bf1892c..81d7d63ca 100644
--- a/playbooks/init/evaluate_groups.yml
+++ b/playbooks/init/evaluate_groups.yml
@@ -51,7 +51,7 @@
upgrade please see https://docs.openshift.com/container-platform/latest/install_config/upgrading/migrating_embedded_etcd.html
for documentation on how to migrate from embedded to external etcd.
when:
- - g_etcd_hosts | default([]) | length not in [5,3,1]
+ - g_etcd_hosts | default([]) | length == 0
- not (openshift_node_bootstrap | default(False))
- name: Evaluate oo_all_hosts
diff --git a/playbooks/openshift-etcd/private/upgrade_main.yml b/playbooks/openshift-etcd/private/upgrade_main.yml
index 8997680f9..fea588260 100644
--- a/playbooks/openshift-etcd/private/upgrade_main.yml
+++ b/playbooks/openshift-etcd/private/upgrade_main.yml
@@ -1,4 +1,37 @@
---
+# Prior to 3.6, openshift-ansible created etcd serving certificates
+# without a SubjectAlternativeName entry for the system hostname. The
+# SAN list in Go 1.8 is now (correctly) authoritative and since
+# openshift-ansible configures masters to talk to etcd hostnames
+# rather than IP addresses, we must correct etcd certificates.
+#
+# This play examines the etcd serving certificate SANs on each etcd
+# host and records whether or not the system hostname is missing.
+- name: Examine etcd serving certificate SAN
+  hosts: oo_etcd_to_config
+  tasks:
+  - slurp:
+      src: /etc/etcd/server.crt
+    register: etcd_serving_cert
+  - set_fact:
+      __etcd_cert_lacks_hostname: "{{ (openshift.common.hostname not in (etcd_serving_cert.content | b64decode | lib_utils_oo_parse_certificate_san)) | bool }}"
+
+# Redeploy etcd certificates when hostnames were missing from etcd
+# serving certificate SANs.
+- import_playbook: redeploy-certificates.yml
+  when:
+  - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false])
+
+- import_playbook: restart.yml
+  vars:
+    g_etcd_certificates_expired: "{{ ('expired' in (hostvars | lib_utils_oo_select_keys(groups['etcd']) | lib_utils_oo_collect('check_results.check_results.etcd') | lib_utils_oo_collect('health'))) | bool }}"
+  when:
+  - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false])
+
+- import_playbook: ../../openshift-master/private/restart.yml
+  when:
+  - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false])
+
# For 1.4/3.4 we want to upgrade everyone to etcd-3.0. etcd docs say to
# upgrade from 2.0.x to 2.1.x to 2.2.x to 2.3.x to 3.0.x. While this is a tedious
# task for RHEL and CENTOS it's simply not possible in Fedora unless you've
diff --git a/playbooks/openshift-etcd/scaleup.yml b/playbooks/openshift-etcd/scaleup.yml
index 656454fe3..1f8cb7391 100644
--- a/playbooks/openshift-etcd/scaleup.yml
+++ b/playbooks/openshift-etcd/scaleup.yml
@@ -32,6 +32,7 @@
l_build_container_groups_hosts: "oo_new_etcd_to_config"
l_etcd_scale_up_hosts: "oo_hosts_containerized_managed_true"
l_scale_up_hosts: "oo_new_etcd_to_config"
+ l_base_packages_hosts: "oo_new_etcd_to_config"
l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_new_etcd_to_config"
l_sanity_check_hosts: "{{ groups['oo_new_etcd_to_config'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config']) }}"
when:
diff --git a/playbooks/openshift-hosted/deploy_registry.yml b/playbooks/openshift-hosted/deploy_registry.yml
new file mode 100644
index 000000000..2453329dd
--- /dev/null
+++ b/playbooks/openshift-hosted/deploy_registry.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/openshift_hosted_registry.yml
diff --git a/playbooks/openshift-hosted/deploy_router.yml b/playbooks/openshift-hosted/deploy_router.yml
new file mode 100644
index 000000000..e832eeeea
--- /dev/null
+++ b/playbooks/openshift-hosted/deploy_router.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/openshift_hosted_router.yml
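
The two new entry points wrap the existing private plays behind the standard init playbook; a sketch of running them directly (inventory path is a placeholder):

```
ansible-playbook -i <inventory> playbooks/openshift-hosted/deploy_registry.yml
ansible-playbook -i <inventory> playbooks/openshift-hosted/deploy_router.yml
```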
diff --git a/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml
index b817221b8..d88209593 100644
--- a/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml
+++ b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml
@@ -88,8 +88,7 @@
- name: Redeploy docker registry
command: >
- {{ openshift_client_binary }} deploy dc/docker-registry
- --latest
+ {{ openshift_client_binary }} rollout latest dc/docker-registry
--config={{ mktemp.stdout }}/admin.kubeconfig
-n default
diff --git a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
index 0df748f47..952a5f4ee 100644
--- a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
+++ b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
@@ -129,8 +129,7 @@
- name: Redeploy router
command: >
- {{ openshift_client_binary }} deploy dc/router
- --latest
+ {{ openshift_client_binary }} rollout latest dc/router
--config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
-n default
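
Both hunks swap the long-deprecated `oc deploy --latest` invocation for its `oc rollout` replacement. The manual equivalents, assuming an admin kubeconfig is active:

```
oc rollout latest dc/docker-registry -n default
oc rollout latest dc/router -n default
```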
diff --git a/playbooks/openshift-logging/private/config.yml b/playbooks/openshift-logging/private/config.yml
index 07aa8bfde..f2a57f9f8 100644
--- a/playbooks/openshift-logging/private/config.yml
+++ b/playbooks/openshift-logging/private/config.yml
@@ -11,6 +11,38 @@
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+- name: Update vm.max_map_count for ES 5.x
+  hosts: all
+  gather_facts: false
+  tasks:
+  - when:
+    - openshift_logging_es5_techpreview | default(false) | bool
+    - openshift_deployment_type in ['origin']
+    block:
+    - name: Checking vm max_map_count value
+      command:
+        cat /proc/sys/vm/max_map_count
+      register: _vm_max_map_count
+
+    - stat:
+        path: /etc/sysctl.d/99-elasticsearch.conf
+      register: _99_es_conf
+
+    - name: Check for current value of vm.max_map_count in 99-elasticsearch.conf
+      command: >
+        sed /etc/sysctl.d/99-elasticsearch.conf -e 's/vm.max_map_count=\(.*\)/\1/'
+      register: _curr_vm_max_map_count
+      when: _99_es_conf.stat.exists
+
+    - name: Updating vm.max_map_count value
+      sysctl:
+        name: vm.max_map_count
+        value: 262144
+        sysctl_file: "/etc/sysctl.d/99-elasticsearch.conf"
+        reload: yes
+      when:
+      - _vm_max_map_count.stdout | default(0) | int < 262144 | int or _curr_vm_max_map_count.stdout | default(0) | int < 262144
+
- name: OpenShift Aggregated Logging
hosts: oo_first_master
roles:
@@ -20,11 +52,10 @@
- name: Update Master configs
hosts: oo_masters:!oo_first_master
tasks:
- - block:
- - import_role:
- name: openshift_logging
- tasks_from: update_master_config
- when: not openshift.common.version_gte_3_9
+  - include_role:
+      name: openshift_logging
+      tasks_from: update_master_config
+    when: not openshift.common.version_gte_3_9
- name: Logging Install Checkpoint End
hosts: all
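
Elasticsearch 5.x refuses to start unless `vm.max_map_count` is at least 262144, which is what the new play checks and enforces. A manual equivalent on an affected node (run as root; the sysctl file path matches the play):

```
cat /proc/sys/vm/max_map_count                                        # current value
sysctl -w vm.max_map_count=262144                                     # apply now
echo 'vm.max_map_count=262144' > /etc/sysctl.d/99-elasticsearch.conf  # persist across reboots
```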
diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml
index 85be0e600..ca514ed26 100644
--- a/playbooks/openshift-master/private/additional_config.yml
+++ b/playbooks/openshift-master/private/additional_config.yml
@@ -16,7 +16,6 @@
vars:
cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
etcd_urls: "{{ openshift.master.etcd_urls }}"
- openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
roles:
- role: openshift_project_request_template
diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml
index 153ea9993..d2fc2eed8 100644
--- a/playbooks/openshift-master/private/config.yml
+++ b/playbooks/openshift-master/private/config.yml
@@ -78,7 +78,6 @@
console_url: "{{ openshift_master_console_url | default(None) }}"
console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
- ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
- name: Inspect state of first master config settings
@@ -166,7 +165,6 @@
hosts: oo_masters_to_config
any_errors_fatal: true
vars:
- openshift_master_ha: "{{ openshift.master.ha }}"
openshift_master_count: "{{ openshift.master.master_count }}"
openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
@@ -186,6 +184,7 @@
- role: openshift_buildoverrides
- role: nickhammond.logrotate
- role: openshift_master
+ openshift_master_ha: "{{ (groups.oo_masters | length > 1) | bool }}"
openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
diff --git a/playbooks/openshift-master/private/restart.yml b/playbooks/openshift-master/private/restart.yml
index 5cb284935..17d90533c 100644
--- a/playbooks/openshift-master/private/restart.yml
+++ b/playbooks/openshift-master/private/restart.yml
@@ -3,16 +3,13 @@
- name: Restart masters
hosts: oo_masters_to_config
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
- handlers:
- - import_tasks: ../../../roles/openshift_master/handlers/main.yml
roles:
- openshift_facts
post_tasks:
- include_tasks: tasks/restart_hosts.yml
when: openshift_rolling_restart_mode | default('services') == 'system'
-
- - include_tasks: tasks/restart_services.yml
+  - import_role:
+      name: openshift_master
+      tasks_from: restart.yml
when: openshift_rolling_restart_mode | default('services') == 'services'
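
Which branch runs is selected per inventory by the variable consulted above; a sketch of an inventory entry (the two values are the ones the playbook checks for):

```
# [OSEv3:vars] section of the inventory
# 'services' (the default) restarts master services in place;
# 'system' reboots each master host, one at a time
openshift_rolling_restart_mode=system
```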
diff --git a/playbooks/openshift-master/private/scaleup.yml b/playbooks/openshift-master/private/scaleup.yml
index 204f1be1d..5aaa0b156 100644
--- a/playbooks/openshift-master/private/scaleup.yml
+++ b/playbooks/openshift-master/private/scaleup.yml
@@ -8,7 +8,6 @@
- openshift_facts:
role: master
local_facts:
- ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
- name: Update master count
modify_yaml:
diff --git a/playbooks/openshift-master/scaleup.yml b/playbooks/openshift-master/scaleup.yml
index 09e205afc..0ca5d1a61 100644
--- a/playbooks/openshift-master/scaleup.yml
+++ b/playbooks/openshift-master/scaleup.yml
@@ -32,6 +32,7 @@
- import_playbook: ../prerequisites.yml
vars:
l_scale_up_hosts: "oo_nodes_to_config:oo_masters_to_config"
+ l_base_packages_hosts: "oo_nodes_to_config:oo_masters_to_config"
l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config"
l_sanity_check_hosts: "{{ groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) }}"
diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml
index 7249ced70..7371bd7ac 100644
--- a/playbooks/openshift-node/private/restart.yml
+++ b/playbooks/openshift-node/private/restart.yml
@@ -16,6 +16,7 @@
until: not (l_docker_restart_docker_in_node_result is failed)
retries: 3
delay: 30
+ when: openshift_node_restart_docker_required | default(True)
- name: Restart containerized services
service:
diff --git a/playbooks/openshift-node/redeploy-certificates.yml b/playbooks/openshift-node/redeploy-certificates.yml
index 8b7272485..cdf816fbf 100644
--- a/playbooks/openshift-node/redeploy-certificates.yml
+++ b/playbooks/openshift-node/redeploy-certificates.yml
@@ -4,3 +4,5 @@
- import_playbook: private/redeploy-certificates.yml
- import_playbook: private/restart.yml
+  vars:
+    openshift_node_restart_docker_required: False
diff --git a/playbooks/openshift-node/scaleup.yml b/playbooks/openshift-node/scaleup.yml
index 9cc7263b7..bda251fa5 100644
--- a/playbooks/openshift-node/scaleup.yml
+++ b/playbooks/openshift-node/scaleup.yml
@@ -27,6 +27,7 @@
- import_playbook: ../prerequisites.yml
vars:
l_scale_up_hosts: "oo_nodes_to_config"
+ l_base_packages_hosts: "oo_nodes_to_config"
l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config"
l_sanity_check_hosts: "{{ groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) }}"
diff --git a/playbooks/openshift-prometheus/private/uninstall.yml b/playbooks/openshift-prometheus/private/uninstall.yml
index 2df39c2a8..b01f7f988 100644
--- a/playbooks/openshift-prometheus/private/uninstall.yml
+++ b/playbooks/openshift-prometheus/private/uninstall.yml
@@ -5,4 +5,4 @@
- name: Run the Prometheus Uninstall Role Tasks
include_role:
name: openshift_prometheus
- tasks_from: uninstall
+ tasks_from: uninstall_prometheus