Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/cluster-operator/aws/components.yml                          |  24
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml  |   2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml     |   9
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/config.yml             |   5
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/version_override.yml   |  29
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml |  75
-rw-r--r--  playbooks/common/private/components.yml                                |   4
-rw-r--r--  playbooks/openshift-etcd/scaleup.yml                                   |   1
-rw-r--r--  playbooks/openshift-glusterfs/private/uninstall.yml                    |   8
-rw-r--r--  playbooks/openshift-glusterfs/uninstall.yml                            |   4
-rw-r--r--  playbooks/openshift-master/private/validate_restart.yml                |   6
-rw-r--r--  playbooks/openstack/advanced-configuration.md                          |  32
-rwxr-xr-x  playbooks/openstack/inventory.py                                       |   2
-rw-r--r--  playbooks/openstack/openshift-cluster/install.yml                      |   3
-rw-r--r--  playbooks/openstack/openshift-cluster/provision.yml                    |  14
15 files changed, 170 insertions(+), 48 deletions(-)
diff --git a/playbooks/cluster-operator/aws/components.yml b/playbooks/cluster-operator/aws/components.yml
new file mode 100644
index 000000000..8587aac45
--- /dev/null
+++ b/playbooks/cluster-operator/aws/components.yml
@@ -0,0 +1,24 @@
+---
+- name: Alert user to variables needed
+ hosts: localhost
+ tasks:
+ - name: Alert user to variables needed - clusterid
+ debug:
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
+
+ - name: Alert user to variables needed - region
+ debug:
+ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
+
+- name: Setup the master node group
+ hosts: localhost
+ tasks:
+ - import_role:
+ name: openshift_aws
+ tasks_from: setup_master_group.yml
+
+- name: run the init
+ import_playbook: ../../init/main.yml
+
+- name: Include the components playbook to finish the hosted configuration
+ import_playbook: ../../common/private/components.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 8392e21ee..094c70b46 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -19,7 +19,7 @@
- import_role:
name: container_runtime
tasks_from: docker_upgrade_check.yml
- when: docker_upgrade is not defined or docker_upgrade | bool
+ when: docker_upgrade | default(True) | bool
# If a node fails, halt everything, the admin will need to clean up and we
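The rewritten condition is behaviorally equivalent to the old one but terser: `default(True)` supplies the fallback when `docker_upgrade` is undefined. A standalone sketch of the idiom:
```
# Runs unless docker_upgrade is explicitly set to a falsy value
- debug:
    msg: "docker upgrade check would run"
  when: docker_upgrade | default(True) | bool
```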
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 86cde2844..3144e9ef5 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -130,7 +130,7 @@
# Step 2: Set a fact to be used to determine if we should run the redeploy of registry certs
- name: set a fact to include the registry certs playbook if needed
set_fact:
- openshift_hosted_rollout_certs_and_registry: "{{ cert_output.rc == 0 }}"
+ openshift_hosted_rollout_certs_and_registry: "{{ cert_output.rc != 0 }}"
# Run the redeploy certs based upon the certificates. Defaults to False for insecure registries
- when: (hostvars[groups.oo_first_master.0].openshift_hosted_rollout_certs_and_registry | default(False)) | bool
@@ -165,3 +165,10 @@
msg: "WARNING the shared-resource-viewer role could not be upgraded to 3.6 spec because it's marked protected, please see https://bugzilla.redhat.com/show_bug.cgi?id=1493213"
when:
- __shared_resource_viewer_protected | default(false)
+
+- name: Upgrade Service Catalog
+ hosts: oo_first_master
+ roles:
+ - role: openshift_service_catalog
+ when:
+ - openshift_enable_service_catalog | default(true) | bool
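The new Service Catalog play is gated on `openshift_enable_service_catalog`, which defaults to true; to opt out, the flag can be set in inventory (a hedged sketch, not an excerpt from this change):
```
# inventory group_vars -- skip the service catalog upgrade
openshift_enable_service_catalog: false
```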
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
index 44af37b2d..7bf1496cb 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -51,6 +51,10 @@
# l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
# l_openshift_version_check_hosts is passed via upgrade_control_plane.yml
+# version_override will set various version-related variables during a double upgrade.
+- import_playbook: version_override.yml
+ when: l_double_upgrade_cp | default(False)
+
- import_playbook: verify_cluster.yml
# If we're only upgrading nodes, we need to ensure masters are already upgraded
@@ -79,3 +83,4 @@
- import_role:
name: container_runtime
tasks_from: docker_upgrade_check.yml
+ when: docker_upgrade | default(True) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/version_override.yml b/playbooks/common/openshift-cluster/upgrades/pre/version_override.yml
new file mode 100644
index 000000000..b2954397f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/version_override.yml
@@ -0,0 +1,29 @@
+---
+# This playbook overrides normal version setting during double upgrades.
+
+- name: Set proper version values for upgrade
+ hosts: "{{ l_version_override_hosts | default('all:!all') }}"
+ tasks:
+ - set_fact:
+ # All of these will either have been set by openshift_version or
+ # provided by the user; we need to save these for later.
+ l_double_upgrade_saved_version: "{{ openshift_version }}"
+ l_double_upgrade_saved_release: "{{ openshift_release | default(openshift_upgrade_target) }}"
+ l_double_upgrade_saved_tag: "{{ openshift_image_tag }}"
+ l_double_upgrade_saved_pkgv: "{{ openshift_pkg_version }}"
+ - set_fact:
+    # We already ran openshift_version for the second of the two upgrades;
+    # here we need to set some variables to enable the first upgrade.
+    # openshift_version, openshift_image_tag, and openshift_pkg_version
+    # will be modified by openshift_version; we initially set them to the
+    # first-upgrade versions so that the second-upgrade versions
+    # (e.g., 3.9 rather than 3.8) are not accidentally used.
+ l_double_upgrade_cp_reset_version: True
+ openshift_version: "{{ l_double_upgrade_first_version }}"
+ openshift_release: "{{ l_double_upgrade_first_release }}"
+ openshift_upgrade_target: '3.8'
+ openshift_upgrade_min: '3.7'
+
+# Now that we have force-set a different version, we need to update a few things
+# to ensure we have settings that actually match what's in repos/registries.
+- import_playbook: ../../../../init/version.yml
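Conceptually, the play implements a save/override/restore cycle around the intermediate upgrade; a condensed sketch of the assumed flow:
```
- set_fact:
    l_double_upgrade_saved_version: "{{ openshift_version }}"  # stash the 3.9 values
- set_fact:
    openshift_version: "{{ l_double_upgrade_first_version }}"  # force 3.8 for pass one
# ... the 3.8 control-plane upgrade runs here ...
- set_fact:
    openshift_version: "{{ l_double_upgrade_saved_version }}"  # restore for pass two
```
The restore half of the cycle lives in `upgrade_control_plane.yml` (the "Restore 3.9 version variables" play below).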
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 9c7677f1b..c21862dea 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -17,32 +17,32 @@
l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
l_base_packages_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-## Check to see if they're running 3.7 and if so upgrade them to 3.8 on control plan
-## If they've specified pkg_version or image_tag preserve that for later use
-- name: Configure the upgrade target for the common upgrade tasks 3.8
+- name: Configure the initial upgrade target for the common upgrade tasks
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
- openshift_upgrade_target: '3.8'
+      # We use 3.9 here so that running openshift_version picks up the
+      # correct 3.9 values; the 3.8 values are hard-coded in
+      # ../pre/version_override.yml, if necessary.
+ openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.7'
- openshift_release: '3.8'
- _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}"
- openshift_pkg_version: ''
- _requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}"
+
+## Check to see if we need to double upgrade (3.7 -> 3.8 -> 3.9)
+- name: Configure variables for double upgrade
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - set_fact:
l_double_upgrade_cp: True
+ l_version_override_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_double_upgrade_first_version: "3.8"
+ l_double_upgrade_first_release: "3.8"
when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
- - name: set l_force_image_tag_to_version = True
- set_fact:
- # Need to set this during 3.8 upgrade to ensure image_tag is set correctly
- # to match 3.8 version
- l_force_image_tag_to_version: True
- when: _requested_image_tag is defined
-
- import_playbook: ../pre/config.yml
# These vars are meant to exclude oo_nodes from plays that would otherwise include
# them by default.
vars:
+ l_version_override_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
@@ -52,46 +52,48 @@
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
openshift_protect_installed_version: False
- when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+ when: l_double_upgrade_cp | default(False)
- name: Flag pre-upgrade checks complete for hosts without errors 3.8
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- set_fact:
pre_upgrade_complete: True
- when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+ when: l_double_upgrade_cp | default(False)
# Pre-upgrade completed
- name: Intermediate 3.8 Upgrade
import_playbook: ../upgrade_control_plane.yml
- when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+ when: l_double_upgrade_cp | default(False)
+
+- name: Restore 3.9 version variables
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ # all:!all == 0 hosts
+ l_version_override_hosts: "all:!all"
+ openshift_version: "{{ l_double_upgrade_saved_version }}"
+ openshift_release: "{{ l_double_upgrade_saved_release }}"
+ openshift_image_tag: "{{ l_double_upgrade_saved_tag }}"
+ openshift_pkg_version: "{{ l_double_upgrade_saved_pkgv }}"
+ when: l_double_upgrade_cp | default(False)
## 3.8 upgrade complete; we should now be able to upgrade to 3.9
+- name: Clear some values now that we're done with double upgrades.
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ l_double_upgrade_cp: False
+ l_double_upgrade_cp_reset_version: False
-- name: Configure the upgrade target for the common upgrade tasks 3.9
+# We should be on 3.8 at this point; we need to set upgrade_target to 3.9
+- name: Configure the upgrade target for second upgrade
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- - meta: clear_facts
- set_fact:
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.8'
- openshift_release: '3.9'
- openshift_pkg_version: "{{ _requested_pkg_version if _requested_pkg_version is defined else '' }}"
- # Set the user's specified image_tag for 3.9 upgrade if it was provided.
- - set_fact:
- openshift_image_tag: "{{ _requested_image_tag }}"
- l_force_image_tag_to_version: False
- when: _requested_image_tag is defined
- # If the user didn't specify an image_tag, we need to force update image_tag
- # because it will have already been set during 3.8. If we aren't running
- # a double upgrade, then we can preserve image_tag because it will still
- # be the user provided value.
- - set_fact:
- l_force_image_tag_to_version: True
- when:
- - l_double_upgrade_cp is defined and l_double_upgrade_cp
- - _requested_image_tag is not defined
- import_playbook: ../pre/config.yml
# These vars are meant to exclude oo_nodes from plays that would otherwise include
@@ -106,7 +108,6 @@
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
openshift_protect_installed_version: False
- openshift_version_reinit: True
- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
diff --git a/playbooks/common/private/components.yml b/playbooks/common/private/components.yml
index 089645d07..739be93c5 100644
--- a/playbooks/common/private/components.yml
+++ b/playbooks/common/private/components.yml
@@ -20,7 +20,9 @@
- import_playbook: ../../openshift-hosted/private/config.yml
- import_playbook: ../../openshift-web-console/private/config.yml
- when: openshift_web_console_install | default(true) | bool
+ when:
+ - openshift_web_console_install | default(true) | bool
+ - openshift.common.version_gte_3_9
- import_playbook: ../../openshift-metrics/private/config.yml
when: openshift_metrics_install_metrics | default(false) | bool
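When `when:` is given a list, Ansible ANDs the conditions, so the web console playbook now runs only if both hold. An equivalent single-expression form, for illustration:
```
when: (openshift_web_console_install | default(true) | bool) and openshift.common.version_gte_3_9
```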
diff --git a/playbooks/openshift-etcd/scaleup.yml b/playbooks/openshift-etcd/scaleup.yml
index 3e2fca8d4..1b2229baa 100644
--- a/playbooks/openshift-etcd/scaleup.yml
+++ b/playbooks/openshift-etcd/scaleup.yml
@@ -45,6 +45,7 @@
vars:
skip_version: True
l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_new_etcd_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_new_etcd_to_config'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config']) }}"
l_openshift_version_set_hosts: "all:!all"
l_openshift_version_check_hosts: "all:!all"
when:
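The `all:!all` pattern used for `l_openshift_version_set_hosts` and `l_openshift_version_check_hosts` intersects "all hosts" with "not all hosts" and therefore matches nothing, so the version plays are effectively skipped; for illustration:
```
# A play targeting "all:!all" matches zero hosts and is skipped entirely
- hosts: "all:!all"
  tasks:
    - debug:
        msg: "never executed"
```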
diff --git a/playbooks/openshift-glusterfs/private/uninstall.yml b/playbooks/openshift-glusterfs/private/uninstall.yml
new file mode 100644
index 000000000..40f178f4c
--- /dev/null
+++ b/playbooks/openshift-glusterfs/private/uninstall.yml
@@ -0,0 +1,8 @@
+---
+- name: Uninstall GlusterFS
+ hosts: oo_first_master
+ tasks:
+ - name: Run glusterfs uninstall role
+ include_role:
+ name: openshift_storage_glusterfs
+ tasks_from: uninstall.yml
diff --git a/playbooks/openshift-glusterfs/uninstall.yml b/playbooks/openshift-glusterfs/uninstall.yml
new file mode 100644
index 000000000..77bf75c23
--- /dev/null
+++ b/playbooks/openshift-glusterfs/uninstall.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/uninstall.yml
diff --git a/playbooks/openshift-master/private/validate_restart.yml b/playbooks/openshift-master/private/validate_restart.yml
index 60b0e5bb6..40aaa653c 100644
--- a/playbooks/openshift-master/private/validate_restart.yml
+++ b/playbooks/openshift-master/private/validate_restart.yml
@@ -33,6 +33,7 @@
- stat: path="{{ hostvars.localhost.mktemp.stdout }}"
register: exists
changed_when: false
+ when: "'stdout' in hostvars.localhost.mktemp"
- name: Cleanup temp file on localhost
hosts: localhost
@@ -41,6 +42,7 @@
tasks:
- file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent
changed_when: false
+ when: "'stdout' in hostvars.localhost.mktemp"
- name: Warn if restarting the system where ansible is running
hosts: oo_masters_to_config
@@ -54,7 +56,9 @@
must be verified manually. To only restart services, set
openshift_master_rolling_restart_mode=services in host
inventory and relaunch the playbook.
- when: exists.stat.exists and openshift.common.rolling_restart_mode == 'system'
+ when:
+ - "'stat' in exists"
+ - exists.stat.exists and openshift.common.rolling_restart_mode == 'system'
- set_fact:
current_host: "{{ exists.stat.exists }}"
when: openshift.common.rolling_restart_mode == 'system'
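The added `'stdout' in hostvars.localhost.mktemp` and `'stat' in exists` guards matter because a registered variable from a skipped task carries only `skipped: true`, with no `stdout` or `stat` key, so dereferencing it would fail. A minimal sketch of the pattern (hypothetical task, not from this change):
```
- command: mktemp
  register: mktemp
  when: false                    # if skipped, mktemp has no 'stdout' key
- debug:
    msg: "{{ mktemp.stdout }}"
  when: "'stdout' in mktemp"     # guard before dereferencing
```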
diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md
index e8f4cfc32..8df3c40b0 100644
--- a/playbooks/openstack/advanced-configuration.md
+++ b/playbooks/openstack/advanced-configuration.md
@@ -273,6 +273,38 @@ openshift_openstack_cluster_node_labels:
mylabel: myvalue
```
+`openshift_openstack_provision_user_commands` allows users to execute
+shell commands via cloud-init for all of the created Nova servers in
+the Heat stack, before they are available for SSH connections.
+Note that you should use custom Ansible playbooks whenever
+possible, like this `provision_install_custom.yml` example playbook:
+```
+- import_playbook: openshift-ansible/playbooks/openstack/openshift-cluster/provision.yml
+
+- name: My custom actions
+ hosts: cluster_hosts
+ tasks:
+ - do whatever you want here
+
+- import_playbook: openshift-ansible/playbooks/openstack/openshift-cluster/install.yml
+```
+The playbook leverages two existing provider interfaces: `provision.yml` and
+`install.yml`. For some cases, such as SSH key configuration and coordinated
+reboots of servers, the cloud-init runcmd directive may be a better choice.
+User-specified shell commands for cloud-init need to be either strings or lists,
+for example:
+```
+- openshift_openstack_provision_user_commands:
+ - set -vx
+ - systemctl stop sshd # fences off ansible playbooks as we want to reboot later
+ - ['echo', 'foo', '>', '/tmp/foo']
+ - [ ls, /tmp/foo, '||', true ]
+ - reboot # unfences ansible playbooks to continue after reboot
+```
+
+**Note** To protect Nova servers from being recreated when the user-data
+changes via `openshift_openstack_provision_user_commands`, the
+`user_data_update_policy` parameter is configured to `IGNORE` for Heat resources.
+
The `openshift_openstack_nodes_to_remove` allows you to specify the numerical indexes
of App nodes that should be removed; for example, ['0', '2'],
diff --git a/playbooks/openstack/inventory.py b/playbooks/openstack/inventory.py
index d5a8c3e24..c16a9e228 100755
--- a/playbooks/openstack/inventory.py
+++ b/playbooks/openstack/inventory.py
@@ -89,7 +89,7 @@ def build_inventory():
for server in cluster_hosts:
if 'group' in server.metadata:
- group = server.metadata.group
+ group = server.metadata.get('group')
if group not in inventory:
inventory[group] = {'hosts': []}
inventory[group]['hosts'].append(server.name)
diff --git a/playbooks/openstack/openshift-cluster/install.yml b/playbooks/openstack/openshift-cluster/install.yml
index 2ab7d14a0..cb6bf4d11 100644
--- a/playbooks/openstack/openshift-cluster/install.yml
+++ b/playbooks/openstack/openshift-cluster/install.yml
@@ -8,8 +8,7 @@
# values here. We do it in the OSEv3 group vars. Do we need to add
# some logic here?
-- name: run the cluster deploy
- import_playbook: ../../prerequisites.yml
+- import_playbook: ../../prerequisites.yml
- name: run the cluster deploy
import_playbook: ../../deploy_cluster.yml
diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml
index 73c1926a0..44e3d00c0 100644
--- a/playbooks/openstack/openshift-cluster/provision.yml
+++ b/playbooks/openstack/openshift-cluster/provision.yml
@@ -26,9 +26,6 @@
- name: Gather facts for the new nodes
setup:
-- import_playbook: ../../init/basic_facts.yml
-- import_playbook: ../../init/cluster_facts.yml
-
# TODO(shadower): consider splitting this up so people can stop here
# and configure their DNS if they have to.
@@ -43,7 +40,10 @@
- openshift_openstack_external_nsupdate_keys is defined
- openshift_openstack_external_nsupdate_keys.private is defined or openshift_openstack_external_nsupdate_keys.public is defined
-- name: Prepare the Nodes in the cluster for installation
+
+- import_playbook: ../../init/basic_facts.yml
+
+- name: Optionally subscribe the RHEL nodes
hosts: oo_all_hosts
become: yes
gather_facts: yes
@@ -63,6 +63,12 @@
- ansible_distribution == "RedHat"
- rh_subscribed is defined
+
+- name: Prepare the Nodes in the cluster for installation
+ hosts: oo_all_hosts
+ become: yes
+ gather_facts: yes
+ tasks:
- name: Install dependencies
import_role:
name: openshift_openstack