Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/adhoc/openshift_hosted_logging_efk.yaml | 16
-rw-r--r--  playbooks/aws/README.md | 18
-rwxr-xr-x  playbooks/aws/openshift-cluster/accept.yml | 41
-rw-r--r--  playbooks/aws/openshift-cluster/hosted.yml | 25
-rw-r--r--  playbooks/aws/openshift-cluster/install.yml | 27
-rw-r--r--  playbooks/aws/openshift-cluster/provision.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/provision_elb.yml | 9
-rw-r--r--  playbooks/aws/openshift-cluster/provision_install.yml | 4
-rw-r--r--  playbooks/aws/openshift-cluster/provision_s3.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/uninstall_prerequisites.yml | 6
-rw-r--r--  playbooks/aws/openshift-cluster/uninstall_s3.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/uninstall_sec_group.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/uninstall_vpc.yml | 10
-rw-r--r--  playbooks/aws/provisioning_vars.yml.example | 24
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_10/README.md | 20
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml | 5
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml | 16
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml | 7
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml (renamed from playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml) | 0
-rw-r--r--  playbooks/cluster-operator/aws/infrastructure.yml | 21
l---------  playbooks/cluster-operator/aws/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 33
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/config.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml | 24
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 48
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 27
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_10/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml | 58
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml | 35
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml | 19
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml | 52
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 104
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml | 2
-rw-r--r--  playbooks/common/private/components.yml | 38
-rw-r--r--  playbooks/common/private/control_plane.yml | 34
-rw-r--r--  playbooks/container-runtime/private/build_container_groups.yml | 6
-rw-r--r--  playbooks/container-runtime/private/config.yml | 8
-rw-r--r--  playbooks/container-runtime/private/setup_storage.yml | 7
-rw-r--r--  playbooks/deploy_cluster.yml | 45
-rw-r--r--  playbooks/gcp/openshift-cluster/build_base_image.yml | 163
-rw-r--r--  playbooks/gcp/openshift-cluster/build_image.yml | 106
-rw-r--r--  playbooks/gcp/openshift-cluster/deprovision.yml | 10
-rw-r--r--  playbooks/gcp/openshift-cluster/install.yml | 33
-rw-r--r--  playbooks/gcp/openshift-cluster/install_gcp.yml | 21
-rw-r--r--  playbooks/gcp/openshift-cluster/inventory.yml | 10
-rw-r--r--  playbooks/gcp/openshift-cluster/launch.yml | 12
-rw-r--r--  playbooks/gcp/openshift-cluster/provision.yml (renamed from playbooks/gcp/provision.yml) | 9
-rw-r--r--  playbooks/gcp/openshift-cluster/publish_image.yml | 9
l---------  playbooks/gcp/openshift-cluster/roles | 1
-rw-r--r--  playbooks/init/base_packages.yml | 6
-rw-r--r--  playbooks/init/basic_facts.yml (renamed from playbooks/init/facts.yml) | 57
-rw-r--r--  playbooks/init/cluster_facts.yml | 42
-rw-r--r--  playbooks/init/evaluate_groups.yml | 8
-rw-r--r--  playbooks/init/main.yml | 11
-rw-r--r--  playbooks/init/repos.yml | 4
-rw-r--r--  playbooks/init/sanity_checks.yml | 3
-rw-r--r--  playbooks/init/validate_hostnames.yml | 4
-rw-r--r--  playbooks/init/version.yml | 4
-rw-r--r--  playbooks/openshift-checks/adhoc.yml | 1
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_main.yml | 33
-rw-r--r--  playbooks/openshift-etcd/scaleup.yml | 47
-rw-r--r--  playbooks/openshift-etcd/upgrade.yml | 6
-rw-r--r--  playbooks/openshift-grafana/config.yml | 4
-rw-r--r--  playbooks/openshift-grafana/private/config.yml | 6
l---------  playbooks/openshift-grafana/private/filter_plugins | 1
l---------  playbooks/openshift-grafana/private/lookup_plugins | 1
l---------  playbooks/openshift-grafana/private/roles | 1
-rw-r--r--  playbooks/openshift-hosted/deploy_registry.yml | 4
-rw-r--r--  playbooks/openshift-hosted/deploy_router.yml | 4
-rw-r--r--  playbooks/openshift-hosted/private/openshift_default_storage_class.yml | 4
-rw-r--r--  playbooks/openshift-hosted/private/redeploy-registry-certificates.yml | 3
-rw-r--r--  playbooks/openshift-hosted/private/redeploy-router-certificates.yml | 3
-rw-r--r--  playbooks/openshift-loadbalancer/private/config.yml | 2
-rw-r--r--  playbooks/openshift-logging/private/config.yml | 40
-rw-r--r--  playbooks/openshift-master/private/additional_config.yml | 1
-rw-r--r--  playbooks/openshift-master/private/certificates-backup.yml | 1
-rw-r--r--  playbooks/openshift-master/private/config.yml | 3
-rw-r--r--  playbooks/openshift-master/private/restart.yml | 9
-rw-r--r--  playbooks/openshift-master/private/scaleup.yml | 1
-rw-r--r--  playbooks/openshift-master/private/tasks/wire_aggregator.yml | 86
-rw-r--r--  playbooks/openshift-master/scaleup.yml | 41
-rw-r--r--  playbooks/openshift-metrics/private/config.yml | 1
-rw-r--r--  playbooks/openshift-node/private/restart.yml | 1
-rw-r--r--  playbooks/openshift-node/redeploy-certificates.yml | 2
-rw-r--r--  playbooks/openshift-node/scaleup.yml | 24
-rw-r--r--  playbooks/openshift-prometheus/private/uninstall.yml | 8
-rw-r--r--  playbooks/openshift-prometheus/uninstall.yml | 2
-rw-r--r--  playbooks/openstack/README.md | 28
-rw-r--r--  playbooks/openstack/advanced-configuration.md | 127
-rwxr-xr-x  playbooks/openstack/inventory.py (renamed from playbooks/openstack/sample-inventory/inventory.py) | 54
-rw-r--r--  playbooks/openstack/openshift-cluster/provision.yml | 4
-rw-r--r--  playbooks/openstack/sample-inventory/group_vars/OSEv3.yml | 3
-rw-r--r--  playbooks/prerequisites.yml | 9
112 files changed, 1521 insertions, 467 deletions
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
deleted file mode 100644
index faeb332ad..000000000
--- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- hosts: masters[0]
- roles:
- - role: openshift_logging
- openshift_hosted_logging_cleanup: no
-
-- name: Update master-config for publicLoggingURL
- hosts: masters:!masters[0]
- pre_tasks:
- - set_fact:
- openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain }}"
- tasks:
- - import_role:
- name: openshift_logging
- tasks_from: update_master_config
- when: openshift_hosted_logging_deploy | default(false) | bool
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index d203b9cda..cf811ca84 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -198,3 +198,21 @@ At this point your cluster should be ready for workloads. Proceed to deploy app
### Still to come
There are more enhancements that are arriving for provisioning. These will include more playbooks that enhance the provisioning capabilities.
+
+## Uninstall / Deprovisioning
+
+To undo the work done by the prerequisites playbook, call the uninstall_prerequisites.yml playbook. Any other objects (e.g. ELBs, instances) must be removed before attempting this. Use the same inventory file and provisioning_vars.yml file that were used during provisioning.
+
+```
+ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_prerequisites.yml
+```
+
+This should result in removal of the security groups and VPC that were created.
+
+Cleaning up the S3 bucket contents can be accomplished with:
+
+```
+ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_s3.yml
+```
+
+NOTE: If you also want to remove the ssh keys that were uploaded, add 'openshift_aws_enable_uninstall_shared_objects: True' to your provisioning_vars.yml file. **These ssh keys may be shared if you are running multiple clusters in the same AWS account**, so they are not removed by default.
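
As a rough illustration of the note above, the same provisioning_vars.yml used for provisioning might carry the uninstall flag like this; the clusterid and region values are placeholders, not part of this change:

```yaml
# provisioning_vars.yml (sketch; values are illustrative only)
openshift_aws_clusterid: mycluster          # same id used when provisioning
openshift_aws_region: us-east-1             # same region used when provisioning
# opt in to deleting the shared ssh keypair(s) during uninstall
openshift_aws_enable_uninstall_shared_objects: True
```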
diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml
index e7bed4f6e..46c453333 100755
--- a/playbooks/aws/openshift-cluster/accept.yml
+++ b/playbooks/aws/openshift-cluster/accept.yml
@@ -1,8 +1,7 @@
#!/usr/bin/ansible-playbook
---
-- name: Setup the vpc and the master node group
+- name: Accept nodes
hosts: localhost
- remote_user: root
gather_facts: no
tasks:
- name: Alert user to variables needed - clusterid
@@ -17,37 +16,7 @@
import_role:
name: lib_openshift
- - name: fetch masters
- ec2_instance_facts:
- region: "{{ openshift_aws_region | default('us-east-1') }}"
- filters:
- "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
- "tag:host-type": master
- instance-state-name: running
- register: mastersout
- retries: 20
- delay: 3
- until: "'instances' in mastersout and mastersout.instances|length > 0"
-
- - name: fetch new node instances
- ec2_instance_facts:
- region: "{{ openshift_aws_region | default('us-east-1') }}"
- filters:
- "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
- "tag:host-type": node
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until: "'instances' in instancesout and instancesout.instances|length > 0"
-
- - debug:
- msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
-
- - name: approve nodes
- oc_adm_csr:
- #approve_all: True
- nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
- timeout: 60
- register: nodeout
- delegate_to: "{{ mastersout.instances[0].public_ip_address }}"
+ - name: accept nodes
+ import_role:
+ name: openshift_aws
+ tasks_from: accept_nodes.yml
diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml
deleted file mode 100644
index 9d9ed29de..000000000
--- a/playbooks/aws/openshift-cluster/hosted.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- import_playbook: ../../openshift-hosted/private/config.yml
-
-- import_playbook: ../../openshift-metrics/private/config.yml
- when: openshift_metrics_install_metrics | default(false) | bool
-
-- import_playbook: ../../openshift-logging/private/config.yml
- when: openshift_logging_install_logging | default(false) | bool
-
-- import_playbook: ../../openshift-prometheus/private/config.yml
- when: openshift_hosted_prometheus_deploy | default(false) | bool
-
-- import_playbook: ../../openshift-service-catalog/private/config.yml
- when: openshift_enable_service_catalog | default(false) | bool
-
-- import_playbook: ../../openshift-management/private/config.yml
- when: openshift_management_install_management | default(false) | bool
-
-- name: Print deprecated variable warning message if necessary
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - debug: msg="{{__deprecation_message}}"
- when:
- - __deprecation_message | default ('') | length > 0
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
index a3fc82f9a..938e83f5e 100644
--- a/playbooks/aws/openshift-cluster/install.yml
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -18,29 +18,8 @@
- name: run the init
import_playbook: ../../init/main.yml
-- name: perform the installer openshift-checks
- import_playbook: ../../openshift-checks/private/install.yml
+- name: configure the control plane
+ import_playbook: ../../common/private/control_plane.yml
-- name: etcd install
- import_playbook: ../../openshift-etcd/private/config.yml
-
-- name: include nfs
- import_playbook: ../../openshift-nfs/private/config.yml
- when: groups.oo_nfs_to_config | default([]) | count > 0
-
-- name: include loadbalancer
- import_playbook: ../../openshift-loadbalancer/private/config.yml
- when: groups.oo_lb_to_config | default([]) | count > 0
-
-- name: include openshift-master config
- import_playbook: ../../openshift-master/private/config.yml
-
-- name: include master additional config
- import_playbook: ../../openshift-master/private/additional_config.yml
-
-- name: include master additional config
+- name: ensure the masters are configured as nodes
import_playbook: ../../openshift-node/private/config.yml
-
-- name: include openshift-glusterfs
- import_playbook: ../../openshift-glusterfs/private/config.yml
- when: groups.oo_glusterfs_to_config | default([]) | count > 0
diff --git a/playbooks/aws/openshift-cluster/provision.yml b/playbooks/aws/openshift-cluster/provision.yml
index 7dde60b7d..d538b862d 100644
--- a/playbooks/aws/openshift-cluster/provision.yml
+++ b/playbooks/aws/openshift-cluster/provision.yml
@@ -1,8 +1,7 @@
---
-- name: Setup the elb and the master node group
+- name: Alert user to variables needed
hosts: localhost
tasks:
-
- name: Alert user to variables needed - clusterid
debug:
msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
@@ -11,6 +10,13 @@
debug:
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
+- import_playbook: provision_s3.yml
+
+- import_playbook: provision_elb.yml
+
+- name: Create the master node group
+ hosts: localhost
+ tasks:
- name: provision cluster
import_role:
name: openshift_aws
diff --git a/playbooks/aws/openshift-cluster/provision_elb.yml b/playbooks/aws/openshift-cluster/provision_elb.yml
new file mode 100644
index 000000000..9f27dca3b
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_elb.yml
@@ -0,0 +1,9 @@
+---
+- name: Create elb
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: provision elb
+ include_role:
+ name: openshift_aws
+ tasks_from: provision_elb.yml
diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml
index f98f5be9a..bd154fa83 100644
--- a/playbooks/aws/openshift-cluster/provision_install.yml
+++ b/playbooks/aws/openshift-cluster/provision_install.yml
@@ -15,5 +15,5 @@
- name: Include the accept.yml playbook to accept nodes into the cluster
import_playbook: accept.yml
-- name: Include the hosted.yml playbook to finish the hosted configuration
- import_playbook: hosted.yml
+- name: Include the components playbook to finish the hosted configuration
+ import_playbook: ../../common/private/components.yml
diff --git a/playbooks/aws/openshift-cluster/provision_s3.yml b/playbooks/aws/openshift-cluster/provision_s3.yml
new file mode 100644
index 000000000..45b439083
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_s3.yml
@@ -0,0 +1,10 @@
+---
+- name: Create s3 bucket
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: create s3 bucket
+ include_role:
+ name: openshift_aws
+ tasks_from: s3.yml
+ when: openshift_aws_create_s3 | default(true) | bool
diff --git a/playbooks/aws/openshift-cluster/uninstall_prerequisites.yml b/playbooks/aws/openshift-cluster/uninstall_prerequisites.yml
new file mode 100644
index 000000000..180c2281a
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_prerequisites.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: uninstall_sec_group.yml
+
+- import_playbook: uninstall_vpc.yml
+
+- import_playbook: uninstall_ssh_keypair.yml
diff --git a/playbooks/aws/openshift-cluster/uninstall_s3.yml b/playbooks/aws/openshift-cluster/uninstall_s3.yml
new file mode 100644
index 000000000..448b47aee
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_s3.yml
@@ -0,0 +1,10 @@
+---
+- name: Empty/delete s3 bucket
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: empty/delete s3 bucket
+ include_role:
+ name: openshift_aws
+ tasks_from: uninstall_s3.yml
+ when: openshift_aws_create_s3 | default(true) | bool
diff --git a/playbooks/aws/openshift-cluster/uninstall_sec_group.yml b/playbooks/aws/openshift-cluster/uninstall_sec_group.yml
new file mode 100644
index 000000000..642e5b169
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_sec_group.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: delete security groups
+ include_role:
+ name: openshift_aws
+ tasks_from: uninstall_security_group.yml
+ when: openshift_aws_create_security_groups | default(True) | bool
diff --git a/playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml b/playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml
new file mode 100644
index 000000000..ec9caa51b
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: remove ssh keypair(s)
+ include_role:
+ name: openshift_aws
+ tasks_from: uninstall_ssh_keys.yml
+ when: openshift_aws_users | default([]) | length > 0
diff --git a/playbooks/aws/openshift-cluster/uninstall_vpc.yml b/playbooks/aws/openshift-cluster/uninstall_vpc.yml
new file mode 100644
index 000000000..4c988bcc5
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_vpc.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: delete vpc
+ include_role:
+ name: openshift_aws
+ tasks_from: uninstall_vpc.yml
+ when: openshift_aws_create_vpc | default(True) | bool
diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example
index f6b1a6b5d..78484fdbd 100644
--- a/playbooks/aws/provisioning_vars.yml.example
+++ b/playbooks/aws/provisioning_vars.yml.example
@@ -21,6 +21,12 @@ openshift_release: # v3.7
# This will be dependent on the version provided by the yum repository
openshift_pkg_version: # -3.7.0
+# OpenShift api port
+# This works around a chicken/egg scenario with how Ansible treats the host inventory
+# file and extra_vars. It is used for SecurityGroups and ELB listeners, as well as
+# an override for the installer inventory openshift_master_api_port key
+# openshift_master_api_port: 8443
+
# specify a clusterid
# This value is also used as the default value for many other components.
#openshift_aws_clusterid: default
@@ -41,11 +47,27 @@ openshift_pkg_version: # -3.7.0
# a vpc, set this to false.
#openshift_aws_create_vpc: true
+# when openshift_aws_create_vpc is true (the default), the VPC defined in
+# openshift_aws_vpc will be created
+#openshift_aws_vpc:
+# name: "{{ openshift_aws_vpc_name }}"
+# cidr: 172.31.0.0/16
+# subnets:
+# us-east-1:
+# - cidr: 172.31.48.0/20
+# az: "us-east-1c"
+# default_az: true
+# - cidr: 172.31.32.0/20
+# az: "us-east-1e"
+# - cidr: 172.31.16.0/20
+# az: "us-east-1a"
+
# Name of the vpc. Needs to be set if using a pre-existing vpc.
#openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
# Name of the subnet in the vpc to use. Needs to be set if using a pre-existing
-# vpc + subnet.
+# vpc + subnet. Otherwise will use the subnet with 'default_az' set (see above
+# example VPC structure)
#openshift_aws_subnet_az:
# -------------- #
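
For contrast with the default openshift_aws_vpc structure added above, a minimal sketch of pointing the playbooks at a pre-existing VPC might look like the following; the name and availability zone are placeholder values, not taken from this diff:

```yaml
# provisioning_vars.yml fragment (sketch, illustrative values)
openshift_aws_create_vpc: false
openshift_aws_vpc_name: existing-vpc        # name of the pre-existing vpc
openshift_aws_subnet_az: us-east-1c         # subnet/AZ within that vpc to use
```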
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_10/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_10/README.md
new file mode 100644
index 000000000..7ede3a28c
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_10/README.md
@@ -0,0 +1,20 @@
+# v3.10 Major and Minor Upgrade Playbook
+
+## Overview
+This playbook currently performs the following steps.
+
+ * Upgrade and restart master services
+ * Unschedule node
+ * Upgrade and restart docker
+ * Upgrade and restart node services
+ * Modifies the subset of the configuration necessary
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+
+```
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml
+```
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml
new file mode 100644
index 000000000..977b4f381
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml
@@ -0,0 +1,5 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_10/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml
new file mode 100644
index 000000000..8b76bf4ff
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml
@@ -0,0 +1,16 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml
+
+- import_playbook: ../../../../openshift-master/private/restart.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml
new file mode 100644
index 000000000..b4353edc2
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml
@@ -0,0 +1,7 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 869e185af..c8f397186 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -12,3 +12,5 @@
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+
+- import_playbook: ../../../../openshift-master/private/restart.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml
index 23a3fcbb5..23a3fcbb5 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml
diff --git a/playbooks/cluster-operator/aws/infrastructure.yml b/playbooks/cluster-operator/aws/infrastructure.yml
new file mode 100644
index 000000000..9669820fb
--- /dev/null
+++ b/playbooks/cluster-operator/aws/infrastructure.yml
@@ -0,0 +1,21 @@
+---
+- name: Alert user to variables needed
+ hosts: localhost
+ tasks:
+ - name: Alert user to variables needed - clusterid
+ debug:
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
+
+ - name: Alert user to variables needed - region
+ debug:
+ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
+
+- import_playbook: ../../aws/openshift-cluster/provision_vpc.yml
+
+- import_playbook: ../../aws/openshift-cluster/provision_ssh_keypair.yml
+
+- import_playbook: ../../aws/openshift-cluster/provision_sec_group.yml
+
+- import_playbook: ../../aws/openshift-cluster/provision_s3.yml
+
+- import_playbook: ../../aws/openshift-cluster/provision_elb.yml
diff --git a/playbooks/cluster-operator/aws/roles b/playbooks/cluster-operator/aws/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/cluster-operator/aws/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
index ef8233b67..6d82fa928 100644
--- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
+++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
@@ -17,6 +17,8 @@
- name: Create service signer certificate
hosts: oo_first_master
+ roles:
+ - openshift_facts
tasks:
- name: Create remote temp directory for creating certs
command: mktemp -d /tmp/openshift-ansible-XXXXXXX
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index ffb11670d..8392e21ee 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -51,13 +51,19 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ openshift_client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
register: l_docker_upgrade_drain_result
until: not (l_docker_upgrade_drain_result is failed)
- retries: 60
- delay: 60
+ retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+ delay: 5
+ failed_when:
+ - l_docker_upgrade_drain_result is failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
- include_tasks: tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
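
The drain behaviour introduced here is keyed off a single variable; a minimal inventory sketch of bounding the drain instead of retrying it (600 is an arbitrary example value):

```yaml
# group_vars sketch (illustrative value)
# With a non-zero timeout the drain is attempted once, bounded by --timeout,
# and a drain that exceeds it is tolerated rather than failing the upgrade.
openshift_upgrade_nodes_drain_timeout: 600   # seconds
```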
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 8ee83819e..ba783638d 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -5,7 +5,8 @@
g_new_master_hosts: []
g_new_node_hosts: []
-- import_playbook: ../../../init/facts.yml
+- import_playbook: ../../../init/basic_facts.yml
+- import_playbook: ../../../init/cluster_facts.yml
- name: Ensure firewall is not switched during upgrade
hosts: "{{ l_upgrade_no_switch_firewall_hosts | default('oo_all_hosts') }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index fc1cbf32a..07be0b0d4 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -31,7 +31,7 @@
with_items: " {{ groups['oo_nodes_to_config'] }}"
when:
- hostvars[item].openshift is defined
- - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
+ - hostvars[item].openshift.common.hostname | lower in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
changed_when: false
# Build up the oo_nodes_to_upgrade group, use the list filtered by label if
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 1b57521df..9c927c0a1 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -1,7 +1,15 @@
---
-###############################################################################
-# Post upgrade - Upgrade default router, default registry and examples
-###############################################################################
+####################################################################################
+# Post upgrade - Upgrade web console, default router, default registry, and examples
+####################################################################################
+- name: Upgrade web console
+ hosts: oo_first_master
+ roles:
+ - role: openshift_web_console
+ when:
+ - openshift_web_console_install | default(true) | bool
+ - openshift_upgrade_target is version_compare('3.9','>=')
+
- name: Upgrade default router and default registry
hosts: oo_first_master
vars:
@@ -105,6 +113,25 @@
registry_url: "{{ openshift.master.registry_url }}"
openshift_hosted_templates_import_command: replace
+ post_tasks:
+ # we need to migrate customers to the new pattern of pushing to the registry via dns
+ # Step 1: verify the certificates have the docker registry service name
+ - shell: >
+ echo -n | openssl s_client -showcerts -servername docker-registry.default.svc -connect docker-registry.default.svc:5000 | openssl x509 -text | grep -A1 'X509v3 Subject Alternative Name:' | grep -Pq 'DNS:docker-registry\.default\.svc(,|$)'
+ register: cert_output
+ changed_when: false
+ failed_when:
+ - cert_output.rc not in [0, 1]
+
+ # Step 2: Set a fact to be used to determine if we should run the redeploy of registry certs
+ - name: set a fact to include the registry certs playbook if needed
+ set_fact:
+ openshift_hosted_rollout_certs_and_registry: "{{ cert_output.rc == 0 }}"
+
+# Run the redeploy certs based upon the certificates
+- when: hostvars[groups.oo_first_master.0].openshift_hosted_rollout_certs_and_registry
+ import_playbook: ../../../openshift-hosted/redeploy-registry-certificates.yml
+
# Check for warnings to be printed at the end of the upgrade:
- name: Clean up and display warnings
hosts: oo_masters_to_config
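
For reference, the certificate check added above can be reproduced outside the upgrade as a one-off play; this is only a sketch that re-uses the exact command from the diff (the playbook name and layout are invented for illustration):

```yaml
---
# check_registry_san.yml (hypothetical standalone playbook re-using the same check)
- name: Check docker-registry certificate SAN
  hosts: oo_first_master
  gather_facts: no
  tasks:
    - name: Verify the cert served by the registry includes the service DNS name
      shell: >
        echo -n | openssl s_client -showcerts -servername docker-registry.default.svc
        -connect docker-registry.default.svc:5000 | openssl x509 -text
        | grep -A1 'X509v3 Subject Alternative Name:'
        | grep -Pq 'DNS:docker-registry\.default\.svc(,|$)'
      register: cert_output
      changed_when: false
      failed_when: cert_output.rc not in [0, 1]   # rc 1 just means the SAN was not found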
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
index da63450b8..44af37b2d 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -5,8 +5,6 @@
# Pre-upgrade
- import_playbook: ../initialize_nodes_to_upgrade.yml
-- import_playbook: verify_cluster.yml
-
- name: Update repos on upgrade hosts
hosts: "{{ l_upgrade_repo_hosts }}"
roles:
@@ -49,10 +47,12 @@
# to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
+ # openshift_protect_installed_version is passed in via upgrade_control_plane.yml
# l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
# l_openshift_version_check_hosts is passed via upgrade_control_plane.yml
+- import_playbook: verify_cluster.yml
+
# If we're only upgrading nodes, we need to ensure masters are already upgraded
- name: Verify masters are already upgraded
hosts: oo_masters_to_config
@@ -60,7 +60,7 @@
- fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
when:
- l_upgrade_nodes_only | default(False) | bool
- - openshift.common.version != openshift_version
+ - not openshift.common.version | match(openshift_version)
# If we're only upgrading nodes, skip this.
- import_playbook: ../../../../openshift-master/private/validate_restart.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
index 693ab2d96..463a05688 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -17,6 +17,7 @@
valid version for a {{ openshift_upgrade_target }} upgrade
when:
- openshift_pkg_version is defined
+ - openshift_pkg_version != ""
- openshift_pkg_version.split('-',1).1 is version_compare(openshift_upgrade_target ,'<')
- fail:
@@ -25,6 +26,7 @@
valid version for a {{ openshift_upgrade_target }} upgrade
when:
- openshift_image_tag is defined
+ - openshift_image_tag != ""
- openshift_image_tag.split('v',1).1 is version_compare(openshift_upgrade_target ,'<')
- set_fact:
@@ -92,3 +94,25 @@
state: started
enabled: yes
with_items: "{{ master_services }}"
+
+# Until openshift-ansible is determining which host is the CA host we
+# must (unfortunately) ensure that the first host in the etcd group is
+# the etcd CA host.
+# https://bugzilla.redhat.com/show_bug.cgi?id=1469358
+- name: Verify we can proceed on first etcd
+ hosts: oo_first_etcd
+ gather_facts: no
+ tasks:
+ - name: Ensure CA exists on first etcd
+ stat:
+ path: /etc/etcd/generated_certs
+ register: __etcd_ca_stat
+
+ - fail:
+ msg: >
+ In order to correct an etcd certificate signing problem
+ upgrading may require re-generating etcd certificates. Please
+ ensure that the /etc/etcd/generated_certs directory exists on
+ the first host defined in your [etcd] group.
+ when:
+ - not __etcd_ca_stat.stat.exists | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 4c1156f4b..45ddf7eea 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -21,7 +21,7 @@
block:
- name: Check latest available OpenShift RPM version
repoquery:
- name: "{{ openshift_service_type }}"
+ name: "{{ openshift_service_type }}{{ '-' ~ openshift_release ~ '*' if openshift_release is defined else '' }}"
ignore_excluders: true
register: repoquery_out
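
The effect of the templated package name above is easier to see with concrete values; the service types and release below are illustrative assumptions, not taken from this diff:

```yaml
# Illustration only of how the repoquery name renders:
#   openshift_service_type: origin,           openshift_release: "3.10"  ->  name: "origin-3.10*"
#   openshift_service_type: atomic-openshift, openshift_release not set  ->  name: "atomic-openshift"
```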
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 412075d41..40e245d75 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -2,6 +2,7 @@
###############################################################################
# Upgrade Masters
###############################################################################
+
- name: Backup and upgrade etcd
import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
@@ -22,6 +23,8 @@
# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
- name: Pre master upgrade - Upgrade all storage
hosts: oo_first_master
+ roles:
+ - openshift_facts
tasks:
- name: Upgrade all storage
command: >
@@ -30,7 +33,6 @@
register: l_pb_upgrade_control_plane_pre_upgrade_storage
when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
failed_when:
- - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
- openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
@@ -46,13 +48,10 @@
# support for optional hooks to be defined.
- name: Upgrade master
hosts: oo_masters_to_config
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
+ roles:
+ - openshift_facts
tasks:
- - import_role:
- name: openshift_facts
-
# Run the pre-upgrade hook if defined:
- debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
@@ -71,6 +70,12 @@
- include_tasks: "{{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
+ - name: Disable master controller
+ service:
+ name: "{{ openshift_service_type }}-master-controllers"
+ enabled: false
+ when: openshift.common.rolling_restart_mode == 'system'
+
- include_tasks: ../../../openshift-master/private/tasks/restart_hosts.yml
when: openshift.common.rolling_restart_mode == 'system'
@@ -93,7 +98,6 @@
- openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- openshift_version is version_compare('3.7','<')
failed_when:
- - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
- openshift_upgrade_post_storage_migration_fatal | default(false) | bool
run_once: true
@@ -127,6 +131,7 @@
hosts: oo_masters_to_config
roles:
- { role: openshift_cli }
+ - { role: openshift_facts }
vars:
__master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
tasks:
@@ -228,7 +233,6 @@
register: l_pb_upgrade_control_plane_post_upgrade_storage
when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
failed_when:
- - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
- openshift_upgrade_post_storage_migration_fatal | default(false) | bool
@@ -289,12 +293,18 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_control_plane_drain_result
until: not (l_upgrade_control_plane_drain_result is failed)
- retries: 60
- delay: 60
+ retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+ delay: 5
+ failed_when:
+ - l_upgrade_control_plane_drain_result is failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
roles:
- openshift_facts
@@ -302,13 +312,9 @@
- import_role:
name: openshift_node
tasks_from: upgrade.yml
- - name: Set node schedulability
- oc_adm_manage_node:
- node: "{{ openshift.node.nodename | lower }}"
- schedulable: True
- delegate_to: "{{ groups.oo_first_master.0 }}"
- retries: 10
- delay: 5
- register: node_schedulable
- until: node_schedulable is succeeded
- when: node_unschedulable is changed
+ - import_role:
+ name: openshift_manage_node
+ tasks_from: config.yml
+ vars:
+ openshift_master_host: "{{ groups.oo_first_master.0 }}"
+ openshift_manage_node_is_master: true
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 464af3ae6..915fae9fd 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -33,27 +33,28 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not (l_upgrade_nodes_drain_result is failed)
- retries: 60
- delay: 60
+ retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+ delay: 5
+ failed_when:
+ - l_upgrade_nodes_drain_result is failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
post_tasks:
- import_role:
name: openshift_node
tasks_from: upgrade.yml
- - name: Set node schedulability
- oc_adm_manage_node:
- node: "{{ openshift.node.nodename | lower }}"
- schedulable: True
- delegate_to: "{{ groups.oo_first_master.0 }}"
- retries: 10
- delay: 5
- register: node_schedulable
- until: node_schedulable is succeeded
- when: node_unschedulable is changed
+ - import_role:
+ name: openshift_manage_node
+ tasks_from: config.yml
+ vars:
+ openshift_master_host: "{{ groups.oo_first_master.0 }}"
- name: Re-enable excluders
hosts: oo_nodes_to_upgrade:!oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index 6d59bfd0b..e259b5d09 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -50,11 +50,11 @@
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not (l_upgrade_nodes_drain_result is failed)
- retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0 | int }}"
+ retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
delay: 5
failed_when:
- l_upgrade_nodes_drain_result is failed
- - openshift_upgrade_nodes_drain_timeout | default(0) == '0'
+ - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
# Alright, let's clean up!
- name: clean up the old scale group
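
The fix above amounts to comparing the drain timeout as an integer rather than a string; a short sketch of the difference (comments only, illustrative):

```yaml
# Why the expressions changed (not part of the playbook):
# old: openshift_upgrade_nodes_drain_timeout | default(0) == '0'
#   -> false when the variable is unset, because the int 0 never equals the string '0'
# new: openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
#   -> true for unset, 0, or "0", so the single retry and hard failure apply as intended
```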
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml
@@ -0,0 +1 @@
+---
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/roles b/playbooks/common/openshift-cluster/upgrades/v3_10/roles
new file mode 120000
index 000000000..415645be6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/roles
@@ -0,0 +1 @@
+../../../../../roles/
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml
new file mode 100644
index 000000000..ec1da6d39
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml
@@ -0,0 +1,7 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- import_playbook: upgrade_control_plane.yml
+
+- import_playbook: upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml
new file mode 100644
index 000000000..64ee03562
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml
@@ -0,0 +1,58 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- import_playbook: ../init.yml
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+
+- name: Configure the upgrade target for the common upgrade tasks 3.10
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tasks:
+ - meta: clear_facts
+ - set_fact:
+ openshift_upgrade_target: '3.10'
+ openshift_upgrade_min: '3.9'
+ openshift_release: '3.10'
+
+- import_playbook: ../pre/config.yml
+ # These vars are meant to exclude oo_nodes from plays that would otherwise include
+ # them by default.
+ vars:
+ l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
+ openshift_protect_installed_version: False
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ pre_upgrade_complete: True
+
+- import_playbook: ../upgrade_control_plane.yml
+ vars:
+ openshift_release: '3.10'
+
+- import_playbook: ../post_control_plane.yml
+
+- hosts: oo_masters
+ tasks:
+ - import_role:
+ name: openshift_web_console
+ tasks_from: remove_old_asset_config
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml
new file mode 100644
index 000000000..eea1b250e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml
@@ -0,0 +1,35 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- import_playbook: ../init.yml
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.10'
+ openshift_upgrade_min: '3.9'
+ openshift_release: '3.10'
+
+- import_playbook: ../pre/config.yml
+ vars:
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
+
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml
new file mode 100644
index 000000000..d8540abfb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml
@@ -0,0 +1,7 @@
+---
+- name: Verify 3.8 specific upgrade checks
+ hosts: oo_first_master
+ roles:
+ - { role: lib_openshift }
+ tasks:
+ - debug: msg="noop"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index d520c6aee..a2d21b69f 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -23,6 +23,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+ openshift_protect_installed_version: False
- import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index eb5f07ae0..9aa5a3b64 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -14,7 +14,7 @@
- import_playbook: ../init.yml
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -35,6 +35,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
+ openshift_protect_installed_version: False
- import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 4daa9e490..cc2ec2709 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -23,6 +23,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+ openshift_protect_installed_version: False
- import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 8d42e4c91..b1ecc75d3 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -14,7 +14,7 @@
- import_playbook: ../init.yml
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -35,6 +35,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
+ openshift_protect_installed_version: False
- import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
index 49e691352..9c7688981 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -7,6 +7,7 @@
hosts: oo_first_master
roles:
- { role: lib_openshift }
+ - { role: openshift_facts }
tasks:
- name: Check for invalid namespaces and SDN errors
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index 0f74e0137..a73b7d63a 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -23,6 +23,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+ openshift_protect_installed_version: False
- import_playbook: validator.yml
@@ -35,8 +36,6 @@
# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_7/master_config_upgrade.yml"
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index a2f316c25..723b2e533 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -14,7 +14,8 @@
- import_playbook: ../init.yml
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ when: not skip_version_info | default(false)
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -35,6 +36,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
+ openshift_protect_installed_version: False
- import_playbook: validator.yml
@@ -47,8 +49,6 @@
# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_7/master_config_upgrade.yml"
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
index 1d4d1919c..ed97d539c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
@@ -1,20 +1 @@
---
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.election.lockName'
- yaml_value: 'openshift-master-controllers'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
- yaml_value: service-signer.crt
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
- yaml_value: service-signer.key
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: servingInfo.clientCA
- yaml_value: ca.crt
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index 0aea5069d..ec1da6d39 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -2,54 +2,6 @@
#
# Full Control Plane + Nodes Upgrade
#
-- import_playbook: ../init.yml
+- import_playbook: upgrade_control_plane.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tasks:
- - set_fact:
- openshift_upgrade_target: '3.9'
- openshift_upgrade_min: '3.7'
-
-- import_playbook: ../pre/config.yml
- vars:
- l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
- l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
- l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
- l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
- l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
-
-- import_playbook: validator.yml
-
-- name: Flag pre-upgrade checks complete for hosts without errors
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - set_fact:
- pre_upgrade_complete: True
-
-# Pre-upgrade completed
-
-- import_playbook: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_7/master_config_upgrade.yml"
-
-# All controllers must be stopped at the same time then restarted
-- name: Cycle all controller services to force new leader election mode
- hosts: oo_masters_to_config
- gather_facts: no
- roles:
- - role: openshift_facts
- tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
- systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
- state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
- systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
- state: started
-
-- import_playbook: ../upgrade_nodes.yml
-
-- import_playbook: ../post_control_plane.yml
+- import_playbook: upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index ef9871008..8792295c6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -14,14 +14,29 @@
- import_playbook: ../init.yml
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-- name: Configure the upgrade target for the common upgrade tasks
+## Check to see if they're running 3.7 and, if so, upgrade them to 3.8 on the control plane.
+## If they've specified pkg_version or image_tag, preserve that for later use.
+- name: Configure the upgrade target for the common upgrade tasks 3.8
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
- openshift_upgrade_target: '3.9'
+ openshift_upgrade_target: '3.8'
openshift_upgrade_min: '3.7'
+ openshift_release: '3.8'
+ _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}"
+ openshift_pkg_version: ''
+ _requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}"
+ l_double_upgrade_cp: True
+ when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+
+ - name: set l_force_image_tag_to_version = True
+ set_fact:
+      # Need to set this during the 3.8 upgrade to ensure image_tag is set
+      # correctly to match the 3.8 version.
+ l_force_image_tag_to_version: True
+ when: _requested_image_tag is defined
- import_playbook: ../pre/config.yml
# These vars a meant to exclude oo_nodes from plays that would otherwise include
@@ -35,21 +50,70 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
+ openshift_protect_installed_version: False
+ when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
-- import_playbook: validator.yml
-
-- name: Flag pre-upgrade checks complete for hosts without errors
+- name: Flag pre-upgrade checks complete for hosts without errors 3.8
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- set_fact:
pre_upgrade_complete: True
+ when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
# Pre-upgrade completed
+- name: Intermediate 3.8 Upgrade
+ import_playbook: ../upgrade_control_plane.yml
+ when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
-- import_playbook: ../upgrade_control_plane.yml
+## 3.8 upgrade complete; we should now be able to upgrade to 3.9
+
+- name: Configure the upgrade target for the common upgrade tasks 3.9
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tasks:
+ - meta: clear_facts
+ - set_fact:
+ openshift_upgrade_target: '3.9'
+ openshift_upgrade_min: '3.8'
+ openshift_release: '3.9'
+ openshift_pkg_version: "{{ _requested_pkg_version if _requested_pkg_version is defined else '' }}"
+ # Set the user's specified image_tag for 3.9 upgrade if it was provided.
+ - set_fact:
+ openshift_image_tag: "{{ _requested_image_tag }}"
+ l_force_image_tag_to_version: False
+ when: _requested_image_tag is defined
+  # If the user didn't specify an image_tag, we need to force-update image_tag
+  # because it will have already been set during the 3.8 pass. If we aren't
+  # running a double upgrade, then we can preserve image_tag because it will
+  # still be the user-provided value.
+ - set_fact:
+ l_force_image_tag_to_version: True
+ when:
+ - l_double_upgrade_cp is defined and l_double_upgrade_cp
+ - _requested_image_tag is not defined
+
+- import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise
+  # include them by default.
vars:
- master_config_hook: "v3_7/master_config_upgrade.yml"
+ l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
+ openshift_protect_installed_version: False
+ openshift_version_reinit: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ pre_upgrade_complete: True
+
+- import_playbook: ../upgrade_control_plane.yml
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
@@ -58,13 +122,21 @@
roles:
- role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
- systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
- state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
- systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
- state: started
+ - name: Restart master controllers to force new leader election mode
+ service:
+ name: "{{ openshift_service_type }}-master-controllers"
+      state: restarted
+    when: openshift.common.rolling_restart_mode == 'services'
+ - name: Re-enable master controllers to force new leader election mode
+ service:
+ name: "{{ openshift_service_type }}-master-controllers"
+ enabled: true
+ when: openshift.common.rolling_restart_mode == 'system'
- import_playbook: ../post_control_plane.yml
+
+- hosts: oo_masters
+ tasks:
+ - import_role:
+ name: openshift_web_console
+ tasks_from: remove_old_asset_config
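The hunk above implements a double hop: while the first master still reports a version below 3.8, the shared ../upgrade_control_plane.yml is imported once with a 3.8 target, facts are cleared, and it is imported again with the 3.9 target. A stripped-down sketch of that gating pattern, using a hypothetical upgrade_pass.yml playbook name purely for illustration (the version fact and group names mirror the ones used in the patch):

---
# Illustrative only; upgrade_pass.yml is a placeholder playbook.
- name: Intermediate pass, only when starting below 3.8
  import_playbook: upgrade_pass.yml
  vars:
    openshift_upgrade_target: '3.8'
  when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8', '<')

- name: Final pass to 3.9
  import_playbook: upgrade_pass.yml
  vars:
    openshift_upgrade_target: '3.9'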
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
index 1d1b255c1..859b1d88b 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -12,6 +12,7 @@
- set_fact:
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.7'
+ openshift_release: '3.9'
- import_playbook: ../pre/config.yml
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
index 4bd2d87b1..d8540abfb 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
@@ -1,5 +1,5 @@
---
-- name: Verify 3.9 specific upgrade checks
+- name: Verify 3.8 specific upgrade checks
hosts: oo_first_master
roles:
- { role: lib_openshift }
diff --git a/playbooks/common/private/components.yml b/playbooks/common/private/components.yml
new file mode 100644
index 000000000..089645d07
--- /dev/null
+++ b/playbooks/common/private/components.yml
@@ -0,0 +1,38 @@
+---
+# These are the core component plays that configure the layers above the control
+# plane. A component is generally considered any part of OpenShift that runs on
+# top of the cluster and may be considered optional. Over time, much of OpenShift
+# above the Kubernetes apiserver and masters may be considered components.
+#
+# Preconditions:
+#
+# 1. The control plane is configured and reachable from nodes inside the cluster
+# 2. An admin kubeconfig file in /etc/origin/master/admin.kubeconfig that can
+# perform root level actions against the cluster
+# 3. On cloud providers, persistent volume provisioners are configured
+# 4. A subset of nodes is available to allow components to schedule - this must
+# include the masters and usually includes infra nodes.
+# 5. The init/main.yml playbook has been invoked
+
+- import_playbook: ../../openshift-glusterfs/private/config.yml
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-hosted/private/config.yml
+
+- import_playbook: ../../openshift-web-console/private/config.yml
+ when: openshift_web_console_install | default(true) | bool
+
+- import_playbook: ../../openshift-metrics/private/config.yml
+ when: openshift_metrics_install_metrics | default(false) | bool
+
+- import_playbook: ../../openshift-logging/private/config.yml
+ when: openshift_logging_install_logging | default(false) | bool
+
+- import_playbook: ../../openshift-prometheus/private/config.yml
+ when: openshift_hosted_prometheus_deploy | default(false) | bool
+
+- import_playbook: ../../openshift-service-catalog/private/config.yml
+ when: openshift_enable_service_catalog | default(true) | bool
+
+- import_playbook: ../../openshift-management/private/config.yml
+ when: openshift_management_install_management | default(false) | bool
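Every component above is gated by an inventory toggle, so the set of plays that actually run is driven entirely by variables. For illustration, a small vars fragment (values are arbitrary examples, not defaults shipped by this patch) that keeps the web console and service catalog while skipping the optional add-ons:

---
# Illustrative inventory variables; the names come from the conditions above.
openshift_web_console_install: true
openshift_enable_service_catalog: true
openshift_metrics_install_metrics: false
openshift_logging_install_logging: false
openshift_hosted_prometheus_deploy: false
openshift_management_install_management: false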
diff --git a/playbooks/common/private/control_plane.yml b/playbooks/common/private/control_plane.yml
new file mode 100644
index 000000000..0a5f1142b
--- /dev/null
+++ b/playbooks/common/private/control_plane.yml
@@ -0,0 +1,34 @@
+---
+# These are the control plane plays that configure a control plane on top of hosts
+# identified as masters. Over time, some of the pieces of the current control plane
+# may be moved to the components list.
+#
+# It is not required for any nodes to be configured, or passed to be configured,
+# when this playbook is invoked.
+#
+# Preconditions:
+#
+# 1. A set of machines have been identified to act as masters
+# 2. On cloud providers, a load balancer has been configured to point to the masters
+# and that load balancer has a DNS name
+# 3. The init/main.yml playbook has been invoked
+#
+# Postconditions:
+#
+# 1. The control plane is reachable from the outside of the cluster
+# 2. The master has an /etc/origin/master/admin.kubeconfig file that gives cluster-admin
+# access.
+
+- import_playbook: ../../openshift-checks/private/install.yml
+
+- import_playbook: ../../openshift-etcd/private/config.yml
+
+- import_playbook: ../../openshift-nfs/private/config.yml
+ when: groups.oo_nfs_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-loadbalancer/private/config.yml
+ when: groups.oo_lb_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-master/private/config.yml
+
+- import_playbook: ../../openshift-master/private/additional_config.yml
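Because the control plane is now a self-contained composition, other entry points can import it directly after init, which is exactly what deploy_cluster.yml and the GCP install playbook later in this patch do. A minimal wrapper, with a hypothetical filename, would be:

---
# playbooks/deploy_control_plane_only.yml -- illustrative wrapper, not part of this patch
- import_playbook: init/main.yml

- import_playbook: common/private/control_plane.yml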
diff --git a/playbooks/container-runtime/private/build_container_groups.yml b/playbooks/container-runtime/private/build_container_groups.yml
index 7fd60743c..8fb7b63e8 100644
--- a/playbooks/container-runtime/private/build_container_groups.yml
+++ b/playbooks/container-runtime/private/build_container_groups.yml
@@ -1,6 +1,8 @@
---
+# l_build_container_groups_hosts is passed in via prerequisites.yml during
+# etcd scaleup plays.
- name: create oo_hosts_containerized_managed_true host group
- hosts: oo_all_hosts:!oo_nodes_to_config
+ hosts: "{{ l_build_container_groups_hosts | default('oo_all_hosts:!oo_nodes_to_config') }}"
tasks:
- group_by:
- key: oo_hosts_containerized_managed_{{ (containerized | default(False)) | ternary('true','false') }}
+ key: oo_hosts_containerized_managed_{{ (openshift_is_containerized | default(False)) | ternary('true','false') }}
diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml
index 7a49adcf0..5396df20a 100644
--- a/playbooks/container-runtime/private/config.yml
+++ b/playbooks/container-runtime/private/config.yml
@@ -1,7 +1,13 @@
---
+# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+# l_etcd_scale_up_hosts may be passed in via prerequisites.yml during etcd
+# scaleup plays.
+
- import_playbook: build_container_groups.yml
-- hosts: oo_nodes_to_config:oo_hosts_containerized_managed_true
+- hosts: "{{ l_etcd_scale_up_hosts | default(l_scale_up_hosts) | default(l_default_container_runtime_hosts) }}"
+ vars:
+ l_default_container_runtime_hosts: "oo_nodes_to_config:oo_hosts_containerized_managed_true"
roles:
- role: container_runtime
tasks:
diff --git a/playbooks/container-runtime/private/setup_storage.yml b/playbooks/container-runtime/private/setup_storage.yml
index a6d396270..586149b1d 100644
--- a/playbooks/container-runtime/private/setup_storage.yml
+++ b/playbooks/container-runtime/private/setup_storage.yml
@@ -1,8 +1,13 @@
---
+# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+# l_etcd_scale_up_hosts may be passed in via prerequisites.yml during etcd
+# scaleup plays.
+
- import_playbook: build_container_groups.yml
-- hosts: oo_nodes_to_config:oo_hosts_containerized_managed_true
+- hosts: "{{ l_etcd_scale_up_hosts | default(l_scale_up_hosts) | default(l_default_container_storage_hosts) }}"
vars:
+ l_default_container_storage_hosts: "oo_nodes_to_config:oo_hosts_containerized_managed_true"
l_chg_temp: "{{ hostvars[groups['oo_first_master'][0]]['openshift_containerized_host_groups'] | default([]) }}"
l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
# role: container_runtime is necessary here to bring role default variables
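Both container-runtime plays above resolve their host pattern through the same chain: an etcd scaleup override wins, then a generic scaleup override, then the built-in default, because default() without a second argument only fires when the preceding variable is undefined. A tiny self-contained illustration of that precedence (variable values are arbitrary):

---
# Illustrative only: chained default() lookups pick the first defined value.
- hosts: localhost
  gather_facts: false
  vars:
    l_default_hosts: "oo_nodes_to_config:oo_hosts_containerized_managed_true"
    # Uncomment either override to see it take precedence:
    # l_scale_up_hosts: "oo_nodes_to_config"
    # l_etcd_scale_up_hosts: "oo_hosts_containerized_managed_true"
  tasks:
    - debug:
        msg: "{{ l_etcd_scale_up_hosts | default(l_scale_up_hosts) | default(l_default_hosts) }}"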
diff --git a/playbooks/deploy_cluster.yml b/playbooks/deploy_cluster.yml
index 5efdc486a..c8e30ddbc 100644
--- a/playbooks/deploy_cluster.yml
+++ b/playbooks/deploy_cluster.yml
@@ -1,49 +1,8 @@
---
- import_playbook: init/main.yml
-- import_playbook: openshift-checks/private/install.yml
-
-- import_playbook: openshift-etcd/private/config.yml
-
-- import_playbook: openshift-nfs/private/config.yml
- when: groups.oo_nfs_to_config | default([]) | count > 0
-
-- import_playbook: openshift-loadbalancer/private/config.yml
- when: groups.oo_lb_to_config | default([]) | count > 0
-
-- import_playbook: openshift-master/private/config.yml
-
-- import_playbook: openshift-master/private/additional_config.yml
+- import_playbook: common/private/control_plane.yml
- import_playbook: openshift-node/private/config.yml
-- import_playbook: openshift-glusterfs/private/config.yml
- when: groups.oo_glusterfs_to_config | default([]) | count > 0
-
-- import_playbook: openshift-hosted/private/config.yml
-
-- import_playbook: openshift-web-console/private/config.yml
- when: openshift_web_console_install | default(true) | bool
-
-- import_playbook: openshift-metrics/private/config.yml
- when: openshift_metrics_install_metrics | default(false) | bool
-
-- import_playbook: openshift-logging/private/config.yml
- when: openshift_logging_install_logging | default(false) | bool
-
-- import_playbook: openshift-prometheus/private/config.yml
- when: openshift_hosted_prometheus_deploy | default(false) | bool
-
-- import_playbook: openshift-service-catalog/private/config.yml
- when: openshift_enable_service_catalog | default(true) | bool
-
-- import_playbook: openshift-management/private/config.yml
- when: openshift_management_install_management | default(false) | bool
-
-- name: Print deprecated variable warning message if necessary
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - debug: msg="{{__deprecation_message}}"
- when:
- - __deprecation_message | default ('') | length > 0
+- import_playbook: common/private/components.yml
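With the blocks above removed, deploy_cluster.yml reduces to four imports; the resulting file is simply:

---
- import_playbook: init/main.yml

- import_playbook: common/private/control_plane.yml

- import_playbook: openshift-node/private/config.yml

- import_playbook: common/private/components.yml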
diff --git a/playbooks/gcp/openshift-cluster/build_base_image.yml b/playbooks/gcp/openshift-cluster/build_base_image.yml
new file mode 100644
index 000000000..8e9b0024a
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/build_base_image.yml
@@ -0,0 +1,163 @@
+---
+# This playbook ensures that a base image is up to date with all of the required settings
+- name: Launch image build instance
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: Require openshift_gcp_root_image
+ fail:
+ msg: "A root OS image name or family is required for base image building. Please ensure `openshift_gcp_root_image` is defined."
+ when: openshift_gcp_root_image is undefined
+
+ - name: Create the image instance disk
+ gce_pd:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ name: "{{ openshift_gcp_prefix }}build-image-instance"
+ disk_type: pd-ssd
+ image: "{{ openshift_gcp_root_image }}"
+ size_gb: 10
+ state: present
+
+ - name: Launch the image build instance
+ gce:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ machine_type: n1-standard-1
+ instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: present
+ tags:
+ - build-image-instance
+ disk_auto_delete: false
+ disks:
+ - "{{ openshift_gcp_prefix }}build-image-instance"
+ register: gce
+
+ - add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: build_instance_ips
+ with_items: "{{ gce.instance_data }}"
+
+ - name: Wait for instance to respond to SSH
+ wait_for:
+ delay: 1
+ host: "{{ item.public_ip }}"
+ port: 22
+ state: started
+ timeout: 120
+ with_items: "{{ gce.instance_data }}"
+
+- name: Prepare instance content sources
+ pre_tasks:
+ - set_fact:
+ allow_rhel_subscriptions: "{{ rhsub_skip | default('no', True) | lower in ['no', 'false'] }}"
+ - set_fact:
+ using_rhel_subscriptions: "{{ (deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise'] or ansible_distribution == 'RedHat') and allow_rhel_subscriptions }}"
+ hosts: build_instance_ips
+ roles:
+ - role: rhel_subscribe
+ when: using_rhel_subscriptions
+ - role: openshift_repos
+ vars:
+ openshift_additional_repos: []
+ post_tasks:
+ - name: Add custom repositories
+ include_role:
+ name: openshift_gcp
+ tasks_from: add_custom_repositories.yml
+ - name: Add the Google Cloud repo
+ yum_repository:
+ name: google-cloud
+ description: Google Cloud Compute
+ baseurl: https://packages.cloud.google.com/yum/repos/google-cloud-compute-el7-x86_64
+ gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+ gpgcheck: yes
+ repo_gpgcheck: yes
+ state: present
+ when: ansible_os_family == "RedHat"
+ - name: Add the jdetiber-qemu-user-static copr repo
+ yum_repository:
+ name: jdetiber-qemu-user-static
+ description: QEMU user static COPR
+ baseurl: https://copr-be.cloud.fedoraproject.org/results/jdetiber/qemu-user-static/epel-7-$basearch/
+ gpgkey: https://copr-be.cloud.fedoraproject.org/results/jdetiber/qemu-user-static/pubkey.gpg
+ gpgcheck: yes
+ repo_gpgcheck: no
+ state: present
+ when: ansible_os_family == "RedHat"
+ - name: Accept GPG keys for the repos
+ command: yum -q makecache -y --disablerepo='*' --enablerepo='google-cloud,jdetiber-qemu-user-static'
+ - name: Install qemu-user-static
+ package:
+ name: qemu-user-static
+ state: present
+ - name: Start and enable systemd-binfmt service
+ systemd:
+ name: systemd-binfmt
+ state: started
+ enabled: yes
+
+- name: Build image
+ hosts: build_instance_ips
+ pre_tasks:
+ - name: Set up core host GCP configuration
+ include_role:
+ name: openshift_gcp
+ tasks_from: configure_gcp_base_image.yml
+ roles:
+ - role: os_update_latest
+ post_tasks:
+ - name: Disable all repos on RHEL
+ command: subscription-manager repos --disable="*"
+ when: using_rhel_subscriptions
+ - name: Enable repos for packages on RHEL
+ command: subscription-manager repos --enable="rhel-7-server-rpms" --enable="rhel-7-server-extras-rpms"
+ when: using_rhel_subscriptions
+ - name: Install common image prerequisites
+ package: name={{ item }} state=latest
+ with_items:
+ # required by Ansible
+ - PyYAML
+ - google-compute-engine
+ - google-compute-engine-init
+ - google-config
+ - wget
+ - git
+ - net-tools
+ - bind-utils
+ - iptables-services
+ - bridge-utils
+ - bash-completion
+ - name: Clean yum metadata
+ command: yum clean all
+ args:
+ warn: no
+ when: ansible_os_family == "RedHat"
+
+- name: Commit image
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Terminate the image build instance
+ gce:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: absent
+ - name: Save the new image
+ command: gcloud --project "{{ openshift_gcp_project}}" compute images create "{{ openshift_gcp_base_image_name | default(openshift_gcp_base_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}" --source-disk "{{ openshift_gcp_prefix }}build-image-instance" --source-disk-zone "{{ openshift_gcp_zone }}" --family "{{ openshift_gcp_base_image }}"
+ - name: Remove the image instance disk
+ gce_pd:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ name: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: absent
diff --git a/playbooks/gcp/openshift-cluster/build_image.yml b/playbooks/gcp/openshift-cluster/build_image.yml
new file mode 100644
index 000000000..787de8ebc
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/build_image.yml
@@ -0,0 +1,106 @@
+---
+- name: Verify prerequisites for image build
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: Require openshift_gcp_base_image
+ fail:
+ msg: "A base image name or family is required for image building. Please ensure `openshift_gcp_base_image` is defined."
+ when: openshift_gcp_base_image is undefined
+
+- name: Launch image build instance
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: Set facts
+ set_fact:
+ openshift_node_bootstrap: True
+ openshift_master_unsupported_embedded_etcd: True
+
+ - name: Create the image instance disk
+ gce_pd:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ name: "{{ openshift_gcp_prefix }}build-image-instance"
+ disk_type: pd-ssd
+ image: "{{ openshift_gcp_base_image }}"
+ size_gb: 10
+ state: present
+
+ - name: Launch the image build instance
+ gce:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ machine_type: n1-standard-1
+ instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: present
+ tags:
+ - build-image-instance
+ disk_auto_delete: false
+ disks:
+ - "{{ openshift_gcp_prefix }}build-image-instance"
+ register: gce
+
+ - name: add host to nodes
+ add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: nodes
+ with_items: "{{ gce.instance_data }}"
+
+ - name: Wait for instance to respond to SSH
+ wait_for:
+ delay: 1
+ host: "{{ item.public_ip }}"
+ port: 22
+ state: started
+ timeout: 120
+ with_items: "{{ gce.instance_data }}"
+
+- hosts: nodes
+ tasks:
+ - name: Set facts
+ set_fact:
+ openshift_node_bootstrap: True
+
+# This is the part that installs all of the software and configs for the instance
+# to become a node.
+- import_playbook: ../../openshift-node/private/image_prep.yml
+
+# Add additional GCP specific behavior
+- hosts: nodes
+ tasks:
+ - include_role:
+ name: openshift_gcp
+ tasks_from: node_cloud_config.yml
+ - include_role:
+ name: openshift_gcp
+ tasks_from: frequent_log_rotation.yml
+
+- name: Commit image
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Terminate the image build instance
+ gce:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: absent
+ - name: Save the new image
+ command: gcloud --project "{{ openshift_gcp_project}}" compute images create "{{ openshift_gcp_image_name | default(openshift_gcp_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}" --source-disk "{{ openshift_gcp_prefix }}build-image-instance" --source-disk-zone "{{ openshift_gcp_zone }}" --family "{{ openshift_gcp_image }}"
+ - name: Remove the image instance disk
+ gce_pd:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ name: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: absent
diff --git a/playbooks/gcp/openshift-cluster/deprovision.yml b/playbooks/gcp/openshift-cluster/deprovision.yml
new file mode 100644
index 000000000..589fddd2f
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/deprovision.yml
@@ -0,0 +1,10 @@
+# This playbook terminates a running cluster
+---
+- name: Terminate running cluster and remove all supporting resources in GCE
+ hosts: localhost
+ connection: local
+ tasks:
+ - include_role:
+ name: openshift_gcp
+ vars:
+ state: absent
diff --git a/playbooks/gcp/openshift-cluster/install.yml b/playbooks/gcp/openshift-cluster/install.yml
new file mode 100644
index 000000000..fb35b4348
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/install.yml
@@ -0,0 +1,33 @@
+# This playbook installs onto a provisioned cluster
+---
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: place all scale groups into Ansible groups
+ include_role:
+ name: openshift_gcp
+ tasks_from: setup_scale_group_facts.yml
+
+- name: run the init
+ import_playbook: ../../init/main.yml
+
+- name: configure the control plane
+ import_playbook: ../../common/private/control_plane.yml
+
+- name: ensure the masters are configured as nodes
+ import_playbook: ../../openshift-node/private/config.yml
+
+- name: run the GCP specific post steps
+ import_playbook: install_gcp.yml
+
+- name: install components
+ import_playbook: ../../common/private/components.yml
+
+- hosts: primary_master
+ gather_facts: no
+ tasks:
+ - name: Retrieve cluster configuration
+ fetch:
+ src: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
+ dest: "/tmp/"
+ flat: yes
diff --git a/playbooks/gcp/openshift-cluster/install_gcp.yml b/playbooks/gcp/openshift-cluster/install_gcp.yml
new file mode 100644
index 000000000..09db78971
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/install_gcp.yml
@@ -0,0 +1,21 @@
+---
+- hosts: masters
+ gather_facts: no
+ tasks:
+ - name: create master health check service
+ include_role:
+ name: openshift_gcp
+ tasks_from: configure_master_healthcheck.yml
+ - name: configure node bootstrapping
+ include_role:
+ name: openshift_gcp
+ tasks_from: configure_master_bootstrap.yml
+ when:
+ - openshift_master_bootstrap_enabled | default(False)
+ - name: configure node bootstrap autoapprover
+ include_role:
+ name: openshift_bootstrap_autoapprover
+ tasks_from: main
+ when:
+ - openshift_master_bootstrap_enabled | default(False)
+ - openshift_master_bootstrap_auto_approve | default(False) | bool
diff --git a/playbooks/gcp/openshift-cluster/inventory.yml b/playbooks/gcp/openshift-cluster/inventory.yml
new file mode 100644
index 000000000..96de6d6db
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/inventory.yml
@@ -0,0 +1,10 @@
+---
+- name: Set up the connection variables for retrieving inventory from GCE
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: materialize the inventory
+ include_role:
+ name: openshift_gcp
+ tasks_from: dynamic_inventory.yml
diff --git a/playbooks/gcp/openshift-cluster/launch.yml b/playbooks/gcp/openshift-cluster/launch.yml
new file mode 100644
index 000000000..02f00408a
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/launch.yml
@@ -0,0 +1,12 @@
+# This playbook launches a new cluster or converges it if already launched
+---
+- import_playbook: build_image.yml
+ when: openshift_gcp_build_image | default(False) | bool
+
+- import_playbook: provision.yml
+
+- hosts: localhost
+ tasks:
+ - meta: refresh_inventory
+
+- import_playbook: install.yml
diff --git a/playbooks/gcp/provision.yml b/playbooks/gcp/openshift-cluster/provision.yml
index b6edf9961..293a195c9 100644
--- a/playbooks/gcp/provision.yml
+++ b/playbooks/gcp/openshift-cluster/provision.yml
@@ -3,11 +3,10 @@
hosts: localhost
connection: local
gather_facts: no
+ roles:
+ - openshift_gcp
tasks:
-
- - name: provision a GCP cluster in the specified project
+ - name: recalculate the dynamic inventory
import_role:
name: openshift_gcp
-
-- name: run the cluster deploy
- import_playbook: ../deploy_cluster.yml
+ tasks_from: dynamic_inventory.yml
diff --git a/playbooks/gcp/openshift-cluster/publish_image.yml b/playbooks/gcp/openshift-cluster/publish_image.yml
new file mode 100644
index 000000000..76fd49e9c
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/publish_image.yml
@@ -0,0 +1,9 @@
+---
+- name: Publish the most recent image
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - import_role:
+ name: openshift_gcp
+ tasks_from: publish_image.yml
diff --git a/playbooks/gcp/openshift-cluster/roles b/playbooks/gcp/openshift-cluster/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml
index 15b3dd492..81f4dd183 100644
--- a/playbooks/init/base_packages.yml
+++ b/playbooks/init/base_packages.yml
@@ -1,6 +1,8 @@
---
+# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+
- name: Install packages necessary for installer
- hosts: oo_all_hosts
+ hosts: "{{ l_scale_up_hosts | default('oo_all_hosts') }}"
any_errors_fatal: true
tasks:
- when:
@@ -14,7 +16,9 @@
- iproute
- "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
- "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
+ - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else '' }}"
- yum-utils
+ when: item != ''
register: result
until: result is succeeded
diff --git a/playbooks/init/facts.yml b/playbooks/init/basic_facts.yml
index 8e4206948..a9bf06693 100644
--- a/playbooks/init/facts.yml
+++ b/playbooks/init/basic_facts.yml
@@ -4,15 +4,13 @@
any_errors_fatal: true
tasks:
-- name: Initialize host facts
- # l_upgrade_non_node_hosts is passed in via play during control-plane-only
- # upgrades; otherwise oo_all_hosts is used.
- hosts: "{{ l_upgrade_non_node_hosts | default('oo_all_hosts') }}"
+- name: Initialize basic host facts
+ # l_init_fact_hosts is passed in via play during control-plane-only
+ # upgrades and scale-up plays; otherwise oo_all_hosts is used.
+ hosts: "{{ l_init_fact_hosts | default('oo_all_hosts') }}"
+ roles:
+ - role: openshift_facts
tasks:
- - name: load openshift_facts module
- import_role:
- name: openshift_facts
-
# TODO: Should this role be refactored into health_checks??
- name: Run openshift_sanitize_inventory to set variables
import_role:
@@ -58,41 +56,6 @@
- l_atomic_docker_version.stdout | replace('"', '') is version_compare('1.12','>=')
msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
- - name: Gather Cluster facts
- openshift_facts:
- role: common
- local_facts:
- deployment_type: "{{ openshift_deployment_type }}"
- deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
- hostname: "{{ openshift_hostname | default(None) }}"
- ip: "{{ openshift_ip | default(None) }}"
- public_hostname: "{{ openshift_public_hostname | default(None) }}"
- public_ip: "{{ openshift_public_ip | default(None) }}"
- portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
- http_proxy: "{{ openshift_http_proxy | default(None) }}"
- https_proxy: "{{ openshift_https_proxy | default(None) }}"
- no_proxy: "{{ openshift_no_proxy | default(None) }}"
- generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
-
- - name: Set fact of no_proxy_internal_hostnames
- openshift_facts:
- role: common
- local_facts:
- no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
- - name: Initialize openshift.node.sdn_mtu
- openshift_facts:
- role: node
- local_facts:
- sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
-
- name: Initialize special first-master variables
hosts: oo_first_master
roles:
@@ -104,3 +67,11 @@
first_master_client_binary: "{{ openshift_client_binary }}"
#Some roles may require this to be set for first master
openshift_client_binary: "{{ openshift_client_binary }}"
+
+- name: Disable web console if required
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - set_fact:
+ openshift_web_console_install: False
+ when: openshift_deployment_subtype == 'registry' or ( osm_disabled_features is defined and 'WebConsole' in osm_disabled_features )
diff --git a/playbooks/init/cluster_facts.yml b/playbooks/init/cluster_facts.yml
new file mode 100644
index 000000000..636679e32
--- /dev/null
+++ b/playbooks/init/cluster_facts.yml
@@ -0,0 +1,42 @@
+---
+- name: Initialize cluster facts
+ # l_init_fact_hosts is passed in via play during control-plane-only
+ # upgrades and scale-up plays; otherwise oo_all_hosts is used.
+ hosts: "{{ l_init_fact_hosts | default('oo_all_hosts') }}"
+ roles:
+ - role: openshift_facts
+ tasks:
+ - name: Gather Cluster facts
+ openshift_facts:
+ role: common
+ local_facts:
+ deployment_type: "{{ openshift_deployment_type }}"
+ deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
+ hostname: "{{ openshift_hostname | default(None) }}"
+ ip: "{{ openshift_ip | default(None) }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ public_ip: "{{ openshift_public_ip | default(None) }}"
+ portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+ http_proxy: "{{ openshift_http_proxy | default(None) }}"
+ https_proxy: "{{ openshift_https_proxy | default(None) }}"
+ no_proxy: "{{ openshift_no_proxy | default(None) }}"
+ generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
+
+ - name: Set fact of no_proxy_internal_hostnames
+ openshift_facts:
+ role: common
+ local_facts:
+ no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+ - name: Initialize openshift.node.sdn_mtu
+ openshift_facts:
+ role: node
+ local_facts:
+ sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
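Like basic_facts.yml, this new cluster_facts.yml honors the l_init_fact_hosts override, so control-plane-only flows can restrict fact gathering to non-node hosts. The 3.9 upgrade playbooks earlier in this patch pass exactly that; an equivalent minimal invocation from a hypothetical wrapper at the repository root, shown only as a sketch, would be:

---
# Illustrative only: limit fact initialization to control-plane hosts.
- import_playbook: playbooks/init/main.yml
  vars:
    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"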
diff --git a/playbooks/init/evaluate_groups.yml b/playbooks/init/evaluate_groups.yml
index c4cd226c9..e8bf1892c 100644
--- a/playbooks/init/evaluate_groups.yml
+++ b/playbooks/init/evaluate_groups.yml
@@ -45,9 +45,13 @@
- name: Evaluate groups - Fail if no etcd hosts group is defined
fail:
msg: >
- Running etcd as an embedded service is no longer supported.
+ Running etcd as an embedded service is no longer supported. If this is a
+ new install please define an 'etcd' group with either one, three or five
+ hosts. These hosts may be the same hosts as your masters. If this is an
+ upgrade please see https://docs.openshift.com/container-platform/latest/install_config/upgrading/migrating_embedded_etcd.html
+ for documentation on how to migrate from embedded to external etcd.
when:
- - g_etcd_hosts | default([]) | length not in [3,1]
+ - g_etcd_hosts | default([]) | length not in [5,3,1]
- not (openshift_node_bootstrap | default(False))
- name: Evaluate oo_all_hosts
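The reworded failure above accepts one, three, or five etcd hosts, and those hosts may be the masters themselves. As an illustration, a minimal YAML-format inventory fragment (placeholder hostnames) that satisfies the check by co-locating etcd with three masters:

---
# Illustrative inventory fragment; hostnames are placeholders.
all:
  children:
    masters:
      hosts:
        master1.example.com:
        master2.example.com:
        master3.example.com:
    etcd:
      hosts:
        master1.example.com:
        master2.example.com:
        master3.example.com: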
diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml
index 8a3f4682d..9886691e0 100644
--- a/playbooks/init/main.yml
+++ b/playbooks/init/main.yml
@@ -1,4 +1,7 @@
---
+# skip_verison and l_install_base_packages are passed in via prerequisites.yml.
+# skip_sanity_checks is passed in via openshift-node/private/image_prep.yml
+
- name: Initialization Checkpoint Start
hosts: all
gather_facts: false
@@ -15,7 +18,13 @@
- import_playbook: evaluate_groups.yml
-- import_playbook: facts.yml
+- import_playbook: basic_facts.yml
+
+# base_packages needs to be setup for openshift_facts.py to run correctly.
+- import_playbook: base_packages.yml
+ when: l_install_base_packages | default(False) | bool
+
+- import_playbook: cluster_facts.yml
- import_playbook: version.yml
when: not (skip_verison | default(False))
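The ordering above matters: openshift_facts.py needs its Python dependencies present, so callers that may touch freshly provisioned hosts opt in with l_install_base_packages before cluster_facts.yml runs. The comment names prerequisites.yml as the caller, but that file is not part of this hunk, so the following invocation is only a sketch of the expected shape:

---
# Hypothetical caller: install base packages before gathering cluster facts.
- import_playbook: init/main.yml
  vars:
    skip_verison: True   # spelling matches the variable name init/main.yml checks
    l_install_base_packages: True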
diff --git a/playbooks/init/repos.yml b/playbooks/init/repos.yml
index 667f38ddd..655a7e83a 100644
--- a/playbooks/init/repos.yml
+++ b/playbooks/init/repos.yml
@@ -1,6 +1,8 @@
---
+# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+
- name: Setup yum repositories for all hosts
- hosts: oo_all_hosts
+ hosts: "{{ l_scale_up_hosts | default('oo_all_hosts') }}"
gather_facts: no
tasks:
- name: subscribe instances to Red Hat Subscription Manager
diff --git a/playbooks/init/sanity_checks.yml b/playbooks/init/sanity_checks.yml
index 52bcf42c0..fbbb3f8fb 100644
--- a/playbooks/init/sanity_checks.yml
+++ b/playbooks/init/sanity_checks.yml
@@ -1,4 +1,5 @@
---
+# l_sanity_check_hosts may be passed in during scale-up plays
- name: Verify Requirements
hosts: oo_first_master
roles:
@@ -11,5 +12,5 @@
# Thus, sanity_checks cannot gather new information about any hosts.
- name: Run variable sanity checks
sanity_checks:
- check_hosts: "{{ groups['oo_all_hosts'] }}"
+ check_hosts: "{{ l_sanity_check_hosts | default(groups['oo_all_hosts']) }}"
run_once: True
diff --git a/playbooks/init/validate_hostnames.yml b/playbooks/init/validate_hostnames.yml
index 86e0b2416..b49f7dd08 100644
--- a/playbooks/init/validate_hostnames.yml
+++ b/playbooks/init/validate_hostnames.yml
@@ -25,7 +25,7 @@
when:
- lookupip.stdout != '127.0.0.1'
- lookupip.stdout not in ansible_all_ipv4_addresses
- - openshift_hostname_check | default(true)
+ - openshift_hostname_check | default(true) | bool
- name: Validate openshift_ip exists on node when defined
fail:
@@ -40,4 +40,4 @@
when:
- openshift_ip is defined
- openshift_ip not in ansible_all_ipv4_addresses
- - openshift_ip_check | default(true)
+ - openshift_ip_check | default(true) | bool
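The added | bool matters because values supplied on the command line or in an INI inventory arrive as strings, and a non-empty string such as "false" is treated as truthy on its own in a when expression. A small illustration of the difference (not part of this patch):

---
# Illustrative only: the bare string "false" passes a when check until coerced.
- hosts: localhost
  gather_facts: false
  vars:
    openshift_hostname_check: "false"
  tasks:
    - debug:
        msg: "runs, because the non-empty string is truthy"
      when: openshift_hostname_check | default(true)
    - debug:
        msg: "never shown, because | bool coerces the string to False"
      when: openshift_hostname_check | default(true) | bool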
diff --git a/playbooks/init/version.yml b/playbooks/init/version.yml
index 8d1d61fde..962ee7220 100644
--- a/playbooks/init/version.yml
+++ b/playbooks/init/version.yml
@@ -6,7 +6,7 @@
- include_role:
name: openshift_version
tasks_from: first_master.yml
- - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version }}"
+ - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version | default('') }}"
# NOTE: We set this even on etcd hosts as they may also later run as masters,
# and we don't want to install wrong version of docker and have to downgrade
@@ -16,7 +16,7 @@
vars:
l_default_version_set_hosts: "oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master"
l_first_master_openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
- l_first_master_openshift_pkg_version: "{{ hostvars[groups.oo_first_master.0].openshift_pkg_version }}"
+ l_first_master_openshift_pkg_version: "{{ hostvars[groups.oo_first_master.0].openshift_pkg_version | default('') }}"
l_first_master_openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag}}"
tasks:
- set_fact:
diff --git a/playbooks/openshift-checks/adhoc.yml b/playbooks/openshift-checks/adhoc.yml
index 414090733..249222ae4 100644
--- a/playbooks/openshift-checks/adhoc.yml
+++ b/playbooks/openshift-checks/adhoc.yml
@@ -11,6 +11,7 @@
# usage. Running this play only in localhost speeds up execution.
hosts: localhost
connection: local
+ gather_facts: false
roles:
- openshift_health_checker
vars:
diff --git a/playbooks/openshift-etcd/private/upgrade_main.yml b/playbooks/openshift-etcd/private/upgrade_main.yml
index 8997680f9..fea588260 100644
--- a/playbooks/openshift-etcd/private/upgrade_main.yml
+++ b/playbooks/openshift-etcd/private/upgrade_main.yml
@@ -1,4 +1,37 @@
---
+# Prior to 3.6, openshift-ansible created etcd serving certificates
+# without a SubjectAlternativeName entry for the system hostname. The
+# SAN list in Go 1.8 is now (correctly) authoritative and since
+# openshift-ansible configures masters to talk to etcd hostnames
+# rather than IP addresses, we must correct etcd certificates.
+#
+# This play examines the etcd serving certificate SANs on each etcd
+# host and records whether or not the system hostname is missing.
+- name: Examine etcd serving certificate SAN
+ hosts: oo_etcd_to_config
+ tasks:
+ - slurp:
+ src: /etc/etcd/server.crt
+ register: etcd_serving_cert
+ - set_fact:
+ __etcd_cert_lacks_hostname: "{{ (openshift.common.hostname not in (etcd_serving_cert.content | b64decode | lib_utils_oo_parse_certificate_san)) | bool }}"
+
+# Redeploy etcd certificates when hostnames were missing from etcd
+# serving certificate SANs.
+- import_playbook: redeploy-certificates.yml
+ when:
+ - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false])
+
+- import_playbook: restart.yml
+ vars:
+ g_etcd_certificates_expired: "{{ ('expired' in (hostvars | lib_utils_oo_select_keys(groups['etcd']) | lib_utils_oo_collect('check_results.check_results.etcd') | lib_utils_oo_collect('health'))) | bool }}"
+ when:
+ - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false])
+
+- import_playbook: ../../openshift-master/private/restart.yml
+ when:
+ - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false])
+
# For 1.4/3.4 we want to upgrade everyone to etcd-3.0. etcd docs say to
# upgrade from 2.0.x to 2.1.x to 2.2.x to 2.3.x to 3.0.x. While this is a tedius
# task for RHEL and CENTOS it's simply not possible in Fedora unless you've
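The plays above decide whether to redeploy by parsing the SAN list with the repo's lib_utils_oo_parse_certificate_san filter. For a quick manual spot check of the same condition, an ad-hoc play like the following (illustrative only, relying on plain openssl and assuming the standard oo_etcd_to_config group has already been evaluated) prints the SANs of the etcd serving certificate so the hostname can be compared by eye:

---
# Illustrative only: print the SAN list of /etc/etcd/server.crt on etcd hosts.
- hosts: oo_etcd_to_config
  gather_facts: false
  tasks:
    - name: Show etcd server certificate SANs
      shell: openssl x509 -in /etc/etcd/server.crt -noout -text | grep -A1 'Subject Alternative Name'
      register: etcd_san
      changed_when: false
    - debug:
        var: etcd_san.stdout_lines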
diff --git a/playbooks/openshift-etcd/scaleup.yml b/playbooks/openshift-etcd/scaleup.yml
index 7e9ab6834..656454fe3 100644
--- a/playbooks/openshift-etcd/scaleup.yml
+++ b/playbooks/openshift-etcd/scaleup.yml
@@ -1,4 +1,51 @@
---
+- import_playbook: ../init/evaluate_groups.yml
+
+- name: Ensure there are new_etcd
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: >
+ Detected no new_etcd in inventory. Please add hosts to the
+ new_etcd host group to add etcd hosts.
+ when:
+ - g_new_etcd_hosts | default([]) | length == 0
+
+ - fail:
+ msg: >
+      Detected a new_etcd host that is also a member of new_masters or new_nodes.
+      Please run playbooks/openshift-master/scaleup.yml or
+      playbooks/openshift-node/scaleup.yml before running this play.
+ when: >
+ inventory_hostname in (groups['new_masters'] | default([]))
+ or inventory_hostname in (groups['new_nodes'] | default([]))
+
+# We only need to run this if etcd is being installed on a standalone host;
+# if etcd is part of a master or node group, there is no need to
+# re-run prerequisites.
+- import_playbook: ../prerequisites.yml
+ vars:
+ # We need to ensure container_runtime is only processed for containerized
+ # etcd hosts by setting l_build_container_groups_hosts and l_etcd_scale_up_hosts
+ l_build_container_groups_hosts: "oo_new_etcd_to_config"
+ l_etcd_scale_up_hosts: "oo_hosts_containerized_managed_true"
+ l_scale_up_hosts: "oo_new_etcd_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_new_etcd_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_new_etcd_to_config'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config']) }}"
+ when:
+ - inventory_hostname not in groups['oo_masters']
+ - inventory_hostname not in groups['oo_nodes_to_config']
+
+# If this etcd host is part of a master or node, we don't need to run
+# prerequisites; we can just init facts as normal.
- import_playbook: ../init/main.yml
+ vars:
+ skip_verison: True
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_new_etcd_to_config"
+ when:
+ - inventory_hostname in groups['oo_masters']
+ - inventory_hostname in groups['oo_nodes_to_config']
- import_playbook: private/scaleup.yml
diff --git a/playbooks/openshift-etcd/upgrade.yml b/playbooks/openshift-etcd/upgrade.yml
index ccc797527..77999d92c 100644
--- a/playbooks/openshift-etcd/upgrade.yml
+++ b/playbooks/openshift-etcd/upgrade.yml
@@ -1,4 +1,8 @@
---
-- import_playbook: ../init/evaluate_groups.yml
+- import_playbook: ../init/main.yml
+ vars:
+ skip_verison: True
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}"
- import_playbook: private/upgrade_main.yml
diff --git a/playbooks/openshift-grafana/config.yml b/playbooks/openshift-grafana/config.yml
new file mode 100644
index 000000000..c7814207c
--- /dev/null
+++ b/playbooks/openshift-grafana/config.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
diff --git a/playbooks/openshift-grafana/private/config.yml b/playbooks/openshift-grafana/private/config.yml
new file mode 100644
index 000000000..ac753d63b
--- /dev/null
+++ b/playbooks/openshift-grafana/private/config.yml
@@ -0,0 +1,6 @@
+---
+- name: Deploy grafana server
+ hosts: masters
+ tasks:
+ - include_role:
+ name: openshift_grafana
diff --git a/playbooks/openshift-grafana/private/filter_plugins b/playbooks/openshift-grafana/private/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/openshift-grafana/private/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/openshift-grafana/private/lookup_plugins b/playbooks/openshift-grafana/private/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/openshift-grafana/private/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/openshift-grafana/private/roles b/playbooks/openshift-grafana/private/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/openshift-grafana/private/roles
@@ -0,0 +1 @@
+../../../roles/
\ No newline at end of file
diff --git a/playbooks/openshift-hosted/deploy_registry.yml b/playbooks/openshift-hosted/deploy_registry.yml
new file mode 100644
index 000000000..2453329dd
--- /dev/null
+++ b/playbooks/openshift-hosted/deploy_registry.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/openshift_hosted_registry.yml
diff --git a/playbooks/openshift-hosted/deploy_router.yml b/playbooks/openshift-hosted/deploy_router.yml
new file mode 100644
index 000000000..e832eeeea
--- /dev/null
+++ b/playbooks/openshift-hosted/deploy_router.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/openshift_hosted_router.yml
diff --git a/playbooks/openshift-hosted/private/openshift_default_storage_class.yml b/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
index 62fe0dd60..c59ebcead 100644
--- a/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
+++ b/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
@@ -3,4 +3,6 @@
hosts: oo_first_master
roles:
- role: openshift_default_storage_class
- when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce' or openshift_cloudprovider_kind == 'openstack')
+ when:
+ - openshift_cloudprovider_kind is defined
+ - openshift_cloudprovider_kind in ['aws','gce','openstack','vsphere']
diff --git a/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml
index b817221b8..d88209593 100644
--- a/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml
+++ b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml
@@ -88,8 +88,7 @@
- name: Redeploy docker registry
command: >
- {{ openshift_client_binary }} deploy dc/docker-registry
- --latest
+ {{ openshift_client_binary }} rollout latest dc/docker-registry
--config={{ mktemp.stdout }}/admin.kubeconfig
-n default
diff --git a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
index 0df748f47..952a5f4ee 100644
--- a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
+++ b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
@@ -129,8 +129,7 @@
- name: Redeploy router
command: >
- {{ openshift_client_binary }} deploy dc/router
- --latest
+ {{ openshift_client_binary }} rollout latest dc/router
--config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
-n default
diff --git a/playbooks/openshift-loadbalancer/private/config.yml b/playbooks/openshift-loadbalancer/private/config.yml
index 54c8483c8..4a83dd955 100644
--- a/playbooks/openshift-loadbalancer/private/config.yml
+++ b/playbooks/openshift-loadbalancer/private/config.yml
@@ -24,7 +24,7 @@
openshift_use_nuage | default(false),
nuage_mon_rest_server_port | default(none)))
+ openshift_loadbalancer_additional_backends | default([]) }}"
- openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
+ openshift_image_tag: "{{ hostvars[groups.oo_masters_to_config.0].openshift_image_tag }}"
roles:
- role: openshift_loadbalancer
- role: tuned
diff --git a/playbooks/openshift-logging/private/config.yml b/playbooks/openshift-logging/private/config.yml
index d6b26647c..f2a57f9f8 100644
--- a/playbooks/openshift-logging/private/config.yml
+++ b/playbooks/openshift-logging/private/config.yml
@@ -11,6 +11,38 @@
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+- name: Update vm.max_map_count for ES 5.x
+ hosts: all
+ gather_facts: false
+ tasks:
+ - when:
+ - openshift_logging_es5_techpreview | default(false) | bool
+ - openshift_deployment_type in ['origin']
+ block:
+ - name: Checking vm max_map_count value
+ command:
+ cat /proc/sys/vm/max_map_count
+ register: _vm_max_map_count
+
+ - stat:
+ path: /etc/sysctl.d/99-elasticsearch.conf
+ register: _99_es_conf
+
+ - name: Check for current value of vm.max_map_count in 99-elasticsearch.conf
+ command: >
+ sed /etc/sysctl.d/99-elasticsearch.conf -e 's/vm.max_map_count=\(.*\)/\1/'
+ register: _curr_vm_max_map_count
+ when: _99_es_conf.stat.exists
+
+ - name: Updating vm.max_map_count value
+ sysctl:
+ name: vm.max_map_count
+ value: 262144
+ sysctl_file: "/etc/sysctl.d/99-elasticsearch.conf"
+ reload: yes
+ when:
+ - _vm_max_map_count.stdout | default(0) | int < 262144 | int or _curr_vm_max_map_count.stdout | default(0) | int < 262144
+
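Elasticsearch 5.x requires vm.max_map_count to be at least 262144, which is what the play above enforces and persists via /etc/sysctl.d/99-elasticsearch.conf. A quick follow-up check (illustrative only) that the setting actually took effect:

---
# Illustrative only: assert the kernel setting enforced by the logging play.
- hosts: all
  gather_facts: false
  tasks:
    - name: Read current vm.max_map_count
      command: sysctl -n vm.max_map_count
      register: map_count
      changed_when: false
    - assert:
        that:
          - map_count.stdout | int >= 262144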
- name: OpenShift Aggregated Logging
hosts: oo_first_master
roles:
@@ -20,10 +52,10 @@
- name: Update Master configs
hosts: oo_masters:!oo_first_master
tasks:
- - block:
- - import_role:
- name: openshift_logging
- tasks_from: update_master_config
+ - include_role:
+ name: openshift_logging
+ tasks_from: update_master_config
+ when: not openshift.common.version_gte_3_9
- name: Logging Install Checkpoint End
hosts: all
diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml
index 85be0e600..ca514ed26 100644
--- a/playbooks/openshift-master/private/additional_config.yml
+++ b/playbooks/openshift-master/private/additional_config.yml
@@ -16,7 +16,6 @@
vars:
cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
etcd_urls: "{{ openshift.master.etcd_urls }}"
- openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
roles:
- role: openshift_project_request_template
diff --git a/playbooks/openshift-master/private/certificates-backup.yml b/playbooks/openshift-master/private/certificates-backup.yml
index 4dbc041b0..56af18ca7 100644
--- a/playbooks/openshift-master/private/certificates-backup.yml
+++ b/playbooks/openshift-master/private/certificates-backup.yml
@@ -28,6 +28,7 @@
path: "{{ openshift.common.config_base }}/master/{{ item }}"
state: absent
with_items:
+ # certificates_to_synchronize is a custom filter in lib_utils
- "{{ hostvars[inventory_hostname] | certificates_to_synchronize(include_keys=false, include_ca=false) }}"
- "etcd.server.crt"
- "etcd.server.key"
diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml
index 153ea9993..d2fc2eed8 100644
--- a/playbooks/openshift-master/private/config.yml
+++ b/playbooks/openshift-master/private/config.yml
@@ -78,7 +78,6 @@
console_url: "{{ openshift_master_console_url | default(None) }}"
console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
- ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
- name: Inspect state of first master config settings
@@ -166,7 +165,6 @@
hosts: oo_masters_to_config
any_errors_fatal: true
vars:
- openshift_master_ha: "{{ openshift.master.ha }}"
openshift_master_count: "{{ openshift.master.master_count }}"
openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
@@ -186,6 +184,7 @@
- role: openshift_buildoverrides
- role: nickhammond.logrotate
- role: openshift_master
+ openshift_master_ha: "{{ (groups.oo_masters | length > 1) | bool }}"
openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
diff --git a/playbooks/openshift-master/private/restart.yml b/playbooks/openshift-master/private/restart.yml
index 5cb284935..17d90533c 100644
--- a/playbooks/openshift-master/private/restart.yml
+++ b/playbooks/openshift-master/private/restart.yml
@@ -3,16 +3,13 @@
- name: Restart masters
hosts: oo_masters_to_config
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
- handlers:
- - import_tasks: ../../../roles/openshift_master/handlers/main.yml
roles:
- openshift_facts
post_tasks:
- include_tasks: tasks/restart_hosts.yml
when: openshift_rolling_restart_mode | default('services') == 'system'
-
- - include_tasks: tasks/restart_services.yml
+ - import_role:
+ name: openshift_master
+ tasks_from: restart.yml
when: openshift_rolling_restart_mode | default('services') == 'services'
diff --git a/playbooks/openshift-master/private/scaleup.yml b/playbooks/openshift-master/private/scaleup.yml
index 007b23ea3..20ebf70d3 100644
--- a/playbooks/openshift-master/private/scaleup.yml
+++ b/playbooks/openshift-master/private/scaleup.yml
@@ -8,7 +8,6 @@
- openshift_facts:
role: master
local_facts:
- ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
- name: Update master count
modify_yaml:
diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
index 59e2b515c..cc812c300 100644
--- a/playbooks/openshift-master/private/tasks/wire_aggregator.yml
+++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
@@ -142,11 +142,6 @@
state: absent
changed_when: False
-- name: Setup extension file for service console UI
- template:
- src: ../templates/openshift-ansible-catalog-console.js
- dest: /etc/origin/master/openshift-ansible-catalog-console.js
-
- name: Update master config
yedit:
state: present
@@ -166,8 +161,6 @@
value: [X-Remote-Group]
- key: authConfig.requestHeader.extraHeaderPrefixes
value: [X-Remote-Extra-]
- - key: assetConfig.extensionScripts
- value: [/etc/origin/master/openshift-ansible-catalog-console.js]
- key: kubernetesMasterConfig.apiServerArguments.runtime-config
value: [apis/settings.k8s.io/v1alpha1=true]
- key: admissionConfig.pluginConfig.PodPreset.configuration.kind
@@ -178,37 +171,50 @@
value: false
register: yedit_output
-#restart master serially here
-- name: restart master api
- systemd: name={{ openshift_service_type }}-master-api state=restarted
- when:
- - yedit_output.changed
-
-# We retry the controllers because the API may not be 100% initialized yet.
-- name: restart master controllers
- command: "systemctl restart {{ openshift_service_type }}-master-controllers"
- retries: 3
- delay: 5
- register: result
- until: result.rc == 0
- when:
- - yedit_output.changed
+# Only add the catalog extension script on clusters older than 3.9. From 3.9 on,
+# the console can discover whether the template service broker is running.
+- when: not openshift.common.version_gte_3_9
+ block:
+ - name: Setup extension file for service console UI
+ template:
+ src: ../templates/openshift-ansible-catalog-console.js
+ dest: /etc/origin/master/openshift-ansible-catalog-console.js
+
+ - name: Update master config
+ yedit:
+ state: present
+ src: /etc/origin/master/master-config.yaml
+ key: assetConfig.extensionScripts
+ value: [/etc/origin/master/openshift-ansible-catalog-console.js]
+ register: yedit_asset_config_output
-- name: Verify API Server
- # Using curl here since the uri module requires python-httplib2 and
- # wait_for port doesn't provide health information.
- command: >
- curl --silent --tlsv1.2
- --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {{ openshift.master.api_url }}/healthz/ready
- args:
- # Disables the following warning:
- # Consider using get_url or uri module rather than running curl
- warn: no
- register: api_available_output
- until: api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- changed_when: false
- when:
- - yedit_output.changed
+#restart master serially here
+- when: yedit_output.changed or (yedit_asset_config_output is defined and yedit_asset_config_output.changed)
+ block:
+ - name: restart master api
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
+
+ # We retry the controllers because the API may not be 100% initialized yet.
+ - name: restart master controllers
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
+ retries: 3
+ delay: 5
+ register: result
+ until: result.rc == 0
+
+ - name: Verify API Server
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
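The reshuffled tasks above follow a common Ansible pattern: register a result for each edit, then guard the restart block on whether either edit reported a change. A minimal sketch of that pattern, assuming the same yedit module from lib_utils and using a placeholder key:

```yaml
# Sketch only: guard a restart on the combined "changed" state of two edits.
- name: Update master config
  yedit:
    state: present
    src: /etc/origin/master/master-config.yaml
    key: some.example.key          # placeholder key for illustration
    value: some-example-value
  register: yedit_output

- when: yedit_output.changed or (yedit_asset_config_output is defined and yedit_asset_config_output.changed)
  block:
    - name: restart master api
      systemd:
        name: "{{ openshift_service_type }}-master-api"
        state: restarted
```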
diff --git a/playbooks/openshift-master/scaleup.yml b/playbooks/openshift-master/scaleup.yml
index 7d31340a2..09e205afc 100644
--- a/playbooks/openshift-master/scaleup.yml
+++ b/playbooks/openshift-master/scaleup.yml
@@ -1,22 +1,43 @@
---
- import_playbook: ../init/evaluate_groups.yml
-- name: Ensure there are new_masters or new_nodes
+- name: Ensure there are new_masters and new_nodes
hosts: localhost
connection: local
gather_facts: no
tasks:
- fail:
+ # new_masters must be part of new_nodes as well; otherwise if new_nodes
+ # is not present, oo_nodes_to_config will contain all existing nodes.
msg: >
- Detected no new_masters or no new_nodes in inventory. Please
- add hosts to the new_masters and new_nodes host groups to add
- masters.
- when:
- - g_new_master_hosts | default([]) | length == 0
- - g_new_node_hosts | default([]) | length == 0
+ Detected no new_masters and/or no new_nodes in inventory. New
+ masters must be part of both new_masters and new_nodes groups.
+ If you are adding just new_nodes, use the
+ playbooks/openshift-node/scaleup.yml play.
+ when: >
+ g_new_master_hosts | default([]) | length == 0
+ or g_new_node_hosts | default([]) | length == 0
-# Need a better way to do the above check for node without
-# running evaluate_groups and init/main.yml
-- import_playbook: ../init/main.yml
+- name: Ensure new_masters are part of new_nodes
+ hosts: oo_masters_to_config
+ connection: local
+ gather_facts: no
+ tasks:
+ - fail:
+ # new_masters must be part of new_nodes as well.
+ msg: >
+ Each host in new_masters must also appear in new_nodes
+ when: inventory_hostname not in groups['oo_nodes_to_config']
+
+- import_playbook: ../prerequisites.yml
+ vars:
+ l_scale_up_hosts: "oo_nodes_to_config:oo_masters_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) }}"
+
+- import_playbook: ../init/version.yml
+ vars:
+ l_openshift_version_set_hosts: "oo_masters_to_config:oo_nodes_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:oo_nodes_to_config"
- import_playbook: private/scaleup.yml
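The net effect of the checks above: a master scale-up only proceeds when every host in new_masters is also listed in new_nodes. A hypothetical inventory fragment (YAML inventory format; host names invented for illustration) that satisfies the check:

```yaml
# Hypothetical scale-up groups; every new master is repeated under new_nodes.
all:
  children:
    new_masters:
      hosts:
        master-3.example.com:
    new_nodes:
      hosts:
        master-3.example.com:        # same host as above
        infra-node-2.example.com:
```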
diff --git a/playbooks/openshift-metrics/private/config.yml b/playbooks/openshift-metrics/private/config.yml
index 1e237e3f0..889ea77b1 100644
--- a/playbooks/openshift-metrics/private/config.yml
+++ b/playbooks/openshift-metrics/private/config.yml
@@ -25,6 +25,7 @@
import_role:
name: openshift_metrics
tasks_from: update_master_config.yaml
+ when: not openshift.common.version_gte_3_9
- name: Metrics Install Checkpoint End
hosts: all
diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml
index 7249ced70..7371bd7ac 100644
--- a/playbooks/openshift-node/private/restart.yml
+++ b/playbooks/openshift-node/private/restart.yml
@@ -16,6 +16,7 @@
until: not (l_docker_restart_docker_in_node_result is failed)
retries: 3
delay: 30
+ when: openshift_node_restart_docker_required | default(True)
- name: Restart containerized services
service:
diff --git a/playbooks/openshift-node/redeploy-certificates.yml b/playbooks/openshift-node/redeploy-certificates.yml
index 8b7272485..cdf816fbf 100644
--- a/playbooks/openshift-node/redeploy-certificates.yml
+++ b/playbooks/openshift-node/redeploy-certificates.yml
@@ -4,3 +4,5 @@
- import_playbook: private/redeploy-certificates.yml
- import_playbook: private/restart.yml
+ vars:
+ openshift_node_restart_docker_required: False
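Read together with the openshift-node restart hunk just above, this gives the certificate-redeploy flow a way to restart node services without bouncing Docker: the restart play now skips its Docker restart unless openshift_node_restart_docker_required is true (the default), and this playbook passes False. A condensed sketch of the gated task, with the retry bookkeeping from the original file omitted:

```yaml
# Sketch of the callee side in private/restart.yml: the Docker restart
# only runs when the flag is left at its default of True.
- name: Restart docker
  service:
    name: docker
    state: restarted
  when: openshift_node_restart_docker_required | default(True)
```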
diff --git a/playbooks/openshift-node/scaleup.yml b/playbooks/openshift-node/scaleup.yml
index cf13692ae..9cc7263b7 100644
--- a/playbooks/openshift-node/scaleup.yml
+++ b/playbooks/openshift-node/scaleup.yml
@@ -12,9 +12,27 @@
new_nodes host group to add nodes.
when:
- g_new_node_hosts | default([]) | length == 0
+ - fail:
+ msg: >
+ Please run playbooks/openshift-master/scaleup.yml if you need to
+ scale up both masters and nodes. This playbook is only needed if
+ you are adding new nodes and not new masters.
+ when:
+ - g_new_node_hosts | default([]) | length > 0
+ - g_new_master_hosts | default([]) | length > 0
+
+# if g_new_node_hosts is not empty, oo_nodes_to_config will be set to
+# g_new_node_hosts via evaluate_groups.yml
+
+- import_playbook: ../prerequisites.yml
+ vars:
+ l_scale_up_hosts: "oo_nodes_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) }}"
-# Need a better way to do the above check for node without
-# running evaluate_groups and init/main.yml
-- import_playbook: ../init/main.yml
+- import_playbook: ../init/version.yml
+ vars:
+ l_openshift_version_set_hosts: "oo_nodes_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_nodes_to_config"
- import_playbook: private/config.yml
diff --git a/playbooks/openshift-prometheus/private/uninstall.yml b/playbooks/openshift-prometheus/private/uninstall.yml
new file mode 100644
index 000000000..2df39c2a8
--- /dev/null
+++ b/playbooks/openshift-prometheus/private/uninstall.yml
@@ -0,0 +1,8 @@
+---
+- name: Uninstall Prometheus
+ hosts: masters[0]
+ tasks:
+ - name: Run the Prometheus Uninstall Role Tasks
+ include_role:
+ name: openshift_prometheus
+ tasks_from: uninstall
diff --git a/playbooks/openshift-prometheus/uninstall.yml b/playbooks/openshift-prometheus/uninstall.yml
new file mode 100644
index 000000000..c92ade786
--- /dev/null
+++ b/playbooks/openshift-prometheus/uninstall.yml
@@ -0,0 +1,2 @@
+---
+- import_playbook: private/uninstall.yml
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
index d361d6278..842bb34de 100644
--- a/playbooks/openstack/README.md
+++ b/playbooks/openstack/README.md
@@ -30,15 +30,17 @@ version 10) or newer. It must also satisfy these requirements:
- look at
the [Minimum Hardware Requirements page][hardware-requirements]
for production
-* The keypair for SSH must be available in openstack
-* `keystonerc` file that lets you talk to the openstack services
+* The keypair for SSH must be available in OpenStack
+* `keystonerc` file that lets you talk to the OpenStack services
* NOTE: only Keystone V2 is currently supported
+* A host with a supported version of [Ansible][ansible] installed; see the
+ [Setup section of the openshift-ansible README][openshift-ansible-setup]
+ for details on the requirements.
Optional:
* External Neutron network with a floating IP address pool
-
## Installation
There are four main parts to the installation:
@@ -68,12 +70,11 @@ First, you need to select where to run [Ansible][ansible] from (the
*Ansible host*). This can be the computer you read this guide on or an
OpenStack VM you'll create specifically for this purpose.
-We will use
-a
+This guide will use a
[Docker image that has all the dependencies installed][control-host-image] to
make things easier. If you don't want to use Docker, take a look at
the [Ansible host dependencies][ansible-dependencies] and make sure
-they're installed.
+they are installed.
Your *Ansible host* needs to have the following:
@@ -183,13 +184,16 @@ Then run the provision + install playbook -- this will create the OpenStack
resources:
```bash
-$ ansible-playbook --user openshift -i inventory \
- openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yaml \
- -e openshift_repos_enable_testing=true
+$ ansible-playbook --user openshift \
+ -i openshift-ansible/playbooks/openstack/inventory.py \
+ -i inventory \
+ openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yml
```
-Note, you may want to use the testing repo for development purposes only.
-Normally, `openshift_repos_enable_testing` should not be specified.
+In addition to *your* inventory with your OpenShift and OpenStack
+configuration, we are also supplying the [dynamic inventory][dynamic] from
+`openshift-ansible/inventory`. It's a script that will look at the Nova servers
+and other resources that will be created and let Ansible know about them.
If you're using multiple inventories, make sure you pass the path to
the right one to `-i`.
@@ -219,6 +223,7 @@ advanced configuration:
[ansible]: https://www.ansible.com/
[openshift-ansible]: https://github.com/openshift/openshift-ansible
+[openshift-ansible-setup]: https://github.com/openshift/openshift-ansible#setup
[devstack]: https://docs.openstack.org/devstack/
[tripleo]: http://tripleo.org/
[ansible-dependencies]: ./advanced-configuration.md#dependencies-for-localhost-ansible-controladmin-node
@@ -233,3 +238,4 @@ advanced configuration:
[loadbalancer]: ./advanced-configuration.md#multi-master-configuration
[external-dns]: ./advanced-configuration.md#dns-configuration-variables
[cinder-registry]: ./advanced-configuration.md#creating-and-using-a-cinder-volume-for-the-openshift-registry
+[dynamic]: http://docs.ansible.com/ansible/latest/intro_dynamic_inventory.html
diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md
index 2c9b70b5f..e8f4cfc32 100644
--- a/playbooks/openstack/advanced-configuration.md
+++ b/playbooks/openstack/advanced-configuration.md
@@ -1,9 +1,8 @@
## Dependencies for localhost (ansible control/admin node)
-* [Ansible 2.3](https://pypi.python.org/pypi/ansible)
-* [Ansible-galaxy](https://pypi.python.org/pypi/ansible-galaxy-local-deps)
-* [jinja2](http://jinja.pocoo.org/docs/2.9/)
-* [shade](https://pypi.python.org/pypi/shade)
+* [Ansible](https://pypi.python.org/pypi/ansible) version >= 2.4.0
+* [jinja2](http://jinja.pocoo.org/docs/2.9/) version >= 2.10
+* [shade](https://pypi.python.org/pypi/shade) version >= 1.26
* python-jmespath / [jmespath](https://pypi.python.org/pypi/jmespath)
* python-dns / [dnspython](https://pypi.python.org/pypi/dnspython)
* Become (sudo) is not required.
@@ -133,7 +132,7 @@ You can also access the OpenShift cluster with a web browser by going to:
https://master-0.openshift.example.com:8443
Note that for this to work, the OpenShift nodes must be accessible
-from your computer and it's DNS configuration must use the cruster's
+from your computer and its DNS configuration must use the cluster's
DNS.
@@ -153,7 +152,7 @@ openstack stack delete --wait --yes openshift.example.com
Pay special attention to the values in the first paragraph -- these
will depend on your OpenStack environment.
-Note that the provsisioning playbooks update the original Neutron subnet
+Note that the provisioning playbooks update the original Neutron subnet
created with the Heat stack to point to the configured DNS servers.
So the provisioned cluster nodes will start using those natively as
default nameservers. Technically, this allows to deploy OpenShift clusters
@@ -162,7 +161,7 @@ without dnsmasq proxies.
The `openshift_openstack_clusterid` and `openshift_openstack_public_dns_domain`
will form the cluster's public DNS domain all your servers will be under. With
the default values, this will be `openshift.example.com`. For workloads, the
-default subdomain is 'apps'. That sudomain can be set as well by the
+default subdomain is 'apps'. That subdomain can be set as well by the
`openshift_openstack_app_subdomain` variable in the inventory.
If you want to use a two sets of hostnames for public and private/prefixed DNS
@@ -334,7 +333,7 @@ or your trusted network. The most important is the `openshift_openstack_node_ing
that restricts public access to the deployed DNS server and cluster
nodes' ephemeral ports range.
-Note, the command ``curl https://api.ipify.org`` helps fiding an external
+Note, the command ``curl https://api.ipify.org`` helps finding an external
IP address of your box (the ansible admin node).
There is also the `manage_packages` variable (defaults to True) you
@@ -372,6 +371,112 @@ In order to set a custom entrypoint, update `openshift_master_cluster_public_hos
Note than an empty hostname does not work, so if your domain is `openshift.example.com`,
you cannot set this value to simply `openshift.example.com`.
+
+## Using Cinder-backed Persistent Volumes
+
+You will need to set up OpenStack credentials. You can try putting this in your
+`inventory/group_vars/OSEv3.yml`:
+
+ openshift_cloudprovider_kind: openstack
+ openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
+ openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}"
+ openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
+ openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_PROJECT_NAME') }}"
+ openshift_cloudprovider_openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
+ openshift_cloudprovider_openstack_blockstorage_version: v2
+
+**NOTE**: you must specify the Block Storage version as v2, because OpenShift
+does not support the v3 API yet and the version detection is currently not
+working properly.
+
+For more information, consult the [Configuring for OpenStack page in the OpenShift documentation][openstack-credentials].
+
+[openstack-credentials]: https://docs.openshift.org/latest/install_config/configuring_openstack.html#install-config-configuring-openstack
+
+**NOTE**: the OpenStack integration currently requires DNS to be configured and
+running, and the `openshift_hostname` variable must match the Nova server name
+for each node. The cluster deployment will fail without it. If you use the
+provided OpenStack dynamic inventory and configure the
+`openshift_openstack_dns_nameservers` Ansible variable, this will be handled
+for you.
+
+After a successful deployment, the cluster is configured for Cinder persistent
+volumes.
+
+### Validation
+
+1. Log in and create a new project (with `oc login` and `oc new-project`)
+2. Create a file called `cinder-claim.yaml` with the following contents:
+
+```yaml
+apiVersion: "v1"
+kind: "PersistentVolumeClaim"
+metadata:
+ name: "claim1"
+spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+```
+3. Run `oc create -f cinder-claim.yaml` to create the Persistent Volume Claim object in OpenShift
+4. Run `oc describe pvc claim1` to verify that the claim was created and its Status is `Bound`
+5. Run `openstack volume list`
+ * A new volume called `kubernetes-dynamic-pvc-UUID` should be created
+ * Its size should be `1`
+ * It should not be attached to any server
+6. Create a file called `mysql-pod.yaml` with the following contents:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: mysql
+ labels:
+ name: mysql
+spec:
+ containers:
+ - resources:
+ limits:
+ cpu: 0.5
+ image: openshift/mysql-55-centos7
+ name: mysql
+ env:
+ - name: MYSQL_ROOT_PASSWORD
+ value: yourpassword
+ - name: MYSQL_USER
+ value: wp_user
+ - name: MYSQL_PASSWORD
+ value: wp_pass
+ - name: MYSQL_DATABASE
+ value: wp_db
+ ports:
+ - containerPort: 3306
+ name: mysql
+ volumeMounts:
+ - name: mysql-persistent-storage
+ mountPath: /var/lib/mysql/data
+ volumes:
+ - name: mysql-persistent-storage
+ persistentVolumeClaim:
+ claimName: claim1
+```
+
+7. Run `oc create -f mysql-pod.yaml` to create the pod
+8. Run `oc describe pod mysql`
+ * Its events should show that the pod has successfully attached the volume above
+ * It should show no errors
+ * `openstack volume list` should show the volume attached to an OpenShift app node
+ * NOTE: this can take several seconds
+9. After a while, `oc get pod` should show the `mysql` pod as running
+10. Run `oc delete pod mysql` to remove the pod
+ * The Cinder volume should no longer be attached
+11. Run `oc delete pvc claim1` to remove the volume claim
+ * The Cinder volume should be deleted
+
+
+
## Creating and using a Cinder volume for the OpenShift registry
You can optionally have the playbooks create a Cinder volume and set
@@ -415,7 +520,7 @@ OpenStack)[openstack] for more information.
[openstack]: https://docs.openshift.org/latest/install_config/configuring_openstack.html
-Next, we need to instruct OpenShift to use the Cinder volume for it's
+Next, we need to instruct OpenShift to use the Cinder volume for its
registry. Again in `OSEv3.yml`:
#openshift_hosted_registry_storage_kind: openstack
@@ -470,12 +575,12 @@ The **Cinder volume ID**, **filesystem** and **volume size** variables
must correspond to the values in your volume. The volume ID must be
the **UUID** of the Cinder volume, *not its name*.
-We can do formate the volume for you if you ask for it in
+The volume can also be formatted if you configure it in
`inventory/group_vars/all.yml`:
openshift_openstack_prepare_and_format_registry_volume: true
-**NOTE:** doing so **will destroy any data that's currently on the volume**!
+**NOTE:** Formatting **will destroy any data that's currently on the volume**!
You can also run the registry setup playbook directly:
diff --git a/playbooks/openstack/sample-inventory/inventory.py b/playbooks/openstack/inventory.py
index 45cc4e15a..d5a8c3e24 100755
--- a/playbooks/openstack/sample-inventory/inventory.py
+++ b/playbooks/openstack/inventory.py
@@ -15,18 +15,10 @@ import json
import shade
-def build_inventory():
- '''Build the dynamic inventory.'''
- cloud = shade.openstack_cloud()
-
+def base_openshift_inventory(cluster_hosts):
+ '''Set the base openshift inventory.'''
inventory = {}
- # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
- # environment variable.
- cluster_hosts = [
- server for server in cloud.list_servers()
- if 'metadata' in server and 'clusterid' in server.metadata]
-
masters = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'master']
@@ -67,6 +59,34 @@ def build_inventory():
inventory['dns'] = {'hosts': dns}
inventory['lb'] = {'hosts': load_balancers}
+ return inventory
+
+
+def get_docker_storage_mountpoints(volumes):
+ '''Check volumes to see if they're being used for docker storage'''
+ docker_storage_mountpoints = {}
+ for volume in volumes:
+ if volume.metadata.get('purpose') == "openshift_docker_storage":
+ for attachment in volume.attachments:
+ if attachment.server_id in docker_storage_mountpoints:
+ docker_storage_mountpoints[attachment.server_id].append(attachment.device)
+ else:
+ docker_storage_mountpoints[attachment.server_id] = [attachment.device]
+ return docker_storage_mountpoints
+
+
+def build_inventory():
+ '''Build the dynamic inventory.'''
+ cloud = shade.openstack_cloud()
+
+ # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
+ # environment variable.
+ cluster_hosts = [
+ server for server in cloud.list_servers()
+ if 'metadata' in server and 'clusterid' in server.metadata]
+
+ inventory = base_openshift_inventory(cluster_hosts)
+
for server in cluster_hosts:
if 'group' in server.metadata:
group = server.metadata.group
@@ -76,6 +96,9 @@ def build_inventory():
inventory['_meta'] = {'hostvars': {}}
+ # cinder volumes used for docker storage
+ docker_storage_mountpoints = get_docker_storage_mountpoints(cloud.list_volumes())
+
for server in cluster_hosts:
ssh_ip_address = server.public_v4 or server.private_v4
hostvars = {
@@ -89,13 +112,15 @@ def build_inventory():
# TODO(shadower): what about multiple networks?
if server.private_v4:
hostvars['private_v4'] = server.private_v4
+ hostvars['openshift_ip'] = server.private_v4
+
# NOTE(shadower): Yes, we set both hostname and IP to the private
# IP address for each node. OpenStack doesn't resolve nodes by
# name at all, so using a hostname here would require an internal
# DNS which would complicate the setup and potentially introduce
# performance issues.
- hostvars['openshift_ip'] = server.private_v4
- hostvars['openshift_hostname'] = server.private_v4
+ hostvars['openshift_hostname'] = server.metadata.get(
+ 'openshift_hostname', server.private_v4)
hostvars['openshift_public_hostname'] = server.name
if server.metadata['host-type'] == 'cns':
@@ -109,6 +134,11 @@ def build_inventory():
if node_labels:
hostvars['openshift_node_labels'] = node_labels
+ # check for attached docker storage volumes
+ if 'os-extended-volumes:volumes_attached' in server:
+ if server.id in docker_storage_mountpoints:
+ hostvars['docker_storage_mountpoints'] = ' '.join(docker_storage_mountpoints[server.id])
+
inventory['_meta']['hostvars'][server.name] = hostvars
return inventory
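As an illustration of what the updated script emits (this is not captured output; the host name, addresses, and device path are invented), a node with one Cinder volume tagged purpose=openshift_docker_storage would pick up hostvars roughly like this:

```yaml
# Hypothetical _meta.hostvars entry produced by the dynamic inventory.
app-node-0.openshift.example.com:
  private_v4: 192.168.99.12
  openshift_ip: 192.168.99.12
  # taken from the server's openshift_hostname metadata when present,
  # otherwise falls back to the private IP
  openshift_hostname: app-node-0
  openshift_public_hostname: app-node-0.openshift.example.com
  # space-separated device list built by get_docker_storage_mountpoints()
  docker_storage_mountpoints: /dev/vdb
```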
diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml
index a38d7bff7..73c1926a0 100644
--- a/playbooks/openstack/openshift-cluster/provision.yml
+++ b/playbooks/openstack/openshift-cluster/provision.yml
@@ -26,8 +26,8 @@
- name: Gather facts for the new nodes
setup:
-- name: set common facts
- import_playbook: ../../init/facts.yml
+- import_playbook: ../../init/basic_facts.yml
+- import_playbook: ../../init/cluster_facts.yml
# TODO(shadower): consider splitting this up so people can stop here
diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
index 481807dc9..1287b25f3 100644
--- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
@@ -20,6 +20,7 @@ openshift_hosted_registry_wait: True
#openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
#openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}"
#openshift_cloudprovider_openstack_region: "{{ lookup('env', 'OS_REGION_NAME') }}"
+#openshift_cloudprovider_openstack_blockstorage_version: v2
## Use Cinder volume for Openshift registry:
@@ -42,7 +43,7 @@ openshift_hosted_registry_wait: True
# NOTE(shadower): the hostname check seems to always fail because the
# host's floating IP address doesn't match the address received from
# inside the host.
-openshift_override_hostname_check: true
+openshift_hostname_check: false
# For POCs or demo environments that are using smaller instances than
# the official recommended values for RAM and DISK, uncomment the line below.
diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml
index 7802f83d9..0b76ca862 100644
--- a/playbooks/prerequisites.yml
+++ b/playbooks/prerequisites.yml
@@ -1,18 +1,21 @@
---
+# l_scale_up_hosts may be passed in via various scaleup plays.
+
- import_playbook: init/main.yml
vars:
skip_verison: True
+ l_install_base_packages: True
- import_playbook: init/validate_hostnames.yml
when: not (skip_validate_hostnames | default(False))
- import_playbook: init/repos.yml
-- import_playbook: init/base_packages.yml
-
# This is required for container runtime for crio, only needs to run once.
- name: Configure os_firewall
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config
+ hosts: "{{ l_scale_up_hosts | default(l_default_firewall_hosts) }}"
+ vars:
+ l_default_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config"
roles:
- role: os_firewall
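As the new comment notes, l_scale_up_hosts is only set by the scale-up entry points; when it is absent, the firewall play falls back to the full default host pattern. A minimal sketch of a caller narrowing the run to the nodes being added, mirroring the node scale-up play earlier in this diff:

```yaml
# Sketch: limit prerequisites (including os_firewall) to the new nodes only.
- import_playbook: ../prerequisites.yml
  vars:
    l_scale_up_hosts: "oo_nodes_to_config"
```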