Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 20
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml | 8
-rw-r--r--  playbooks/init/version.yml | 4
-rw-r--r--  playbooks/openshift-master/private/certificates-backup.yml | 1
-rw-r--r--  playbooks/openstack/advanced-configuration.md | 106
-rw-r--r--  playbooks/openstack/sample-inventory/group_vars/OSEv3.yml | 1
-rwxr-xr-x  playbooks/openstack/sample-inventory/inventory.py | 6
12 files changed, 155 insertions, 22 deletions
diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
index ef8233b67..6d82fa928 100644
--- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
+++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
@@ -17,6 +17,8 @@
- name: Create service signer certificate
hosts: oo_first_master
+ roles:
+ - openshift_facts
tasks:
- name: Create remote temp directory for creating certs
command: mktemp -d /tmp/openshift-ansible-XXXXXXX
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index ffb11670d..8392e21ee 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -51,13 +51,19 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ openshift_client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
register: l_docker_upgrade_drain_result
until: not (l_docker_upgrade_drain_result is failed)
- retries: 60
- delay: 60
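+ # With the default drain timeout of 0 the drain is retried once and a
+ # persistent failure aborts the upgrade; with a non-zero timeout,
+ # `oc adm drain --timeout` enforces the limit and a timed-out drain does
+ # not fail the play.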
+ retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+ delay: 5
+ failed_when:
+ - l_docker_upgrade_drain_result is failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
- include_tasks: tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 412075d41..e89f06f17 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -22,6 +22,8 @@
# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
- name: Pre master upgrade - Upgrade all storage
hosts: oo_first_master
+ roles:
+ - openshift_facts
tasks:
- name: Upgrade all storage
command: >
@@ -49,10 +51,9 @@
vars:
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
+ roles:
+ - openshift_facts
tasks:
- - import_role:
- name: openshift_facts
-
# Run the pre-upgrade hook if defined:
- debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
@@ -127,6 +128,7 @@
hosts: oo_masters_to_config
roles:
- { role: openshift_cli }
+ - { role: openshift_facts }
vars:
__master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
tasks:
@@ -289,12 +291,18 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_control_plane_drain_result
until: not (l_upgrade_control_plane_drain_result is failed)
- retries: 60
- delay: 60
+ retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+ delay: 5
+ failed_when:
+ - l_upgrade_control_plane_drain_result is failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
roles:
- openshift_facts
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 464af3ae6..850442b3b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -33,12 +33,18 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not (l_upgrade_nodes_drain_result is failed)
- retries: 60
- delay: 60
+ retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+ delay: 5
+ failed_when:
+ - l_upgrade_nodes_drain_result is failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
post_tasks:
- import_role:
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index 6d59bfd0b..e259b5d09 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -50,11 +50,11 @@
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not (l_upgrade_nodes_drain_result is failed)
- retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0 | int }}"
+ retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
delay: 5
failed_when:
- l_upgrade_nodes_drain_result is failed
- - openshift_upgrade_nodes_drain_timeout | default(0) == '0'
+ - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
# Alright, let's clean up!
- name: clean up the old scale group
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
index 49e691352..9c7688981 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -7,6 +7,7 @@
hosts: oo_first_master
roles:
- { role: lib_openshift }
+ - { role: openshift_facts }
tasks:
- name: Check for invalid namespaces and SDN errors
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index 0aea5069d..552bea5e7 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -41,13 +41,13 @@
roles:
- role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/init/version.yml b/playbooks/init/version.yml
index 8d1d61fde..962ee7220 100644
--- a/playbooks/init/version.yml
+++ b/playbooks/init/version.yml
@@ -6,7 +6,7 @@
- include_role:
name: openshift_version
tasks_from: first_master.yml
- - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version }}"
+ - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version | default('') }}"
# NOTE: We set this even on etcd hosts as they may also later run as masters,
# and we don't want to install wrong version of docker and have to downgrade
@@ -16,7 +16,7 @@
vars:
l_default_version_set_hosts: "oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master"
l_first_master_openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
- l_first_master_openshift_pkg_version: "{{ hostvars[groups.oo_first_master.0].openshift_pkg_version }}"
+ l_first_master_openshift_pkg_version: "{{ hostvars[groups.oo_first_master.0].openshift_pkg_version | default('') }}"
l_first_master_openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag}}"
tasks:
- set_fact:
diff --git a/playbooks/openshift-master/private/certificates-backup.yml b/playbooks/openshift-master/private/certificates-backup.yml
index 4dbc041b0..56af18ca7 100644
--- a/playbooks/openshift-master/private/certificates-backup.yml
+++ b/playbooks/openshift-master/private/certificates-backup.yml
@@ -28,6 +28,7 @@
path: "{{ openshift.common.config_base }}/master/{{ item }}"
state: absent
with_items:
+ # certificates_to_synchronize is a custom filter in lib_utils
- "{{ hostvars[inventory_hostname] | certificates_to_synchronize(include_keys=false, include_ca=false) }}"
- "etcd.server.crt"
- "etcd.server.key"
diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md
index 0eb51e4b5..afa56d168 100644
--- a/playbooks/openstack/advanced-configuration.md
+++ b/playbooks/openstack/advanced-configuration.md
@@ -372,6 +372,112 @@ In order to set a custom entrypoint, update `openshift_master_cluster_public_hos
Note that an empty hostname does not work, so if your domain is `openshift.example.com`,
you cannot set this value to simply `openshift.example.com`.
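For example, with that domain you could point the entrypoint at a dedicated
subdomain instead; the exact hostname below is only an illustration:

```yaml
openshift_master_cluster_public_hostname: console.openshift.example.com
```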
+
+## Using Cinder-backed Persistent Volumes
+
+You will need to set up OpenStack credentials. You can try putting this in your
+`inventory/group_vars/OSEv3.yml`:
+
+ openshift_cloudprovider_kind: openstack
+ openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
+ openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}"
+ openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
+ openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_PROJECT_NAME') }}"
+ openshift_cloudprovider_openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
+ openshift_cloudprovider_openstack_blockstorage_version: v2
+
+**NOTE**: you must specify the Block Storage version as v2, because OpenShift
+does not support the v3 API yet and the version detection is currently not
+working properly.
+
+For more information, consult the [Configuring for OpenStack page in the OpenShift documentation][openstack-credentials].
+
+[openstack-credentials]: https://docs.openshift.org/latest/install_config/configuring_openstack.html#install-config-configuring-openstack
+
+**NOTE**: the OpenStack integration currently requires DNS to be configured and
+running, and the `openshift_hostname` variable must match the Nova server name
+for each node. The cluster deployment will fail without it. If you use the
+provided OpenStack dynamic inventory and configure the
+`openshift_openstack_dns_nameservers` Ansible variable, this will be handled
+for you.
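+
+For example, a minimal sketch in `inventory/group_vars/all.yml` (the nameserver
+addresses below are placeholders for your environment's resolvers):
+
+```yaml
+openshift_openstack_dns_nameservers:
+  - 203.0.113.53
+  - 203.0.113.54
+```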
+
+After a successful deployment, the cluster is configured for Cinder persistent
+volumes.
+
+### Validation
+
+1. Log in and create a new project (with `oc login` and `oc new-project`)
+2. Create a file called `cinder-claim.yaml` with the following contents:
+
+```yaml
+apiVersion: "v1"
+kind: "PersistentVolumeClaim"
+metadata:
+ name: "claim1"
+spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+```
+3. Run `oc create -f cinder-claim.yaml` to create the Persistent Volume Claim object in OpenShift
+4. Run `oc describe pvc claim1` to verify that the claim was created and its Status is `Bound`
+5. Run `openstack volume list`
+ * A new volume called `kubernetes-dynamic-pvc-UUID` should be created
+ * Its size should be `1`
+ * It should not be attached to any server
+6. Create a file called `mysql-pod.yaml` with the following contents:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: mysql
+ labels:
+ name: mysql
+spec:
+ containers:
+ - resources:
+ limits:
+ cpu: 0.5
+ image: openshift/mysql-55-centos7
+ name: mysql
+ env:
+ - name: MYSQL_ROOT_PASSWORD
+ value: yourpassword
+ - name: MYSQL_USER
+ value: wp_user
+ - name: MYSQL_PASSWORD
+ value: wp_pass
+ - name: MYSQL_DATABASE
+ value: wp_db
+ ports:
+ - containerPort: 3306
+ name: mysql
+ volumeMounts:
+ - name: mysql-persistent-storage
+ mountPath: /var/lib/mysql/data
+ volumes:
+ - name: mysql-persistent-storage
+ persistentVolumeClaim:
+ claimName: claim1
+```
+
+7. Run `oc create -f mysql-pod.yaml` to create the pod
+8. Run `oc describe pod mysql`
+ * Its events should show that the pod has successfully attached the volume above
+ * It should show no errors
+ * `openstack volume list` should show the volume attached to an OpenShift app node
+ * NOTE: this can take several seconds
+9. After a while, `oc get pod` should show the `mysql` pod as running
+10. Run `oc delete pod mysql` to remove the pod
+ * The Cinder volume should no longer be attached
+11. Run `oc delete pvc claim1` to remove the volume claim
+ * The Cinder volume should be deleted
+
+
+
## Creating and using a Cinder volume for the OpenShift registry
You can optionally have the playbooks create a Cinder volume and set
diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
index 481807dc9..a8663f946 100644
--- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
@@ -20,6 +20,7 @@ openshift_hosted_registry_wait: True
#openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
#openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}"
#openshift_cloudprovider_openstack_region: "{{ lookup('env', 'OS_REGION_NAME') }}"
+#openshift_cloudprovider_openstack_blockstorage_version: v2
## Use Cinder volume for Openshift registry:
diff --git a/playbooks/openstack/sample-inventory/inventory.py b/playbooks/openstack/sample-inventory/inventory.py
index 45cc4e15a..76e658eb7 100755
--- a/playbooks/openstack/sample-inventory/inventory.py
+++ b/playbooks/openstack/sample-inventory/inventory.py
@@ -89,13 +89,15 @@ def build_inventory():
# TODO(shadower): what about multiple networks?
if server.private_v4:
hostvars['private_v4'] = server.private_v4
+ hostvars['openshift_ip'] = server.private_v4
+
# NOTE(shadower): Yes, we set both hostname and IP to the private
# IP address for each node. OpenStack doesn't resolve nodes by
# name at all, so using a hostname here would require an internal
# DNS which would complicate the setup and potentially introduce
# performance issues.
- hostvars['openshift_ip'] = server.private_v4
- hostvars['openshift_hostname'] = server.private_v4
+ hostvars['openshift_hostname'] = server.metadata.get(
+ 'openshift_hostname', server.private_v4)
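+ # A per-server `openshift_hostname` can be supplied as Nova metadata
+ # (e.g. `openstack server set --property openshift_hostname=<fqdn> <server>`);
+ # when the key is absent, the private IP is used as before.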
hostvars['openshift_public_hostname'] = server.name
if server.metadata['host-type'] == 'cns':