Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/adhoc/atomic_openshift_tutorial_reset.yml | 77
-rw-r--r--  playbooks/adhoc/bootstrap-fedora.yml | 4
-rw-r--r--  playbooks/adhoc/create_pv/create_pv.yaml | 21
-rw-r--r--  playbooks/adhoc/create_pv/pv-template.j2 | 2
-rw-r--r--  playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml | 13
-rwxr-xr-x  playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml | 115
-rw-r--r--  playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml | 4
-rw-r--r--  playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py | 41
-rw-r--r--  playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml | 206
-rw-r--r--  playbooks/adhoc/noc/create_host.yml | 4
-rw-r--r--  playbooks/adhoc/noc/create_maintenance.yml | 2
-rw-r--r--  playbooks/adhoc/noc/get_zabbix_problems.yml | 2
-rw-r--r--  playbooks/adhoc/s3_registry/s3_registry.j2 | 23
-rw-r--r--  playbooks/adhoc/s3_registry/s3_registry.yml | 78
-rwxr-xr-x  playbooks/adhoc/sdn_restart/oo-sdn-restart.yml | 52
-rw-r--r--  playbooks/adhoc/setupnfs.yml | 21
-rw-r--r--  playbooks/adhoc/uninstall.yml | 238
-rw-r--r--  playbooks/adhoc/zabbix_setup/clean_zabbix.yml | 2
-rwxr-xr-x  playbooks/adhoc/zabbix_setup/oo-config-zaio.yml | 6
-rw-r--r--  playbooks/aws/ansible-tower/config.yml | 2
-rw-r--r--  playbooks/aws/ansible-tower/launch.yml | 5
-rw-r--r--  playbooks/aws/openshift-cluster/add_nodes.yml | 40
-rw-r--r--  playbooks/aws/openshift-cluster/cluster_hosts.yml | 21
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml | 32
-rw-r--r--  playbooks/aws/openshift-cluster/launch.yml | 14
-rw-r--r--  playbooks/aws/openshift-cluster/list.yml | 4
-rw-r--r--  playbooks/aws/openshift-cluster/scaleup.yml | 32
-rw-r--r--  playbooks/aws/openshift-cluster/service.yml | 7
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 65
-rw-r--r--  playbooks/aws/openshift-cluster/templates/user_data.j2 | 11
-rw-r--r--  playbooks/aws/openshift-cluster/terminate.yml | 42
-rw-r--r--  playbooks/aws/openshift-cluster/update.yml | 11
-rw-r--r--  playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 17
-rw-r--r--  playbooks/aws/openshift-cluster/vars.yml | 32
-rw-r--r--  playbooks/byo/openshift-cluster/cluster_hosts.yml | 17
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 7
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/README.md | 8
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_0_minor/README.md | 21
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml | 13
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md | 16
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 13
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md | 17
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml | 15
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/README.md | 16
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml | 19
l---------  playbooks/byo/openshift-master/filter_plugins | 1
l---------  playbooks/byo/openshift-master/lookup_plugins | 1
-rw-r--r--  playbooks/byo/openshift-master/restart.yml | 4
l---------  playbooks/byo/openshift-master/roles | 1
-rw-r--r--  playbooks/byo/openshift-master/scaleup.yml | 8
l---------  playbooks/byo/openshift-node/filter_plugins | 1
l---------  playbooks/byo/openshift-node/lookup_plugins | 1
l---------  playbooks/byo/openshift-node/roles | 1
-rw-r--r--  playbooks/byo/openshift-node/scaleup.yml | 8
-rw-r--r--  playbooks/byo/openshift_facts.yml | 4
-rw-r--r--  playbooks/byo/rhel_subscribe.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/additional_config.yml | 56
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 72
-rw-r--r--  playbooks/common/openshift-cluster/create_services.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 103
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml (renamed from playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml) | 0
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml (renamed from playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml) | 0
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml (renamed from playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml) | 0
-rw-r--r--  playbooks/common/openshift-cluster/update_repos_and_packages.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/ensure_system_units_have_version.sh | 51
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh | 22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check | 193
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh | 8
l---------  playbooks/common/openshift-cluster/upgrades/filter_plugins | 1
-rwxr-xr-x  playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py | 158
l---------  playbooks/common/openshift-cluster/upgrades/lookup_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/roles | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/library | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml | 112
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 648
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/library | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml | 57
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml | 88
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml | 140
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml | 9
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml | 14
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml | 57
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml | 252
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml | 159
-rw-r--r--  playbooks/common/openshift-cluster/validate_hostnames.yml | 26
-rw-r--r--  playbooks/common/openshift-docker/config.yml | 9
l---------  playbooks/common/openshift-docker/filter_plugins | 1
l---------  playbooks/common/openshift-docker/lookup_plugins | 1
l---------  playbooks/common/openshift-docker/roles | 1
-rw-r--r--  playbooks/common/openshift-etcd/config.yml | 37
-rw-r--r--  playbooks/common/openshift-etcd/service.yml | 2
-rw-r--r--  playbooks/common/openshift-master/config.yml | 202
-rwxr-xr-x  playbooks/common/openshift-master/library/modify_yaml.py | 95
-rw-r--r--  playbooks/common/openshift-master/restart.yml | 151
-rw-r--r--  playbooks/common/openshift-master/restart_hosts.yml | 39
-rw-r--r--  playbooks/common/openshift-master/restart_hosts_pacemaker.yml | 25
-rw-r--r--  playbooks/common/openshift-master/restart_services.yml | 27
-rw-r--r--  playbooks/common/openshift-master/restart_services_pacemaker.yml | 10
-rw-r--r--  playbooks/common/openshift-master/scaleup.yml | 55
-rw-r--r--  playbooks/common/openshift-master/service.yml | 2
-rw-r--r--  playbooks/common/openshift-nfs/config.yml | 6
l---------  playbooks/common/openshift-nfs/filter_plugins | 1
l---------  playbooks/common/openshift-nfs/lookup_plugins | 1
l---------  playbooks/common/openshift-nfs/roles | 1
-rw-r--r--  playbooks/common/openshift-nfs/service.yml | 18
-rw-r--r--  playbooks/common/openshift-node/config.yml | 151
-rw-r--r--  playbooks/common/openshift-node/scaleup.yml | 14
-rw-r--r--  playbooks/common/openshift-node/service.yml | 2
-rw-r--r--  playbooks/gce/openshift-cluster/add_nodes.yml | 43
-rw-r--r--  playbooks/gce/openshift-cluster/cluster_hosts.yml | 21
-rw-r--r--  playbooks/gce/openshift-cluster/config.yml | 33
-rw-r--r--  playbooks/gce/openshift-cluster/launch.yml | 45
-rw-r--r--  playbooks/gce/openshift-cluster/list.yml | 19
-rw-r--r--  playbooks/gce/openshift-cluster/service.yml | 13
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 45
-rw-r--r--  playbooks/gce/openshift-cluster/terminate.yml | 60
-rw-r--r--  playbooks/gce/openshift-cluster/update.yml | 9
-rw-r--r--  playbooks/gce/openshift-cluster/vars.yml | 22
-rw-r--r--  playbooks/gce/openshift-cluster/wip.yml | 26
-rw-r--r--  playbooks/libvirt/openshift-cluster/cluster_hosts.yml | 21
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml | 31
-rw-r--r--  playbooks/libvirt/openshift-cluster/launch.yml | 11
-rw-r--r--  playbooks/libvirt/openshift-cluster/list.yml | 14
-rw-r--r--  playbooks/libvirt/openshift-cluster/service.yml | 6
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml | 6
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 36
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/domain.xml | 5
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/network.xml | 2
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/user-data | 9
-rw-r--r--  playbooks/libvirt/openshift-cluster/terminate.yml | 8
-rw-r--r--  playbooks/libvirt/openshift-cluster/update.yml | 7
-rw-r--r--  playbooks/libvirt/openshift-cluster/vars.yml | 39
-rw-r--r--  playbooks/openstack/openshift-cluster/cluster_hosts.yml | 21
-rw-r--r--  playbooks/openstack/openshift-cluster/config.yml | 32
-rw-r--r--  playbooks/openstack/openshift-cluster/dns.yml | 47
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack.yaml | 339
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml | 14
-rw-r--r--  playbooks/openstack/openshift-cluster/launch.yml | 84
-rw-r--r--  playbooks/openstack/openshift-cluster/list.yml | 14
-rw-r--r--  playbooks/openstack/openshift-cluster/terminate.yml | 9
-rw-r--r--  playbooks/openstack/openshift-cluster/update.yml | 9
-rw-r--r--  playbooks/openstack/openshift-cluster/vars.yml | 20
157 files changed, 5073 insertions, 598 deletions
diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
index 54d3ea278..c14d08e87 100644
--- a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
+++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
@@ -1,6 +1,9 @@
# This deletes *ALL* Docker images, and uninstalls OpenShift and
# Atomic Enterprise RPMs. It is primarily intended for use
# with the tutorial as well as for developers to reset state.
+#
+---
+- include: uninstall.yml
- hosts:
- OSEv3:children
@@ -8,59 +11,6 @@
sudo: yes
tasks:
- - service: name={{ item }} state=stopped
- with_items:
- - openvswitch
- - origin-master
- - origin-node
- - atomic-openshift-master
- - atomic-openshift-node
- - openshift-master
- - openshift-node
- - atomic-enterprise-master
- - atomic-enterprise-node
- - etcd
-
- - yum: name={{ item }} state=absent
- with_items:
- - openvswitch
- - etcd
- - origin
- - origin-master
- - origin-node
- - origin-sdn-ovs
- - tuned-profiles-origin-node
- - atomic-openshift
- - atomic-openshift-master
- - atomic-openshift-node
- - atomic-openshift-sdn-ovs
- - tuned-profiles-atomic-openshift-node
- - atomic-enterprise
- - atomic-enterprise-master
- - atomic-enterprise-node
- - atomic-enterprise-sdn-ovs
- - tuned-profiles-atomic-enterprise-node
- - openshift
- - openshift-master
- - openshift-node
- - openshift-sdn-ovs
- - tuned-profiles-openshift-node
-
- - shell: systemctl reset-failed
- changed_when: False
-
- - shell: systemctl daemon-reload
- changed_when: False
-
- - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
- changed_when: False
-
- - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
- changed_when: False
-
- - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
- changed_when: False
-
- shell: docker ps -a -q | xargs docker stop
changed_when: False
failed_when: False
@@ -73,27 +23,6 @@
changed_when: False
failed_when: False
- - file: path={{ item }} state=absent
- with_items:
- - /etc/openshift-sdn
- - /root/.kube
- - /etc/origin
- - /etc/atomic-enterprise
- - /etc/openshift
- - /var/lib/origin
- - /var/lib/openshift
- - /var/lib/atomic-enterprise
- - /etc/sysconfig/origin-master
- - /etc/sysconfig/origin-node
- - /etc/sysconfig/atomic-openshift-master
- - /etc/sysconfig/atomic-openshift-node
- - /etc/sysconfig/openshift-master
- - /etc/sysconfig/openshift-node
- - /etc/sysconfig/atomic-enterprise-master
- - /etc/sysconfig/atomic-enterprise-node
- - /etc/etcd
- - /var/lib/etcd
-
- user: name={{ item }} state=absent remove=yes
with_items:
- alice
diff --git a/playbooks/adhoc/bootstrap-fedora.yml b/playbooks/adhoc/bootstrap-fedora.yml
new file mode 100644
index 000000000..471c41f16
--- /dev/null
+++ b/playbooks/adhoc/bootstrap-fedora.yml
@@ -0,0 +1,4 @@
+- hosts: OSEv3
+ tasks:
+ - name: install python and deps for ansible modules
+ raw: dnf install -y python2 python2-dnf libselinux-python libsemanage-python
diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml
index 4f0ef7a75..81c1ee653 100644
--- a/playbooks/adhoc/create_pv/create_pv.yaml
+++ b/playbooks/adhoc/create_pv/create_pv.yaml
@@ -1,20 +1,21 @@
---
-#example run:
+#example run:
# ansible-playbook -e "cli_volume_size=1" \
# -e "cli_device_name=/dev/xvdf" \
# -e "cli_hosttype=master" \
-# -e "cli_environment=ops" \
+# -e "cli_clusterid=ops" \
# create_pv.yaml
-# FIXME: we need to change "environment" to "clusterid" as that's what it really is now.
#
- name: Create a volume and attach it to master
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
vars:
cli_volume_type: gp2
cli_volume_iops: ''
oo_name: "{{ groups['tag_host-type_' ~ cli_hosttype] |
- intersect(groups['tag_environment_' ~ cli_environment]) |
+ intersect(groups['oo_clusterid_' ~ cli_clusterid]) |
first }}"
pre_tasks:
- fail:
@@ -24,7 +25,7 @@
- cli_volume_size
- cli_device_name
- cli_hosttype
- - cli_environment
+ - cli_clusterid
- name: set oo_name fact
set_fact:
@@ -55,7 +56,7 @@
args:
tags:
Name: "pv-{{ hostvars[oo_name]['ec2_tag_Name'] }}"
- env: "{{cli_environment}}"
+ clusterid: "{{cli_clusterid}}"
register: voltags
- debug: var=voltags
@@ -103,7 +104,7 @@
filesystem:
dev: "{{ cli_device_name }}"
fstype: ext4
-
+
- name: Mount the dev
mount:
name: "{{ pv_mntdir }}"
@@ -112,7 +113,7 @@
state: mounted
- name: chgrp g+rwXs
- file:
+ file:
path: "{{ pv_mntdir }}"
mode: 'g+rwXs'
recurse: yes
@@ -149,11 +150,11 @@
# We have to use the shell module because we can't set env vars with the command module.
- name: "Place PV into oc"
- shell: "KUBECONFIG=/etc/openshift/master/admin.kubeconfig oc create -f {{ pv_template | quote }}"
+ shell: "KUBECONFIG=/etc/origin/master/admin.kubeconfig oc create -f {{ pv_template | quote }}"
register: oc_output
- debug: var=oc_output
- - fail:
+ - fail:
msg: "Failed to add {{ pv_template }} to master."
when: oc_output.rc != 0
diff --git a/playbooks/adhoc/create_pv/pv-template.j2 b/playbooks/adhoc/create_pv/pv-template.j2
index 5654ef6c4..df082614b 100644
--- a/playbooks/adhoc/create_pv/pv-template.j2
+++ b/playbooks/adhoc/create_pv/pv-template.j2
@@ -10,7 +10,7 @@ spec:
storage: {{ vol_size }}Gi
accessModes:
- ReadWriteOnce
- persistentVolumeReclaimPolicy: Recycle
+ persistentVolumeReclaimPolicy: Retain
awsElasticBlockStore:
volumeID: aws://{{ vol_az }}/{{ vol_id }}
fsType: ext4
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
index c9ae923bb..4d32fc40b 100644
--- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
+++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
@@ -27,9 +27,8 @@
gather_facts: no
vars:
- cli_volume_type: io1
+ cli_volume_type: gp2
cli_volume_size: 30
- cli_volume_iops: "{{ 30 * cli_volume_size }}"
pre_tasks:
- fail:
@@ -104,7 +103,6 @@
volume_size: "{{ cli_volume_size | default(30, True)}}"
volume_type: "{{ cli_volume_type }}"
device_name: /dev/xvdb
- iops: "{{ 30 * cli_volume_size }}"
register: vol
- debug: var=vol
@@ -115,7 +113,7 @@
args:
tags:
Name: "{{ ec2_tag_Name }}"
- env: "{{ ec2_tag_environment }}"
+ clusterid: "{{ ec2_tag_clusterid }}"
register: voltags
- name: Wait for volume to attach
@@ -142,10 +140,3 @@
- debug: var=dockerstart
- - name: Wait for docker to stabilize
- pause:
- seconds: 30
-
- # leaving off the '-t' for docker exec. With it, it doesn't work with ansible and tty support
- - name: update zabbix docker items
- command: docker exec -i oso-rhel7-zagg-client /usr/local/bin/cron-send-docker-metrics.py
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
new file mode 100755
index 000000000..72fcd77b3
--- /dev/null
+++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
@@ -0,0 +1,115 @@
+#!/usr/bin/ansible-playbook
+---
+# This playbook coverts docker to go from loopback to direct-lvm (the Red Hat recommended way to run docker).
+#
+# It requires the block device to be already provisioned and attached to the host. This is a generic playbook,
+# meant to be used for manual conversion. For AWS specific conversions, use the other playbook in this directory.
+#
+# To run:
+# ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=<host to run on> -e cli_docker_device=<path to device>
+#
+# Example:
+# ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=twiesttest-master-fd32 -e cli_docker_device=/dev/sdb
+#
+# Notes:
+# * This will remove /var/lib/docker!
+# * You may need to re-deploy docker images after this is run (like monitoring)
+
+- name: Fix docker to have a provisioned iops drive
+ hosts: "{{ cli_name }}"
+ user: root
+ connection: ssh
+ gather_facts: no
+
+ pre_tasks:
+ - fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - cli_docker_device
+
+ - name: start docker
+ service:
+ name: docker
+ state: started
+
+ - name: Determine if loopback
+ shell: docker info | grep 'Data file:.*loop'
+ register: loop_device_check
+ ignore_errors: yes
+
+ - debug:
+ var: loop_device_check
+
+ - name: fail if we don't detect loopback
+ fail:
+ msg: loopback not detected! Please investigate manually.
+ when: loop_device_check.rc == 1
+
+ - name: stop zagg client monitoring container
+ service:
+ name: oso-rhel7-zagg-client
+ state: stopped
+ ignore_errors: yes
+
+ - name: stop pcp client monitoring container
+ service:
+ name: oso-f22-host-monitoring
+ state: stopped
+ ignore_errors: yes
+
+ - name: "check to see if {{ cli_docker_device }} exists"
+ command: "test -e {{ cli_docker_device }}"
+ register: docker_dev_check
+ ignore_errors: yes
+
+ - debug: var=docker_dev_check
+
+ - name: "fail if {{ cli_docker_device }} doesn't exist"
+ fail:
+ msg: "{{ cli_docker_device }} doesn't exist. Please investigate"
+ when: docker_dev_check.rc != 0
+
+ - name: stop docker
+ service:
+ name: docker
+ state: stopped
+
+ - name: delete /var/lib/docker
+ command: rm -rf /var/lib/docker
+
+ - name: remove /var/lib/docker
+ command: rm -rf /var/lib/docker
+
+ - name: copy the docker-storage-setup config file
+ copy:
+ content: >
+ DEVS={{ cli_docker_device }}
+ VG=docker_vg
+ dest: /etc/sysconfig/docker-storage-setup
+ owner: root
+ group: root
+ mode: 0664
+
+ - name: docker storage setup
+ command: docker-storage-setup
+ register: setup_output
+
+ - debug: var=setup_output
+
+ - name: extend the vg
+ command: lvextend -l 90%VG /dev/docker_vg/docker-pool
+ register: extend_output
+
+ - debug: var=extend_output
+
+ - name: start docker
+ service:
+ name: docker
+ state: restarted
+
+ - name: docker info
+ command: docker info
+ register: dockerinfo
+
+ - debug: var=dockerinfo
diff --git a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
index 1946a5f4f..b6dde357e 100644
--- a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
+++ b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
@@ -52,12 +52,12 @@
ignore_errors: yes
- name: Remove non-running docker images
- shell: "docker images -aq | xargs --no-run-if-empty docker rmi 2>/dev/null"
+ shell: "docker images | grep -v -e registry.access.redhat.com -e docker-registry.usersys.redhat.com -e docker-registry.ops.rhcloud.com | awk '{print $3}' | xargs --no-run-if-empty docker rmi 2>/dev/null"
ignore_errors: yes
# leaving off the '-t' for docker exec. With it, it doesn't work with ansible and tty support
- name: update zabbix docker items
- command: docker exec -i oso-rhel7-zagg-client /usr/local/bin/cron-send-docker-metrics.py
+ command: docker exec -i oso-rhel7-host-monitoring /usr/local/bin/cron-send-docker-metrics.py
# Get and show docker info again.
- name: Get docker info
diff --git a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py b/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py
new file mode 100644
index 000000000..d0264cde9
--- /dev/null
+++ b/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py
@@ -0,0 +1,41 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Custom filters for use in openshift-ansible
+'''
+
+import pdb
+
+
+class FilterModule(object):
+ ''' Custom ansible filters '''
+
+ @staticmethod
+ def oo_pdb(arg):
+ ''' This pops you into a pdb instance where arg is the data passed in
+ from the filter.
+ Ex: "{{ hostvars | oo_pdb }}"
+ '''
+ pdb.set_trace()
+ return arg
+
+ @staticmethod
+ def translate_volume_name(volumes, target_volume):
+ '''
+ This filter matches a device string /dev/sdX to /dev/xvdX
+ It will then return the AWS volume ID
+ '''
+ for vol in volumes:
+ translated_name = vol["attachment_set"]["device"].replace("/dev/sd", "/dev/xvd")
+ if target_volume.startswith(translated_name):
+ return vol["id"]
+
+ return None
+
+
+ def filters(self):
+ ''' returns a mapping of filters to methods '''
+ return {
+ "translate_volume_name": self.translate_volume_name,
+ }
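
For illustration only, a task using this filter might look like the following sketch (all values are hypothetical): the ec2_vol "list" result reports an attachment device of /dev/sdb, the OS exposes it as /dev/xvdb, so a PV named /dev/xvdb1 resolves to that volume's id.

- set_fact:
    old_docker_volume_id: "{{ sample_volumes | translate_volume_name('/dev/xvdb1') }}"
  vars:
    # hypothetical data shaped like the ec2_vol list output
    sample_volumes:
      - id: vol-0abc1234
        attachment_set:
          device: /dev/sdb
# old_docker_volume_id would now be "vol-0abc1234"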
diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
new file mode 100644
index 000000000..d24e9cafa
--- /dev/null
+++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
@@ -0,0 +1,206 @@
+---
+# This playbook grows the docker VG on a node by:
+# * add a new volume
+# * add volume to the existing VG.
+# * pv move to the new volume.
+# * remove old volume
+# * detach volume
+# * mark old volume in AWS with "REMOVE ME" tag
+# * grow docker LVM to 90% of the VG
+#
+# To run:
+# 1. Source your AWS credentials (make sure it's the corresponding AWS account) into your environment
+# export AWS_ACCESS_KEY_ID='XXXXX'
+# export AWS_SECRET_ACCESS_KEY='XXXXXX'
+#
+# 2. run the playbook:
+# ansible-playbook -e 'cli_tag_name=<tag-name>' grow_docker_vg.yml
+#
+# Example:
+# ansible-playbook -e 'cli_tag_name=ops-compute-12345' grow_docker_vg.yml
+#
+# Notes:
+# * By default this will do a 200GB GP2 volume. This can be overridden with the "-e 'cli_volume_size=100'" variable
+# * This does a GP2 by default. Support for Provisioned IOPS has not been added
+# * This will assign the new volume to /dev/xvdc. This is not variablized, yet.
+# * This can be done with NO downtime on the host
+# * This playbook assumes that there is a Logical Volume that is installed and called "docker-pool". This is
+# the LV that gets created via the "docker-storage-setup" command
+#
+
+- name: Grow the docker volume group
+ hosts: "tag_Name_{{ cli_tag_name }}"
+ user: root
+ connection: ssh
+ gather_facts: no
+
+ vars:
+ cli_volume_type: gp2
+ cli_volume_size: 200
+# cli_volume_iops: "{{ 30 * cli_volume_size }}"
+
+ pre_tasks:
+ - fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - cli_tag_name
+ - cli_volume_size
+
+ - debug:
+ var: hosts
+
+ - name: start docker
+ service:
+ name: docker
+ state: started
+
+ - name: Determine if Storage Driver (docker info) is devicemapper
+ shell: docker info | grep 'Storage Driver:.*devicemapper'
+ register: device_mapper_check
+ ignore_errors: yes
+
+ - debug:
+ var: device_mapper_check
+
+ - name: fail if we don't detect devicemapper
+ fail:
+ msg: The "Storage Driver" in "docker info" is not set to "devicemapper"! Please investigate manually.
+ when: device_mapper_check.rc == 1
+
+ # docker-storage-setup creates a docker-pool as the lvm. I am using docker-pool lvm to test
+ # and find the volume group.
+ - name: Attempt to find the Volume Group that docker is using
+ shell: lvs | grep docker-pool | awk '{print $2}'
+ register: docker_vg_name
+ ignore_errors: yes
+
+ - debug:
+ var: docker_vg_name
+
+ - name: fail if we don't find a docker volume group
+ fail:
+ msg: Unable to find docker volume group. Please investigate manually.
+ when: docker_vg_name.stdout_lines|length != 1
+
+ # docker-storage-setup creates a docker-pool as the lvm. I am using docker-pool lvm to test
+ # and find the physical volume.
+ - name: Attempt to find the Physical Volume that docker is using
+ shell: "pvs | grep {{ docker_vg_name.stdout }} | awk '{print $1}'"
+ register: docker_pv_name
+ ignore_errors: yes
+
+ - debug:
+ var: docker_pv_name
+
+ - name: fail if we don't find a docker physical volume
+ fail:
+ msg: Unable to find docker physical volume. Please investigate manually.
+ when: docker_pv_name.stdout_lines|length != 1
+
+
+ - name: get list of volumes from AWS
+ delegate_to: localhost
+ ec2_vol:
+ state: list
+ instance: "{{ ec2_id }}"
+ region: "{{ ec2_region }}"
+ register: attached_volumes
+
+ - debug: var=attached_volumes
+
+ - name: get volume id of current docker volume
+ set_fact:
+ old_docker_volume_id: "{{ attached_volumes.volumes | translate_volume_name(docker_pv_name.stdout) }}"
+
+ - debug: var=old_docker_volume_id
+
+ - name: check to see if /dev/xvdc exists
+ command: test -e /dev/xvdc
+ register: xvdc_check
+ ignore_errors: yes
+
+ - debug: var=xvdc_check
+
+ - name: fail if /dev/xvdc already exists
+ fail:
+ msg: /dev/xvdc already exists. Please investigate
+ when: xvdc_check.rc == 0
+
+ - name: Create a volume and attach it
+ delegate_to: localhost
+ ec2_vol:
+ state: present
+ instance: "{{ ec2_id }}"
+ region: "{{ ec2_region }}"
+ volume_size: "{{ cli_volume_size | default(30, True)}}"
+ volume_type: "{{ cli_volume_type }}"
+ device_name: /dev/xvdc
+ register: create_volume
+
+ - debug: var=create_volume
+
+ - name: Fail when problems creating volumes and attaching
+ fail:
+ msg: "Failed to create or attach volume msg: {{ create_volume.msg }}"
+ when: create_volume.msg is defined
+
+ - name: tag the vol with a name
+ delegate_to: localhost
+ ec2_tag: region={{ ec2_region }} resource={{ create_volume.volume_id }}
+ args:
+ tags:
+ Name: "{{ ec2_tag_Name }}"
+ clusterid: "{{ ec2_tag_clusterid }}"
+ register: voltags
+
+ - name: check for attached drive
+ command: test -b /dev/xvdc
+ register: attachment_check
+ until: attachment_check.rc == 0
+ retries: 30
+ delay: 2
+
+ - name: partition the new drive and make it lvm
+ command: parted /dev/xvdc --script -- mklabel msdos mkpart primary 0% 100% set 1 lvm
+
+ - name: pvcreate /dev/xvdc
+ command: pvcreate /dev/xvdc1
+
+ - name: Extend the docker volume group
+ command: vgextend "{{ docker_vg_name.stdout }}" /dev/xvdc1
+
+ - name: pvmove onto new volume
+ command: "pvmove {{ docker_pv_name.stdout }} /dev/xvdc1"
+ async: 43200
+ poll: 10
+
+ - name: Remove the old docker drive from the volume group
+ command: "vgreduce {{ docker_vg_name.stdout }} {{ docker_pv_name.stdout }}"
+
+ - name: Remove the pv from the old drive
+ command: "pvremove {{ docker_pv_name.stdout }}"
+
+ - name: Extend the docker lvm
+ command: "lvextend -l '90%VG' /dev/{{ docker_vg_name.stdout }}/docker-pool"
+
+ - name: detach old docker volume
+ delegate_to: localhost
+ ec2_vol:
+ region: "{{ ec2_region }}"
+ id: "{{ old_docker_volume_id }}"
+ instance: None
+
+ - name: tag the old vol valid label
+ delegate_to: localhost
+ ec2_tag: region={{ ec2_region }} resource={{old_docker_volume_id}}
+ args:
+ tags:
+ Name: "{{ ec2_tag_Name }} REMOVE ME"
+ register: voltags
+
+ - name: Update the /etc/sysconfig/docker-storage-setup with new device
+ lineinfile:
+ dest: /etc/sysconfig/docker-storage-setup
+ regexp: ^DEVS=
+ line: DEVS=/dev/xvdc
diff --git a/playbooks/adhoc/noc/create_host.yml b/playbooks/adhoc/noc/create_host.yml
index d250e6e69..2d2cae2b5 100644
--- a/playbooks/adhoc/noc/create_host.yml
+++ b/playbooks/adhoc/noc/create_host.yml
@@ -1,6 +1,8 @@
---
- name: 'Create a host object in zabbix'
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
roles:
- os_zabbix
@@ -23,6 +25,8 @@
#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
- name: 'Create a host object in zabbix'
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
roles:
- os_zabbix
diff --git a/playbooks/adhoc/noc/create_maintenance.yml b/playbooks/adhoc/noc/create_maintenance.yml
index c0ec57ce1..8ad5fa0e2 100644
--- a/playbooks/adhoc/noc/create_maintenance.yml
+++ b/playbooks/adhoc/noc/create_maintenance.yml
@@ -2,6 +2,8 @@
#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
- name: 'Create a maintenance object in zabbix'
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
roles:
- os_zabbix
diff --git a/playbooks/adhoc/noc/get_zabbix_problems.yml b/playbooks/adhoc/noc/get_zabbix_problems.yml
index 4b94fa228..79cae24ab 100644
--- a/playbooks/adhoc/noc/get_zabbix_problems.yml
+++ b/playbooks/adhoc/noc/get_zabbix_problems.yml
@@ -1,6 +1,8 @@
---
- name: 'Get current hosts who have triggers that are alerting by trigger description'
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
roles:
- os_zabbix
diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2
new file mode 100644
index 000000000..10454ad11
--- /dev/null
+++ b/playbooks/adhoc/s3_registry/s3_registry.j2
@@ -0,0 +1,23 @@
+version: 0.1
+log:
+ level: debug
+http:
+ addr: :5000
+storage:
+ cache:
+ layerinfo: inmemory
+ s3:
+ accesskey: {{ aws_access_key }}
+ secretkey: {{ aws_secret_key }}
+ region: {{ aws_bucket_region }}
+ bucket: {{ aws_bucket_name }}
+ encrypt: true
+ secure: true
+ v4auth: true
+ rootdirectory: /registry
+auth:
+ openshift:
+ realm: openshift
+middleware:
+ repository:
+ - name: openshift
diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
new file mode 100644
index 000000000..daf84e242
--- /dev/null
+++ b/playbooks/adhoc/s3_registry/s3_registry.yml
@@ -0,0 +1,78 @@
+---
+# This playbook creates an S3 bucket named after your cluster and configures the docker-registry service to use the bucket as its backend storage.
+# Usage:
+# ansible-playbook s3_registry.yml -e clusterid="mycluster" -e aws_bucket="clusterid-docker" -e aws_region="us-east-1"
+#
+# The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.
+# The 'clusterid' is the short name of your cluster.
+
+- hosts: tag_clusterid_{{ clusterid }}:&tag_host-type_openshift-master
+ remote_user: root
+ gather_facts: False
+
+ vars:
+ aws_access_key: "{{ lookup('env', 'S3_ACCESS_KEY_ID') }}"
+ aws_secret_key: "{{ lookup('env', 'S3_SECRET_ACCESS_KEY') }}"
+ aws_bucket_name: "{{ aws_bucket | default(clusterid ~ '-docker') }}"
+ aws_bucket_region: "{{ aws_region | default(lookup('env', 'S3_REGION') | default('us-east-1', true)) }}"
+ aws_create_bucket: "{{ aws_create | default(True) }}"
+ aws_tmp_path: "{{ aws_tmp_pathfile | default('/root/config.yml')}}"
+ aws_delete_tmp_file: "{{ aws_delete_tmp | default(True) }}"
+
+ tasks:
+
+ - name: Check for AWS creds
+ fail:
+ msg: "Couldn't find {{ item }} creds in ENV"
+ when: "{{ item }} == ''"
+ with_items:
+ - aws_access_key
+ - aws_secret_key
+
+ - name: Scale down registry
+ command: oc scale --replicas=0 dc/docker-registry
+
+ - name: Create S3 bucket
+ when: aws_create_bucket | bool
+ local_action:
+ module: s3 bucket="{{ aws_bucket_name }}" mode=create
+
+ - name: Set up registry environment variable
+ command: oc env dc/docker-registry REGISTRY_CONFIGURATION_PATH=/etc/registryconfig/config.yml
+
+ - name: Generate docker registry config
+ template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600
+
+ - name: Determine if new secrets are needed
+ command: oc get secrets
+ register: secrets
+
+ - name: Create registry secrets
+ command: oc secrets new dockerregistry /root/config.yml
+ when: "'dockerregistry' not in secrets.stdout"
+
+ - name: Determine if service account contains secrets
+ command: oc describe serviceaccount/registry
+ register: serviceaccount
+
+ - name: Add secrets to registry service account
+ command: oc secrets add serviceaccount/registry secrets/dockerregistry
+ when: "'dockerregistry' not in serviceaccount.stdout"
+
+ - name: Determine if deployment config contains secrets
+ command: oc volume dc/docker-registry --list
+ register: dc
+
+ - name: Add secrets to registry deployment config
+ command: oc volume dc/docker-registry --add --name=dockersecrets -m /etc/registryconfig --type=secret --secret-name=dockerregistry
+ when: "'dockersecrets' not in dc.stdout"
+
+ - name: Wait for deployment config to take effect before scaling up
+ pause: seconds=30
+
+ - name: Scale up registry
+ command: oc scale --replicas=1 dc/docker-registry
+
+ - name: Delete temporary config file
+ file: path={{ aws_tmp_path }} state=absent
+ when: aws_delete_tmp_file | bool
diff --git a/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml b/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml
new file mode 100755
index 000000000..08e8f8968
--- /dev/null
+++ b/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml
@@ -0,0 +1,52 @@
+#!/usr/bin/ansible-playbook
+---
+#example run:
+# ansible-playbook -e "host=ops-node-compute-abcde" oo-sdn-restart.yml
+#
+
+- name: Check vars
+ hosts: localhost
+ gather_facts: false
+
+ pre_tasks:
+ - fail:
+ msg: "Playbook requires host to be set"
+ when: host is not defined or host == ''
+
+- name: Restart openshift/docker (and monitoring containers)
+ hosts: oo_version_3:&oo_name_{{ host }}
+ gather_facts: false
+ user: root
+
+ tasks:
+ - name: stop openshift/docker
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - atomic-openshift-node
+ - docker
+
+ - name: restart openvswitch
+ service:
+ name: openvswitch
+ state: restarted
+
+ - name: wait 5 sec
+ pause:
+ seconds: 5
+
+ - name: start openshift/docker
+ service:
+ name: "{{ item }}"
+ state: started
+ with_items:
+ - atomic-openshift-node
+ - docker
+
+ - name: start monitoring containers
+ service:
+ name: "{{ item }}"
+ state: restarted
+ with_items:
+ - oso-rhel7-host-monitoring
diff --git a/playbooks/adhoc/setupnfs.yml b/playbooks/adhoc/setupnfs.yml
new file mode 100644
index 000000000..5f3631fcf
--- /dev/null
+++ b/playbooks/adhoc/setupnfs.yml
@@ -0,0 +1,21 @@
+---
+### This playbook is old and we are currently not using NFS.
+- hosts: tag_Name_nfs-v3-stg
+ sudo: no
+ remote_user: root
+ gather_facts: no
+ roles:
+ - role: openshift_storage_nfs_lvm
+ mount_dir: /exports/stg-black
+ volume_prefix: "kwoodsontest"
+ volume_size: 5
+ volume_num_start: 222
+ number_of_volumes: 3
+ tasks:
+ - fetch:
+ dest: json/
+ src: /root/"{{ item }}"
+ with_items:
+ - persistent-volume.kwoodsontest5g0222.json
+ - persistent-volume.kwoodsontest5g0223.json
+ - persistent-volume.kwoodsontest5g0224.json
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
new file mode 100644
index 000000000..680964d80
--- /dev/null
+++ b/playbooks/adhoc/uninstall.yml
@@ -0,0 +1,238 @@
+# This deletes *ALL* Origin, Atomic Enterprise Platform and OpenShift
+# Enterprise content installed by ansible. This includes:
+#
+# configuration
+# containers
+# example templates and imagestreams
+# images
+# RPMs
+---
+- hosts:
+ - OSEv3:children
+
+ sudo: yes
+
+ tasks:
+ - name: Detecting Operating System
+ shell: ls /run/ostree-booted
+ ignore_errors: yes
+ failed_when: false
+ register: ostree_output
+
+ # Since we're not calling openshift_facts we'll do this for now
+ - set_fact:
+ is_atomic: "{{ ostree_output.rc == 0 }}"
+ - set_fact:
+ is_containerized: "{{ is_atomic or containerized | default(false) | bool }}"
+
+ - name: Remove br0 interface
+ shell: ovs-vsctl del-br br0
+ changed_when: False
+ failed_when: False
+
+ - name: Stop services
+ service: name={{ item }} state=stopped
+ with_items:
+ - atomic-enterprise-master
+ - atomic-enterprise-node
+ - atomic-openshift-master
+ - atomic-openshift-master-api
+ - atomic-openshift-master-controllers
+ - atomic-openshift-node
+ - etcd
+ - haproxy
+ - openshift-master
+ - openshift-master-api
+ - openshift-master-controllers
+ - openshift-node
+ - openvswitch
+ - origin-master
+ - origin-master-api
+ - origin-master-controllers
+ - origin-node
+ - pcsd
+ failed_when: false
+
+ - name: Stop additional atomic services
+ service: name={{ item }} state=stopped
+ when: is_containerized | bool
+ with_items:
+ - etcd_container
+ failed_when: false
+
+ - name: Remove packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+ when: not is_atomic | bool
+ with_items:
+ - atomic-enterprise
+ - atomic-enterprise-master
+ - atomic-enterprise-node
+ - atomic-enterprise-sdn-ovs
+ - atomic-openshift
+ - atomic-openshift-clients
+ - atomic-openshift-master
+ - atomic-openshift-node
+ - atomic-openshift-sdn-ovs
+ - cockpit-bridge
+ - cockpit-docker
+ - cockpit-shell
+ - cockpit-ws
+ - corosync
+ - etcd
+ - haproxy
+ - kubernetes-client
+ - openshift
+ - openshift-master
+ - openshift-node
+ - openshift-sdn
+ - openshift-sdn-ovs
+ - openvswitch
+ - origin
+ - origin-clients
+ - origin-master
+ - origin-node
+ - origin-sdn-ovs
+ - pacemaker
+ - pcs
+ - tuned-profiles-atomic-enterprise-node
+ - tuned-profiles-atomic-openshift-node
+ - tuned-profiles-openshift-node
+ - tuned-profiles-origin-node
+
+ - name: Remove linux interfaces
+ shell: ip link del "{{ item }}"
+ changed_when: False
+ failed_when: False
+ with_items:
+ - lbr0
+ - vlinuxbr
+ - vovsbr
+
+ - shell: systemctl reset-failed
+ changed_when: False
+
+ - shell: systemctl daemon-reload
+ changed_when: False
+
+ - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
+ changed_when: False
+ failed_when: False
+ with_items:
+ - openshift-enterprise
+ - atomic-enterprise
+ - origin
+
+ - shell: docker ps -a | grep Exited | egrep "{{ item }}" | awk '{print $1}'
+ changed_when: False
+ failed_when: False
+ register: exited_containers_to_delete
+ with_items:
+ - aep3.*/aep
+ - aep3.*/node
+ - aep3.*/openvswitch
+ - openshift3/ose
+ - openshift3/node
+ - openshift3/openvswitch
+ - openshift/origin
+
+ - shell: "docker rm {{ item.stdout_lines | join(' ') }}"
+ changed_when: False
+ failed_when: False
+ with_items: "{{ exited_containers_to_delete.results }}"
+
+ - shell: docker images | egrep {{ item }} | awk '{ print $3 }'
+ changed_when: False
+ failed_when: False
+ register: images_to_delete
+ with_items:
+ - registry\.access\..*redhat\.com/openshift3
+ - registry\.access\..*redhat\.com/aep3
+ - registry\.qe\.openshift\.com/.*
+ - registry\.access\..*redhat\.com/rhel7/etcd
+ - docker.io/openshift
+
+ - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
+ changed_when: False
+ failed_when: False
+ with_items: "{{ images_to_delete.results }}"
+
+ - name: Remove sdn drop files
+ file:
+ path: /run/openshift-sdn
+ state: absent
+
+ - name: restart docker
+ service:
+ name: docker
+ state: restarted
+
+ - name: Remove remaining files
+ file: path={{ item }} state=absent
+ with_items:
+ - "~{{ ansible_ssh_user }}/.kube"
+ - /etc/ansible/facts.d/openshift.fact
+ - /etc/atomic-enterprise
+ - /etc/corosync
+ - /etc/etcd
+ - /etc/openshift
+ - /etc/openshift-sdn
+ - /etc/origin
+ - /etc/systemd/system/atomic-openshift-master.service
+ - /etc/systemd/system/atomic-openshift-master-api.service
+ - /etc/systemd/system/atomic-openshift-master-controllers.service
+ - /etc/systemd/system/atomic-openshift-node.service
+ - /etc/systemd/system/etcd_container.service
+ - /etc/systemd/system/openvswitch.service
+ - /etc/sysconfig/atomic-enterprise-master
+ - /etc/sysconfig/atomic-enterprise-master-api
+ - /etc/sysconfig/atomic-enterprise-master-controllers
+ - /etc/sysconfig/atomic-enterprise-node
+ - /etc/sysconfig/atomic-openshift-master
+ - /etc/sysconfig/atomic-openshift-master-api
+ - /etc/sysconfig/atomic-openshift-master-controllers
+ - /etc/sysconfig/atomic-openshift-node
+ - /etc/sysconfig/openshift-master
+ - /etc/sysconfig/openshift-node
+ - /etc/sysconfig/openvswitch
+ - /etc/sysconfig/origin-master
+ - /etc/sysconfig/origin-master-api
+ - /etc/sysconfig/origin-master-controllers
+ - /etc/sysconfig/origin-node
+ - /etc/systemd/system/atomic-openshift-node.service.wants
+ - /root/.kube
+ - /run/openshift-sdn
+ - /usr/share/openshift/examples
+ - /var/lib/atomic-enterprise
+ - /var/lib/etcd
+ - /var/lib/openshift
+ - /var/lib/origin
+ - /var/lib/pacemaker
+ - /usr/lib/systemd/system/atomic-openshift-master-api.service
+ - /usr/lib/systemd/system/atomic-openshift-master-controllers.service
+ - /usr/lib/systemd/system/origin-master-api.service
+ - /usr/lib/systemd/system/origin-master-controllers.service
+ - /usr/local/bin/openshift
+ - /usr/local/bin/oadm
+ - /usr/local/bin/oc
+ - /usr/local/bin/kubectl
+
+ # Since we are potentially removing the systemd unit files for the separated
+ # master-api and master-controllers services, we need to reload the
+ # systemd configuration manager
+ - name: Reload systemd manager configuration
+ command: systemctl daemon-reload
+
+- hosts: nodes
+ sudo: yes
+ tasks:
+ - name: restart docker
+ service: name=docker state=restarted
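
As a side note on the Atomic Host detection near the top of this playbook, which shells out to ls /run/ostree-booted: an equivalent sketch using the stat module (assuming the same marker file is the signal) would be:

- name: Detect Atomic Host
  stat:
    path: /run/ostree-booted
  register: ostree_booted

- set_fact:
    is_atomic: "{{ ostree_booted.stat.exists }}"

- set_fact:
    is_containerized: "{{ is_atomic or containerized | default(false) | bool }}"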
diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
index 1e884240a..09f7c76cc 100644
--- a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
+++ b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
@@ -1,6 +1,8 @@
---
- hosts: localhost
gather_facts: no
+ connection: local
+ become: no
vars:
g_server: http://localhost:8080/zabbix/api_jsonrpc.php
g_user: ''
diff --git a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
index e2b8150c6..2f1d003ff 100755
--- a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
+++ b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
@@ -2,12 +2,18 @@
---
- hosts: localhost
gather_facts: no
+ connection: local
+ become: no
vars:
g_server: http://localhost/zabbix/api_jsonrpc.php
g_user: Admin
g_password: zabbix
+ g_zbx_scriptrunner_user: scriptrunner
+ g_zbx_scriptrunner_bastion_host: specialhost.example.com
roles:
- role: os_zabbix
ozb_server: "{{ g_server }}"
ozb_user: "{{ g_user }}"
ozb_password: "{{ g_password }}"
+ ozb_scriptrunner_user: "{{ g_zbx_scriptrunner_user }}"
+ ozb_scriptrunner_bastion_host: "{{ g_zbx_scriptrunner_bastion_host }}"
diff --git a/playbooks/aws/ansible-tower/config.yml b/playbooks/aws/ansible-tower/config.yml
index efd1b9911..eb3f1a1da 100644
--- a/playbooks/aws/ansible-tower/config.yml
+++ b/playbooks/aws/ansible-tower/config.yml
@@ -2,6 +2,8 @@
- name: "populate oo_hosts_to_config host group if needed"
hosts: localhost
gather_facts: no
+ connection: local
+ become: no
tasks:
- name: Evaluate oo_host_group_exp if it's set
add_host: "name={{ item }} groups=oo_hosts_to_config"
diff --git a/playbooks/aws/ansible-tower/launch.yml b/playbooks/aws/ansible-tower/launch.yml
index 850238ffb..d40529435 100644
--- a/playbooks/aws/ansible-tower/launch.yml
+++ b/playbooks/aws/ansible-tower/launch.yml
@@ -2,6 +2,7 @@
- name: Launch instance(s)
hosts: localhost
connection: local
+ become: no
gather_facts: no
vars:
@@ -71,8 +72,8 @@
tasks:
- - name: Yum update
- yum: name=* state=latest
+ - name: Update All Things
+ action: "{{ ansible_pkg_mgr }} name=* state=latest"
# Apply the configs, separate so that just the configs can be run by themselves
- include: config.yml
diff --git a/playbooks/aws/openshift-cluster/add_nodes.yml b/playbooks/aws/openshift-cluster/add_nodes.yml
new file mode 100644
index 000000000..3d88e6b23
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/add_nodes.yml
@@ -0,0 +1,40 @@
+---
+- name: Launch instance(s)
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ - ["vars.{{ deployment_type }}.{{ cluster_id }}.yml", vars.defaults.yml]
+ vars:
+ oo_extend_env: True
+ tasks:
+ - fail:
+ msg: Deployment type not supported for aws provider yet
+ when: deployment_type == 'enterprise'
+
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
+ vars:
+ type: "compute"
+ count: "{{ num_nodes }}"
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+ g_sub_host_type: "{{ sub_host_type }}"
+
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
+ vars:
+ type: "infra"
+ count: "{{ num_infra }}"
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+ g_sub_host_type: "{{ sub_host_type }}"
+
+- include: scaleup.yml
+- include: list.yml
diff --git a/playbooks/aws/openshift-cluster/cluster_hosts.yml b/playbooks/aws/openshift-cluster/cluster_hosts.yml
new file mode 100644
index 000000000..119b376aa
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/cluster_hosts.yml
@@ -0,0 +1,21 @@
+---
+g_all_hosts: "{{ groups['tag_clusterid_' ~ cluster_id] | default([])
+ | intersect(groups['tag_environment_' ~ cluster_env] | default([])) }}"
+
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_etcd'] | default([])) }}"
+
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])) }}"
+
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}"
+
+g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}"
+
+g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_master'] | default([])) }}"
+
+g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}"
+
+g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_node'] | default([])) }}"
+
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}"
+
+g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute'] | default([])) }}"
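
To make the intersect chains above concrete, a hypothetical evaluation with made-up inventory groups (with cluster_id=mycluster and cluster_env=dev) would resolve like this:

# tag_clusterid_mycluster   = [master1, node1, node2]
# tag_environment_dev       = [master1, node1, node2, unrelated-host]
# tag_host-type_master      = [master1]
# tag_host-type_node        = [node1, node2]
# tag_sub-host-type_infra   = [node1]
# tag_sub-host-type_compute = [node2]
#
#   g_all_hosts     -> [master1, node1, node2]
#   g_master_hosts  -> [master1]
#   g_node_hosts    -> [node1, node2]
#   g_infra_hosts   -> [node1]
#   g_compute_hosts -> [node2]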
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index a8e3e27bb..9fba856a2 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -1,23 +1,21 @@
----
-- hosts: localhost
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - set_fact:
- g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
-
- include: ../../common/openshift-cluster/config.yml
+ vars_files:
+ - ../../aws/openshift-cluster/vars.yml
+ - ../../aws/openshift-cluster/cluster_hosts.yml
vars:
- g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
- g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
- g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
- g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
- g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+ g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: 2
+ openshift_debug_level: "{{ debug_level }}"
openshift_deployment_type: "{{ deployment_type }}"
- openshift_hostname: "{{ ec2_private_ip_address }}"
openshift_public_hostname: "{{ ec2_ip_address }}"
+ openshift_registry_selector: 'type=infra'
+ openshift_router_selector: 'type=infra'
+ openshift_infra_nodes: "{{ g_infra_hosts }}"
+ openshift_node_labels: '{"region": "{{ ec2_region }}", "type": "{{ hostvars[inventory_hostname]["ec2_tag_sub-host-type"] if inventory_hostname in groups["tag_host-type_node"] else hostvars[inventory_hostname]["ec2_tag_host-type"] }}"}'
+ openshift_master_cluster_method: 'native'
+ openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
+ os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
+ openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
+ openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
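
For reference, the openshift_node_labels expression above picks the sub-host-type tag for hosts in the tag_host-type_node group and falls back to the host-type tag otherwise; with hypothetical hosts in us-east-1 it would render roughly as:

#   compute node (sub-host-type=compute): {"region": "us-east-1", "type": "compute"}
#   infra node   (sub-host-type=infra):   {"region": "us-east-1", "type": "infra"}
#   master       (not in tag_host-type_node, host-type=master):
#                                          {"region": "us-east-1", "type": "master"}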
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
index a89275597..15b83dfad 100644
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -2,6 +2,7 @@
- name: Launch instance(s)
hosts: localhost
connection: local
+ become: no
gather_facts: no
vars_files:
- vars.yml
@@ -11,7 +12,7 @@
msg: Deployment type not supported for aws provider yet
when: deployment_type == 'enterprise'
- - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ etcd_names }}"
@@ -19,7 +20,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "default"
- - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ master_names }}"
@@ -27,7 +28,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "default"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "compute"
count: "{{ num_nodes }}"
@@ -38,7 +39,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "{{ sub_host_type }}"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "infra"
count: "{{ num_infra }}"
@@ -55,9 +56,4 @@
when: master_names is defined and master_names.0 is defined
- include: update.yml
-
-- include: ../../common/openshift-cluster/create_services.yml
- vars:
- g_svc_master: "{{ service_master }}"
-
- include: list.yml
diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml
index 04fcdc0a1..8b41a355e 100644
--- a/playbooks/aws/openshift-cluster/list.yml
+++ b/playbooks/aws/openshift-cluster/list.yml
@@ -2,10 +2,12 @@
- name: Generate oo_list_hosts group
hosts: localhost
gather_facts: no
+ connection: local
+ become: no
vars_files:
- vars.yml
tasks:
- - set_fact: scratch_group=tag_env_{{ cluster_id }}
+ - set_fact: scratch_group=tag_clusterid_{{ cluster_id }}
when: cluster_id != ''
- set_fact: scratch_group=all
when: cluster_id == ''
diff --git a/playbooks/aws/openshift-cluster/scaleup.yml b/playbooks/aws/openshift-cluster/scaleup.yml
new file mode 100644
index 000000000..7e3a47964
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/scaleup.yml
@@ -0,0 +1,32 @@
+---
+
+- hosts: localhost
+ gather_facts: no
+ connection: local
+ become: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_hosts_to_update
+ add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_update
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: "{{ groups.nodes_to_add }}"
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- include: ../../common/openshift-cluster/scaleup.yml
+ vars_files:
+ - ../../aws/openshift-cluster/vars.yml
+ - ../../aws/openshift-cluster/cluster_hosts.yml
+ vars:
+ g_new_node_hosts: "{{ groups.nodes_to_add }}"
+ g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ g_nodeonmaster: true
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: "{{ debug_level }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/service.yml b/playbooks/aws/openshift-cluster/service.yml
index 25cf48505..d5f7d6b19 100644
--- a/playbooks/aws/openshift-cluster/service.yml
+++ b/playbooks/aws/openshift-cluster/service.yml
@@ -1,9 +1,12 @@
---
- name: Call same systemctl command for openshift on all instance(s)
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
vars_files:
- vars.yml
+ - cluster_hosts.yml
tasks:
- fail: msg="cluster_id is required to be injected in this playbook"
when: cluster_id is not defined
@@ -14,7 +17,7 @@
groups: g_service_masters
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])
+ with_items: "{{ master_hosts | default([]) }}"
- name: Evaluate g_service_nodes
add_host:
@@ -22,7 +25,7 @@
groups: g_service_nodes
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])
+ with_items: "{{ node_hosts | default([]) }}"
- include: ../../common/openshift-node/service.yml
- include: ../../common/openshift-master/service.yml
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index b77bcdc1a..63be06ecf 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -2,8 +2,8 @@
- set_fact:
created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
docker_vol_ephemeral: "{{ lookup('env', 'os_docker_vol_ephemeral') | default(false, true) }}"
- env: "{{ cluster }}"
- env_host_type: "{{ cluster }}-openshift-{{ type }}"
+ cluster: "{{ cluster_id }}"
+ env: "{{ cluster_env }}"
host_type: "{{ type }}"
sub_host_type: "{{ g_sub_host_type }}"
@@ -20,10 +20,6 @@
| default(deployment_vars[deployment_type].image, true) }}"
when: ec2_image is not defined and not ec2_image_name
- set_fact:
- ec2_instance_type: "{{ lookup('env', 'ec2_instance_type')
- | default(deployment_vars[deployment_type].type, true) }}"
- when: ec2_instance_type is not defined
-- set_fact:
ec2_keypair: "{{ lookup('env', 'ec2_keypair')
| default(deployment_vars[deployment_type].keypair, true) }}"
when: ec2_keypair is not defined
@@ -37,27 +33,23 @@
when: ec2_assign_public_ip is not defined
- set_fact:
- ec2_instance_type: "{{ ec2_master_instance_type | default(deployment_vars[deployment_type].type, true) }}"
- ec2_security_groups: "{{ ec2_master_security_groups
- | default(deployment_vars[deployment_type].security_groups, true) }}"
+ ec2_instance_type: "{{ ec2_master_instance_type | default(lookup('env', 'ec2_master_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
+ ec2_security_groups: "{{ ec2_master_security_groups | default(lookup('env', 'ec2_master_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}"
when: host_type == "master" and sub_host_type == "default"
- set_fact:
- ec2_instance_type: "{{ ec2_etcd_instance_type | default(deployment_vars[deployment_type].type, true) }}"
- ec2_security_groups: "{{ ec2_etcd_security_groups
- | default(deployment_vars[deployment_type].security_groups, true)}}"
+ ec2_instance_type: "{{ ec2_etcd_instance_type | default(lookup('env', 'ec2_etcd_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
+ ec2_security_groups: "{{ ec2_etcd_security_groups | default(lookup('env', 'ec2_etcd_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}"
when: host_type == "etcd" and sub_host_type == "default"
- set_fact:
- ec2_instance_type: "{{ ec2_infra_instance_type | default(deployment_vars[deployment_type].type, true) }}"
- ec2_security_groups: "{{ ec2_infra_security_groups
- | default(deployment_vars[deployment_type].security_groups, true) }}"
+ ec2_instance_type: "{{ ec2_infra_instance_type | default(lookup('env', 'ec2_infra_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
+ ec2_security_groups: "{{ ec2_infra_security_groups | default(lookup('env', 'ec2_infra_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}"
when: host_type == "node" and sub_host_type == "infra"
- set_fact:
- ec2_instance_type: "{{ ec2_node_instance_type | default(deployment_vars[deployment_type].type, true) }}"
- ec2_security_groups: "{{ ec2_node_security_groups
- | default(deployment_vars[deployment_type].security_groups, true) }}"
+ ec2_instance_type: "{{ ec2_node_instance_type | default(lookup('env', 'ec2_node_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
+ ec2_security_groups: "{{ ec2_node_security_groups | default(lookup('env', 'ec2_node_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}"
when: host_type == "node" and sub_host_type == "compute"
- set_fact:
@@ -65,8 +57,7 @@
| default(deployment_vars[deployment_type].type, true) }}"
when: ec2_instance_type is not defined
- set_fact:
- ec2_security_groups: "{{ lookup('env', 'ec2_security_groups')
- | default(deployment_vars[deployment_type].security_groups, true) }}"
+ ec2_security_groups: "{{ lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
when: ec2_security_groups is not defined
- name: Find amis for deployment_type
@@ -81,7 +72,6 @@
- set_fact:
latest_ami: "{{ ami_result.results | oo_ami_selector(ec2_image_name) }}"
- user_data: "{{ lookup('template', '../templates/user_data.j2') }}"
volume_defs:
etcd:
root:
@@ -97,6 +87,10 @@
volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}"
device_type: "{{ lookup('env', 'os_master_root_vol_type') | default('gp2', true) }}"
iops: "{{ lookup('env', 'os_master_root_vol_iops') | default(500, true) }}"
+ docker:
+ volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(10, true) }}"
+ device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}"
+ iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}"
node:
root:
volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(85, true) }}"
@@ -121,14 +115,13 @@
count: "{{ instances | length }}"
vpc_subnet_id: "{{ ec2_vpc_subnet | default(omit, true) }}"
assign_public_ip: "{{ ec2_assign_public_ip | default(omit, true) }}"
- user_data: "{{ user_data }}"
+ user_data: "{{ lookup('template', '../templates/user_data.j2') }}"
wait: yes
instance_tags:
created-by: "{{ created_by }}"
- environment: "{{ env }}"
- env: "{{ env }}"
+ clusterid: "{{ cluster }}"
+ environment: "{{ cluster_env }}"
host-type: "{{ host_type }}"
- env-host-type: "{{ env_host_type }}"
sub-host-type: "{{ sub_host_type }}"
volumes: "{{ volumes }}"
register: ec2
@@ -143,9 +136,8 @@
Name: "{{ item.0 }}"
- set_fact:
- instance_groups: "tag_created-by_{{ created_by }}, tag_env_{{ env }},
- tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }},
- tag_sub-host-type_{{ sub_host_type }}"
+ instance_groups: "tag_created-by_{{ created_by }}, tag_clusterid_{{ cluster }}, tag_environment_{{ cluster_env }},
+ tag_host-type_{{ host_type }}, tag_sub-host-type_{{ sub_host_type }}"
- set_fact:
node_label:
@@ -172,6 +164,7 @@
- rotate 7
- compress
- sharedscripts
+ - missingok
scripts:
postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"
@@ -190,6 +183,22 @@
- instances
- ec2.instances
+- name: Add new instances to nodes_to_add group if needed
+ add_host:
+ hostname: "{{ item.0 }}"
+ ansible_ssh_host: "{{ item.1.dns_name }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: nodes_to_add
+ ec2_private_ip_address: "{{ item.1.private_ip }}"
+ ec2_ip_address: "{{ item.1.public_ip }}"
+ openshift_node_labels: "{{ node_label }}"
+ logrotate_scripts: "{{ logrotate }}"
+ with_together:
+ - instances
+ - ec2.instances
+ when: oo_extend_env is defined and oo_extend_env | bool
+
- name: Wait for ssh
wait_for: "port=22 host={{ item.dns_name }}"
with_items: ec2.instances
diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2
index 82c2f4d57..3621a7d7d 100644
--- a/playbooks/aws/openshift-cluster/templates/user_data.j2
+++ b/playbooks/aws/openshift-cluster/templates/user_data.j2
@@ -1,5 +1,5 @@
#cloud-config
-{% if type =='etcd' %}
+{% if type == 'etcd' and 'etcd' in volume_defs[type] %}
cloud_config_modules:
- disk_setup
- mounts
@@ -19,7 +19,7 @@ fs_setup:
partition: auto
{% endif %}
-{% if type == 'node' %}
+{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}
mounts:
- [ xvdb ]
- [ ephemeral0 ]
@@ -43,3 +43,10 @@ growpart:
runcmd:
- xfs_growfs /var
{% endif %}
+
+{% if deployment_vars[deployment_type].sudo %}
+- path: /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}-cloud-init-requiretty
+ permissions: 440
+ content: |
+ Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty
+{% endif %}
diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml
index 77287cad0..6dd5d8b62 100644
--- a/playbooks/aws/openshift-cluster/terminate.yml
+++ b/playbooks/aws/openshift-cluster/terminate.yml
@@ -1,23 +1,24 @@
---
- name: Terminate instance(s)
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
vars_files:
- vars.yml
tasks:
- - set_fact: scratch_group=tag_env_{{ cluster_id }}
- add_host:
name: "{{ item }}"
groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups[scratch_group] | default([]) | difference(['localhost'])
+ with_items: (groups['tag_clusterid_' ~ cluster_id] | default([])) | difference(['localhost'])
- name: Unsubscribe VMs
hosts: oo_hosts_to_terminate
roles:
- role: rhel_unsubscribe
- when: deployment_type == "enterprise" and
+ when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
ansible_distribution == "RedHat" and
lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
default('no', True) | lower in ['no', 'false']
@@ -25,36 +26,37 @@
- name: Terminate instances
hosts: localhost
connection: local
+ become: no
gather_facts: no
- vars:
- host_vars: "{{ hostvars
- | oo_select_keys(groups['oo_hosts_to_terminate']) }}"
tasks:
- name: Remove tags from instances
- ec2_tag: resource={{ item.ec2_id }} region={{ item.ec2_region }} state=absent
- args:
+ ec2_tag:
+ resource: "{{ hostvars[item]['ec2_id'] }}"
+ region: "{{ hostvars[item]['ec2_region'] }}"
+ state: absent
tags:
- env: "{{ item['ec2_tag_env'] }}"
- host-type: "{{ item['ec2_tag_host-type'] }}"
- env-host-type: "{{ item['ec2_tag_env-host-type'] }}"
- sub_host_type: "{{ item['ec2_tag_sub-host-type'] }}"
- with_items: host_vars
+ environment: "{{ hostvars[item]['ec2_tag_environment'] }}"
+ clusterid: "{{ hostvars[item]['ec2_tag_clusterid'] }}"
+ host-type: "{{ hostvars[item]['ec2_tag_host-type'] }}"
+ sub_host_type: "{{ hostvars[item]['ec2_tag_sub-host-type'] }}"
+ with_items: groups.oo_hosts_to_terminate
when: "'oo_hosts_to_terminate' in groups"
- name: Terminate instances
ec2:
state: absent
- instance_ids: ["{{ item.ec2_id }}"]
- region: "{{ item.ec2_region }}"
+ instance_ids: ["{{ hostvars[item].ec2_id }}"]
+ region: "{{ hostvars[item].ec2_region }}"
ignore_errors: yes
register: ec2_term
- with_items: host_vars
+ with_items: groups.oo_hosts_to_terminate
when: "'oo_hosts_to_terminate' in groups"
# Fail if any of the instances failed to terminate with an error other
# than 403 Forbidden
- - fail: msg=Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}
- when: "'oo_hosts_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
+ - fail:
+ msg: "Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}"
+ when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
with_items: ec2_term.results
- name: Stop instance if termination failed
@@ -63,7 +65,7 @@
instance_ids: ["{{ item.item.ec2_id }}"]
region: "{{ item.item.ec2_region }}"
register: ec2_stop
- when: "'oo_hosts_to_terminate' in groups and item.failed"
+ when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
with_items: ec2_term.results
- name: Rename stopped instances
@@ -72,4 +74,4 @@
tags:
Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
with_items: ec2_stop.results
- when: "'oo_hosts_to_terminate' in groups"
+ when: ec2_stop | changed
diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml
index e006aa74a..32bab76b5 100644
--- a/playbooks/aws/openshift-cluster/update.yml
+++ b/playbooks/aws/openshift-cluster/update.yml
@@ -1,19 +1,20 @@
---
-- name: Populate oo_hosts_to_update group
+- name: Update - Populate oo_hosts_to_update group
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
vars_files:
- vars.yml
+ - cluster_hosts.yml
tasks:
- - name: Evaluate oo_hosts_to_update
+ - name: Update - Evaluate oo_hosts_to_update
add_host:
name: "{{ item }}"
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: (groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([]))
- | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([]))
- | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-etcd"] | default([]))
+ with_items: "{{ g_all_hosts | default([]) }}"
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
new file mode 100644
index 000000000..11026e38d
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -0,0 +1,17 @@
+---
+# This playbook upgrades an existing AWS cluster, leaving nodes untouched if used with an 'online' deployment type.
+# Usage:
+# ansible-playbook playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml -e deployment_type=online -e cluster_id=<cluster_id>
+- include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+ vars_files:
+ - "{{lookup('file', '../../../../aws/openshift-cluster/vars.yml')}}"
+ - "{{lookup('file', '../../../../aws/openshift-cluster/cluster_hosts.yml')}}"
+ vars:
+ g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ g_nodeonmaster: true
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: "{{ debug_level }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ ec2_private_ip_address }}"
+ openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
index 95bc4b3e2..ae12286bd 100644
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -1,8 +1,23 @@
---
+debug_level: 2
+
+deployment_rhel7_ent_base:
+ # rhel-7.1, requires cloud access subscription
+ image: ami-10663b78
+ image_name:
+ region: us-east-1
+ ssh_user: ec2-user
+ sudo: yes
+ keypair: libra
+ type: m4.large
+ security_groups: [ 'public' ]
+ vpc_subnet:
+ assign_public_ip:
+
deployment_vars:
origin:
# centos-7, requires marketplace
- image: ami-96a818fe
+ image: ami-61bbf104
image_name:
region: us-east-1
ssh_user: centos
@@ -24,15 +39,6 @@ deployment_vars:
security_groups: [ 'public' ]
vpc_subnet:
assign_public_ip:
- enterprise:
- # rhel-7.1, requires cloud access subscription
- image: ami-10663b78
- image_name:
- region: us-east-1
- ssh_user: ec2-user
- sudo: yes
- keypair: libra
- type: m4.large
- security_groups: [ 'public' ]
- vpc_subnet:
- assign_public_ip:
+ enterprise: "{{ deployment_rhel7_ent_base }}"
+ openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
+ atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml
new file mode 100644
index 000000000..8893db245
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml
@@ -0,0 +1,17 @@
+---
+g_etcd_hosts: "{{ groups.etcd | default([]) }}"
+
+g_lb_hosts: "{{ groups.lb | default([]) }}"
+
+g_master_hosts: "{{ groups.masters | default([]) }}"
+
+g_new_master_hosts: "{{ groups.new_masters | default([]) }}"
+
+g_node_hosts: "{{ groups.nodes | default([]) }}"
+
+g_new_node_hosts: "{{ groups.new_nodes | default([]) }}"
+
+g_nfs_hosts: "{{ groups.nfs | default([]) }}"
+
+g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
+ | union(g_lb_hosts) | default([]) }}"
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 9e50a4a18..5887b3208 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -1,9 +1,8 @@
---
- include: ../../common/openshift-cluster/config.yml
+ vars_files:
+ - ../../byo/openshift-cluster/cluster_hosts.yml
vars:
- g_etcd_group: "{{ 'etcd' }}"
- g_masters_group: "{{ 'masters' }}"
- g_nodes_group: "{{ 'nodes' }}"
openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: 2
+ openshift_debug_level: "{{ debug_level | default(2) }}"
openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md
new file mode 100644
index 000000000..ca01dbc9d
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/README.md
@@ -0,0 +1,8 @@
+# Upgrade playbooks
+The playbooks provided in this directory can be used for upgrading an existing
+cluster. Additional notes for the associated upgrade playbooks are
+provided in their respective directories.
+
+# Upgrades available
+- [OpenShift Enterprise 3.0 to latest minor release](v3_0_minor/README.md)
+- [OpenShift Enterprise 3.0 to 3.1](v3_0_to_v3_1/README.md)
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/README.md
new file mode 100644
index 000000000..c91a6cb96
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/README.md
@@ -0,0 +1,21 @@
+# v3.0 minor upgrade playbook
+**Note:** This playbook will re-run installation steps, overwriting any local
+modifications. You should ensure that your inventory has been updated with any
+modifications you have made since your initial installation. If you find any items
+that cannot be configured via Ansible, please open an issue at
+https://github.com/openshift/openshift-ansible
+
+## Overview
+This playbook is available as a technical preview. It currently performs the
+following steps.
+
+ * Upgrade and restart master services
+ * Upgrade and restart node services
+ * Applies latest configuration by re-running the installation playbook
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
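+
+The underlying common upgrade playbook honours an openshift_pkg_version
+variable, which is appended directly to the package names it installs. If you
+need to pin the upgrade to a specific build it can be passed as an extra var;
+the version string below is purely illustrative:
+
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml -e openshift_pkg_version=-3.0.2.0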
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
new file mode 100644
index 000000000..628a07752
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
@@ -0,0 +1,13 @@
+---
+- include: ../../../../common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
+ vars_files:
+ - "{{lookup('file', '../../../../byo/openshift-cluster/cluster_hosts.yml')}}"
+ vars:
+ g_etcd_hosts: "{{ groups.etcd | default([]) }}"
+ g_master_hosts: "{{ groups.masters | default([]) }}"
+ g_new_master_hosts: []
+ g_nfs_hosts: "{{ groups.nfs | default([]) }}"
+ g_node_hosts: "{{ groups.nodes | default([]) }}"
+ g_lb_hosts: "{{ groups.lb | default([]) }}"
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md
new file mode 100644
index 000000000..eb1f481d7
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md
@@ -0,0 +1,16 @@
+# v3.0 to v3.1 upgrade playbook
+
+## Overview
+This playbook currently performs the
+following steps.
+
+ * Upgrade and restart master services
+ * Upgrade and restart node services
+ * Modifies the subset of the configuration necessary for the upgrade
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
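+
+The wrapped common playbook expects deployment_type to be set, normally via
+the inventory. If it is not, it can be supplied on the command line; the value
+below is just an example:
+
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml -e deployment_type=openshift-enterprise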
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
new file mode 100644
index 000000000..8fadd2ce7
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -0,0 +1,13 @@
+---
+- include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+ vars_files:
+ - "{{lookup('file', '../../../../byo/openshift-cluster/cluster_hosts.yml')}}"
+ vars:
+ g_etcd_hosts: "{{ groups.etcd | default([]) }}"
+ g_master_hosts: "{{ groups.masters | default([]) }}"
+ g_new_master_hosts: []
+ g_nfs_hosts: "{{ groups.nfs | default([]) }}"
+ g_node_hosts: "{{ groups.nodes | default([]) }}"
+ g_lb_hosts: "{{ groups.lb | default([]) }}"
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md
new file mode 100644
index 000000000..b230835c3
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md
@@ -0,0 +1,17 @@
+# v3.1 minor upgrade playbook
+This upgrade will preserve all locally made configuration modifications to the
+Masters and Nodes.
+
+## Overview
+This playbook is available as a technical preview. It currently performs the
+following steps.
+
+ * Upgrade and restart master services
+ * Upgrade and restart node services
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
new file mode 100644
index 000000000..42078584b
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
@@ -0,0 +1,15 @@
+---
+- include: ../../../../common/openshift-cluster/evaluate_groups.yml
+ vars:
+ g_etcd_hosts: "{{ groups.etcd | default([]) }}"
+ g_master_hosts: "{{ groups.masters | default([]) }}"
+ g_new_master_hosts: []
+ g_nfs_hosts: "{{ groups.nfs | default([]) }}"
+ g_node_hosts: "{{ groups.nodes | default([]) }}"
+ g_lb_hosts: "{{ groups.lb | default([]) }}"
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/pre.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
+- include: ../../../openshift-master/restart.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/post.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/README.md
new file mode 100644
index 000000000..62577c3df
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/README.md
@@ -0,0 +1,16 @@
+# v3.1 to v3.2 upgrade playbook
+
+## Overview
+This playbook currently performs the
+following steps.
+
+ * Upgrade and restart master services
+ * Upgrade and restart node services
+ * Modifies the subset of the configuration necessary for the upgrade
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
new file mode 100644
index 000000000..0c91b51d6
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
@@ -0,0 +1,19 @@
+---
+- include: ../../../../common/openshift-cluster/evaluate_groups.yml
+ vars:
+ g_etcd_hosts: "{{ groups.etcd | default([]) }}"
+ g_master_hosts: "{{ groups.masters | default([]) }}"
+ g_new_master_hosts: []
+ g_nfs_hosts: "{{ groups.nfs | default([]) }}"
+ g_node_hosts: "{{ groups.nodes | default([]) }}"
+ g_lb_hosts: "{{ groups.lb | default([]) }}"
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+- include: ../../../openshift-master/restart.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
diff --git a/playbooks/byo/openshift-master/filter_plugins b/playbooks/byo/openshift-master/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/byo/openshift-master/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-master/lookup_plugins b/playbooks/byo/openshift-master/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/byo/openshift-master/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml
new file mode 100644
index 000000000..a78a6aa3d
--- /dev/null
+++ b/playbooks/byo/openshift-master/restart.yml
@@ -0,0 +1,4 @@
+---
+- include: ../../common/openshift-master/restart.yml
+ vars_files:
+ - ../../byo/openshift-cluster/cluster_hosts.yml
diff --git a/playbooks/byo/openshift-master/roles b/playbooks/byo/openshift-master/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/byo/openshift-master/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml
new file mode 100644
index 000000000..18797d02a
--- /dev/null
+++ b/playbooks/byo/openshift-master/scaleup.yml
@@ -0,0 +1,8 @@
+---
+- include: ../../common/openshift-master/scaleup.yml
+ vars_files:
+ - ../../byo/openshift-cluster/cluster_hosts.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: "{{ debug_level | default(2) }}"
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-node/filter_plugins b/playbooks/byo/openshift-node/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/byo/openshift-node/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-node/lookup_plugins b/playbooks/byo/openshift-node/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/byo/openshift-node/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-node/roles b/playbooks/byo/openshift-node/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/byo/openshift-node/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml
new file mode 100644
index 000000000..0343597b5
--- /dev/null
+++ b/playbooks/byo/openshift-node/scaleup.yml
@@ -0,0 +1,8 @@
+---
+- include: ../../common/openshift-node/scaleup.yml
+ vars_files:
+ - ../../byo/openshift-cluster/cluster_hosts.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: "{{ debug_level | default(2) }}"
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index 6d7c12fd4..916dfd0a6 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -1,10 +1,10 @@
---
- name: Gather Cluster facts
- hosts: all
- gather_facts: no
+ hosts: OSEv3
roles:
- openshift_facts
tasks:
- openshift_facts:
+ openshift_env: "{{ hostvars[inventory_hostname] | oo_openshift_env }}"
register: result
- debug: var=result
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index f564905ea..990ddd2f2 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -4,7 +4,7 @@
openshift_deployment_type: "{{ deployment_type }}"
roles:
- role: rhel_subscribe
- when: deployment_type == "enterprise" and
+ when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
ansible_distribution == "RedHat" and
lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
default('no', True) | lower in ['no', 'false']
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
new file mode 100644
index 000000000..1ac78468a
--- /dev/null
+++ b/playbooks/common/openshift-cluster/additional_config.yml
@@ -0,0 +1,56 @@
+- name: Configure flannel
+ hosts: oo_first_master
+ vars:
+ etcd_urls: "{{ openshift.master.etcd_urls }}"
+ roles:
+ - role: flannel_register
+ when: openshift.common.use_flannel | bool
+
+- name: Additional master configuration
+ hosts: oo_first_master
+ vars:
+ cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
+ etcd_urls: "{{ openshift.master.etcd_urls }}"
+ openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
+ omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
+ roles:
+ - role: openshift_master_cluster
+ when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
+ - role: openshift_examples
+ when: openshift.common.install_examples | bool
+ - role: openshift_cluster_metrics
+ when: openshift.common.use_cluster_metrics | bool
+ - role: openshift_manageiq
+ when: openshift.common.use_manageiq | bool
+ - role: cockpit
+ when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
+ (osm_use_cockpit | bool or osm_use_cockpit is undefined )
+ - role: flannel_register
+ when: openshift.common.use_flannel | bool
+ - role: pods
+ when: openshift.common.deployment_type == 'online'
+ - role: os_env_extras
+ when: openshift.common.deployment_type == 'online'
+
+- name: Create persistent volumes and create hosted services
+ hosts: oo_first_master
+ vars:
+ attach_registry_volume: "{{ openshift.hosted.registry.storage.kind != None }}"
+ deploy_infra: "{{ openshift.master.infra_nodes | default([]) | length > 0 }}"
+ persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
+ persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
+ roles:
+ - role: openshift_persistent_volumes
+ when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
+ - role: openshift_serviceaccounts
+ openshift_serviceaccounts_names:
+ - router
+ - registry
+ openshift_serviceaccounts_namespace: default
+ openshift_serviceaccounts_sccs:
+ - privileged
+ - role: openshift_router
+ when: deploy_infra | bool
+ - role: openshift_registry
+ registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
+ when: deploy_infra | bool and attach_registry_volume | bool
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 4c74f96db..23c8f039e 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,74 +1,16 @@
---
-- name: Populate config host groups
- hosts: localhost
- gather_facts: no
- tasks:
- - fail:
- msg: This playbook rquires g_etcd_group to be set
- when: g_etcd_group is not defined
+- include: evaluate_groups.yml
- - fail:
- msg: This playbook rquires g_masters_group to be set
- when: g_masters_group is not defined
+- include: validate_hostnames.yml
- - fail:
- msg: This playbook rquires g_nodes_group to be set
- when: g_nodes_group is not defined
-
- - name: Evaluate oo_etcd_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_etcd_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_etcd_group] | default([])
-
- - name: Evaluate oo_masters_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_masters_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_masters_group] | default([])
-
- - name: Evaluate oo_nodes_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_nodes_group] | default([])
-
- - name: Evaluate oo_nodes_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_masters_group] | default([])
- when: g_nodeonmaster is defined and g_nodeonmaster == true
-
- - name: Evaluate oo_first_etcd
- add_host:
- name: "{{ groups[g_etcd_group][0] }}"
- groups: oo_first_etcd
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0
-
- - name: Evaluate oo_first_master
- add_host:
- name: "{{ groups[g_masters_group][0] }}"
- groups: oo_first_master
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
+- include: ../openshift-docker/config.yml
- include: ../openshift-etcd/config.yml
+- include: ../openshift-nfs/config.yml
+
- include: ../openshift-master/config.yml
+- include: additional_config.yml
+
- include: ../openshift-node/config.yml
- vars:
- osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
- osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
diff --git a/playbooks/common/openshift-cluster/create_services.yml b/playbooks/common/openshift-cluster/create_services.yml
deleted file mode 100644
index e70709d19..000000000
--- a/playbooks/common/openshift-cluster/create_services.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Deploy OpenShift Services
- hosts: "{{ g_svc_master }}"
- connection: ssh
- gather_facts: yes
- roles:
- - openshift_registry
- - openshift_router
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
new file mode 100644
index 000000000..432a92b49
--- /dev/null
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -0,0 +1,103 @@
+---
+- name: Populate config host groups
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: This playbook requires g_etcd_hosts to be set
+ when: g_etcd_hosts is not defined
+
+ - fail:
+ msg: This playbook requires g_master_hosts or g_new_master_hosts to be set
+ when: g_master_hosts is not defined and g_new_master_hosts is not defined
+
+ - fail:
+ msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
+ when: g_node_hosts is not defined and g_new_node_hosts is not defined
+
+ - fail:
+ msg: This playbook requires g_lb_hosts to be set
+ when: g_lb_hosts is not defined
+
+ - fail:
+ msg: This playbook requires g_nfs_hosts to be set
+ when: g_nfs_hosts is not defined
+
+ - fail:
+ msg: The nfs group must be limited to one host
+ when: g_nfs_hosts | default([]) | length > 1
+
+ - name: Evaluate oo_masters
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}"
+
+ - name: Evaluate oo_etcd_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_etcd_hosts | default([]) }}"
+
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}"
+
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}"
+
+ # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is defined
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_master_hosts | default([]) }}"
+ when: g_nodeonmaster | default(false) == true and g_new_node_hosts is not defined
+
+ - name: Evaluate oo_first_etcd
+ add_host:
+ name: "{{ g_etcd_hosts[0] }}"
+ groups: oo_first_etcd
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ when: g_etcd_hosts|length > 0
+
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ g_master_hosts[0] }}"
+ groups: oo_first_master
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ when: g_master_hosts|length > 0
+
+ - name: Evaluate oo_lb_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_lb_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_lb_hosts | default([]) }}"
+
+ - name: Evaluate oo_nfs_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nfs_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_nfs_hosts | default([]) }}"
diff --git a/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
index 1a6580795..1a6580795 100644
--- a/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
diff --git a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
index 36d7b7870..36d7b7870 100644
--- a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
diff --git a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
index 278942f8b..278942f8b 100644
--- a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
index 190e2d862..1474bb3ca 100644
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
@@ -4,7 +4,7 @@
openshift_deployment_type: "{{ deployment_type }}"
roles:
- role: rhel_subscribe
- when: deployment_type == "enterprise" and
+ when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
ansible_distribution == "RedHat" and
lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
default('no', True) | lower in ['no', 'false']
diff --git a/playbooks/common/openshift-cluster/upgrades/files/ensure_system_units_have_version.sh b/playbooks/common/openshift-cluster/upgrades/files/ensure_system_units_have_version.sh
new file mode 100644
index 000000000..239f43314
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/ensure_system_units_have_version.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+set -e
+
+SERVICE_TYPE=$1
+DEPLOYMENT_TYPE=$2
+VERSION="v${3}"
+
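+# Illustrative invocation (the argument values here are assumptions, not taken
+# from the playbooks): <service type> <deployment type> <version without the "v">, e.g.
+#   ./ensure_system_units_have_version.sh atomic-openshift openshift-enterprise 3.1.1
+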
+add_image_version_to_sysconfig () {
+ unit_name=$2
+ sysconfig_file=/etc/sysconfig/${unit_name}
+
+ if ! grep IMAGE_VERSION ${sysconfig_file}; then
+ sed -i "/CONFIG_FILE/a IMAGE_VERSION=${1}" ${sysconfig_file}
+ else
+ sed -i "s/\(IMAGE_VERSION=\).*/\1${1}/" ${sysconfig_file}
+ fi
+}
+
+add_image_version_to_unit () {
+ deployment_type=$1
+ unit_file=$2
+
+ if ! grep IMAGE_VERSION $unit_file; then
+ image_namespace="openshift/"
+ if [ $deployment_type == "atomic-enterprise" ]; then
+ image_namespace="aep3/"
+ elif [ $deployment_type == "openshift-enterprise" ]; then
+ image_namespace="openshift3/"
+ fi
+
+ sed -i "s|\(${image_namespace}[a-zA-Z0-9]\+\)|\1:\${IMAGE_VERSION}|" $unit_file
+ fi
+}
+
+for unit_file in $(ls /etc/systemd/system/${SERVICE_TYPE}*.service); do
+ unit_name=$(basename -s .service ${unit_file})
+ add_image_version_to_sysconfig $VERSION $unit_name
+ add_image_version_to_unit $DEPLOYMENT_TYPE $unit_file
+done
+
+if [ -e /etc/sysconfig/openvswitch ]; then
+ add_image_version_to_sysconfig $VERSION openvswitch
+else
+ echo IMAGE_VERSION=${VERSION} > /etc/sysconfig/openvswitch
+fi
+if ! grep EnvironmentFile /etc/systemd/system/openvswitch.service > /dev/null; then
+ sed -i "/Service/a EnvironmentFile=/etc/sysconfig/openvswitch" /etc/systemd/system/openvswitch.service
+fi
+add_image_version_to_unit $DEPLOYMENT_TYPE /etc/systemd/system/openvswitch.service
+
+systemctl daemon-reload
diff --git a/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh
new file mode 100644
index 000000000..da6bcd23c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# Here we don't really care if this is a master, api, controller or node image.
+# We just need to know the version of one of them.
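+# Illustrative usage (assumption): ./openshift_container_versions.sh atomic-openshift
+# where $1 is the containerized service type whose systemd units are inspected below.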
+unit_file=$(ls /etc/systemd/system/${1}*.service | head -n1)
+installed_container_name=$(basename -s .service ${unit_file})
+installed=$(docker exec ${installed_container_name} openshift version | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
+
+if [ ${1} == "origin" ]; then
+ image_name="openshift/origin"
+elif grep aep $unit_file 2>&1 > /dev/null; then
+ image_name="aep3/aep"
+elif grep openshift3 $unit_file 2>&1 > /dev/null; then
+ image_name="openshift3/ose"
+fi
+
+docker pull ${image_name} 2>&1 > /dev/null
+available=$(docker run --rm ${image_name} version | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
+
+echo "---"
+echo "curr_version: ${installed}"
+echo "avail_version: ${available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
new file mode 100644
index 000000000..e5c958ebb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
@@ -0,0 +1,193 @@
+#!/usr/bin/env python
+"""
+Pre-upgrade checks that must be run on a master before proceeding with upgrade.
+"""
+# This is a script not a python module:
+# pylint: disable=invalid-name
+
+# NOTE: This script should not require any python libs other than what is
+# in the standard library.
+
+__license__ = "ASL 2.0"
+
+import json
+import os
+import subprocess
+import re
+
+# The maximum length of container.ports.name
+ALLOWED_LENGTH = 15
+# The valid structure of container.ports.name
+ALLOWED_CHARS = re.compile('^[a-z0-9][a-z0-9\\-]*[a-z0-9]$')
+AT_LEAST_ONE_LETTER = re.compile('[a-z]')
+# Look at OC_PATH for the full path. Defaults to 'oc'.
+OC_PATH = os.getenv('OC_PATH', 'oc')
+
+
+def validate(value):
+ """
+ validate verifies that value matches required conventions
+
+ Rules of container.ports.name validation:
+
+ * must be less than 16 chars
+ * at least one letter
+ * only a-z0-9-
+ * hyphens cannot be leading or trailing or next to each other
+
+ :Parameters:
+ - `value`: Value to validate
+ """
+ if len(value) > ALLOWED_LENGTH:
+ return False
+
+ if '--' in value:
+ return False
+
+ # We search since it can be anywhere
+ if not AT_LEAST_ONE_LETTER.search(value):
+ return False
+
+ # We match because it must start at the beginning
+ if not ALLOWED_CHARS.match(value):
+ return False
+ return True
+
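+# Examples of the rules above (illustrative values only):
+#   validate('web') and validate('http-8080') return True, while
+#   validate('-http'), validate('a--b') and validate('9000') return False.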
+
+def list_items(kind):
+ """
+ list_items returns a list of items from the api
+
+ :Parameters:
+ - `kind`: Kind of item to access
+ """
+ response = subprocess.check_output([OC_PATH, 'get', '--all-namespaces', '-o', 'json', kind])
+ items = json.loads(response)
+ return items.get("items", [])
+
+
+def get(obj, *paths):
+ """
+ Gets an object
+
+ :Parameters:
+ - `obj`: A dictionary structure
+ - `paths`: All other non-keyword arguments
+ """
+ ret_obj = obj
+ for path in paths:
+ if ret_obj.get(path, None) is None:
+ return []
+ ret_obj = ret_obj[path]
+ return ret_obj
+
+
+# pylint: disable=too-many-arguments
+def pretty_print_errors(namespace, kind, item_name, container_name, invalid_label, port_name, valid):
+ """
+ Prints out results in human friendly way.
+
+ :Parameters:
+ - `namespace`: Namespace of the resource
+ - `kind`: Kind of the resource
+ - `item_name`: Name of the resource
+ - `container_name`: Name of the container. May be "" when kind=Service.
+ - `port_name`: Name of the port
+ - `invalid_label`: The label of the invalid port. Port.name/targetPort
+ - `valid`: True if the port is valid
+ """
+ if not valid:
+ if len(container_name) > 0:
+ print('%s/%s -n %s (Container="%s" %s="%s")' % (
+ kind, item_name, namespace, container_name, invalid_label, port_name))
+ else:
+ print('%s/%s -n %s (%s="%s")' % (
+ kind, item_name, namespace, invalid_label, port_name))
+
+
+def print_validation_header():
+ """
+ Prints the error header. Should run on the first error to avoid
+ overwhelming the user.
+ """
+ print """\
+At least one port name is invalid and must be corrected before upgrading.
+Please update or remove any resources with invalid port names.
+
+ Valid port names must:
+
+ * be less than 16 characters
+ * have at least one letter
+ * contain only a-z0-9-
+ * not start or end with -
+ * not contain dashes next to each other ('--')
+"""
+
+
+def main():
+ """
+ main is the main entry point to this script
+ """
+ try:
+ # the comma at the end suppresses the newline
+ print "Checking for oc ...",
+ subprocess.check_output([OC_PATH, 'whoami'])
+ print "found"
+ except:
+ print(
+ 'Unable to run "%s whoami"\n'
+ 'Please ensure OpenShift is running, and "oc" is on your system '
+ 'path.\n'
+ 'You can override the path with the OC_PATH environment variable.'
+ % OC_PATH)
+ raise SystemExit(1)
+
+ # Where the magic happens
+ first_error = True
+ for kind, path in [
+ ('deploymentconfigs', ("spec", "template", "spec", "containers")),
+ ('replicationcontrollers', ("spec", "template", "spec", "containers")),
+ ('pods', ("spec", "containers"))]:
+ for item in list_items(kind):
+ namespace = item["metadata"]["namespace"]
+ item_name = item["metadata"]["name"]
+ for container in get(item, *path):
+ container_name = container["name"]
+ for port in get(container, "ports"):
+ port_name = port.get("name", None)
+ if not port_name:
+ # Unnamed ports are OK
+ continue
+ valid = validate(port_name)
+ if not valid and first_error:
+ first_error = False
+ print_validation_header()
+ pretty_print_errors(
+ namespace, kind, item_name,
+ container_name, "Port.name", port_name, valid)
+
+ # Services follow a different flow
+ for item in list_items('services'):
+ namespace = item["metadata"]["namespace"]
+ item_name = item["metadata"]["name"]
+ for port in get(item, "spec", "ports"):
+ port_name = port.get("targetPort", None)
+ if isinstance(port_name, int) or port_name is None:
+ # Integer only or unnamed ports are OK
+ continue
+ valid = validate(port_name)
+ if not valid and first_error:
+ first_error = False
+ print_validation_header()
+ pretty_print_errors(
+ namespace, "services", item_name, "",
+ "targetPort", port_name, valid)
+
+ # If we had at least 1 error then exit with 1
+ if not first_error:
+ raise SystemExit(1)
+
+
+if __name__ == '__main__':
+ main()
+
diff --git a/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh
new file mode 100644
index 000000000..a2a9579b5
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
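+# Illustrative usage (assumption): ./rpm_versions.sh atomic-openshift atomic-openshift-master
+# Every package name passed as an argument is queried; the output is the same
+# "---" / curr_version / avail_version YAML emitted by openshift_container_versions.sh.
+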
+installed=$(yum list installed -e 0 -q "$@" 2>&1 | tail -n +2 | awk '{ print $2 }' | sort -r | tr '\n' ' ')
+available=$(yum list available -e 0 -q "$@" 2>&1 | tail -n +2 | grep -v 'el7ose' | awk '{ print $2 }' | sort -r | tr '\n' ' ')
+
+echo "---"
+echo "curr_version: ${installed}"
+echo "avail_version: ${available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/filter_plugins b/playbooks/common/openshift-cluster/upgrades/filter_plugins
new file mode 120000
index 000000000..b1213dedb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/filter_plugins
@@ -0,0 +1 @@
+../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
new file mode 100755
index 000000000..9a065fd1c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+"""Ansible module for modifying OpenShift configs during an upgrade"""
+
+import os
+import yaml
+
+DOCUMENTATION = '''
+---
+module: openshift_upgrade_config
+short_description: OpenShift Upgrade Config
+author: Jason DeTiberus
+requirements: [ ]
+'''
+EXAMPLES = '''
+'''
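+# Illustrative task usage (an assumption that simply mirrors the argument_spec
+# defined in main() below):
+#   - openshift_upgrade_config:
+#       config_base: "{{ openshift.common.config_base }}"
+#       from_version: '3.0'
+#       to_version: '3.1'
+#       role: master
+#       backup: true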
+
+def modify_api_levels(level_list, remove, ensure, msg_prepend='',
+ msg_append=''):
+ """ modify_api_levels """
+ changed = False
+ changes = []
+
+ if not isinstance(remove, list):
+ remove = []
+
+ if not isinstance(ensure, list):
+ ensure = []
+
+ if not isinstance(level_list, list):
+ new_list = []
+ changed = True
+ changes.append("%s created missing %s" % (msg_prepend, msg_append))
+ else:
+ new_list = level_list
+ for level in remove:
+ if level in new_list:
+ new_list.remove(level)
+ changed = True
+ changes.append("%s removed %s %s" % (msg_prepend, level, msg_append))
+
+ for level in ensure:
+ if level not in new_list:
+ new_list.append(level)
+ changed = True
+ changes.append("%s added %s %s" % (msg_prepend, level, msg_append))
+
+ return {'new_list': new_list, 'changed': changed, 'changes': changes}
+
+
+def upgrade_master_3_0_to_3_1(ansible_module, config_base, backup):
+ """Main upgrade method for 3.0 to 3.1."""
+ changes = []
+
+ # Facts do not get transferred to the hosts where custom modules run,
+ # need to make some assumptions here.
+ master_config = os.path.join(config_base, 'master/master-config.yaml')
+
+ master_cfg_file = open(master_config, 'r')
+ config = yaml.safe_load(master_cfg_file.read())
+ master_cfg_file.close()
+
+
+ # Remove unsupported api versions and ensure supported api versions from
+ # master config
+ unsupported_levels = ['v1beta1', 'v1beta2', 'v1beta3']
+ supported_levels = ['v1']
+
+ result = modify_api_levels(config.get('apiLevels'), unsupported_levels,
+ supported_levels, 'master-config.yaml:', 'from apiLevels')
+ if result['changed']:
+ config['apiLevels'] = result['new_list']
+ changes.append(result['changes'])
+
+ if 'kubernetesMasterConfig' in config and 'apiLevels' in config['kubernetesMasterConfig']:
+ config['kubernetesMasterConfig'].pop('apiLevels')
+ changes.append('master-config.yaml: removed kubernetesMasterConfig.apiLevels')
+
+ # Add masterCA to serviceAccountConfig
+ if 'serviceAccountConfig' in config and 'masterCA' not in config['serviceAccountConfig']:
+ config['serviceAccountConfig']['masterCA'] = config['oauthConfig'].get('masterCA', 'ca.crt')
+
+ # Add proxyClientInfo to master-config
+ if 'proxyClientInfo' not in config['kubernetesMasterConfig']:
+ config['kubernetesMasterConfig']['proxyClientInfo'] = {
+ 'certFile': 'master.proxy-client.crt',
+ 'keyFile': 'master.proxy-client.key'
+ }
+ changes.append("master-config.yaml: added proxyClientInfo")
+
+ if len(changes) > 0:
+ if backup:
+ # TODO: Check success:
+ ansible_module.backup_local(master_config)
+
+ # Write the modified config:
+ out_file = open(master_config, 'w')
+ out_file.write(yaml.safe_dump(config, default_flow_style=False))
+ out_file.close()
+
+ return changes
+
+
+def upgrade_master(ansible_module, config_base, from_version, to_version, backup):
+ """Upgrade entry point."""
+ if from_version == '3.0':
+ if to_version == '3.1':
+ return upgrade_master_3_0_to_3_1(ansible_module, config_base, backup)
+
+
+def main():
+ """ main """
+ # disabling pylint errors for global-variable-undefined and invalid-name
+ # for 'global module' usage, since it is required to use ansible_facts
+ # pylint: disable=global-variable-undefined, invalid-name,
+ # redefined-outer-name
+ global module
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ config_base=dict(required=True),
+ from_version=dict(required=True, choices=['3.0']),
+ to_version=dict(required=True, choices=['3.1']),
+ role=dict(required=True, choices=['master']),
+ backup=dict(required=False, default=True, type='bool')
+ ),
+ supports_check_mode=True,
+ )
+
+ from_version = module.params['from_version']
+ to_version = module.params['to_version']
+ role = module.params['role']
+ backup = module.params['backup']
+ config_base = module.params['config_base']
+
+ try:
+ changes = []
+ if role == 'master':
+ changes = upgrade_master(module, config_base, from_version,
+ to_version, backup)
+
+ changed = len(changes) > 0
+ return module.exit_json(changed=changed, changes=changes)
+
+ # ignore broad-except error to avoid stack trace to ansible user
+ # pylint: disable=broad-except
+ except Exception, e:
+ return module.fail_json(msg=str(e))
+
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/playbooks/common/openshift-cluster/upgrades/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/lookup_plugins
new file mode 120000
index 000000000..aff753026
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/lookup_plugins
@@ -0,0 +1 @@
+../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/roles b/playbooks/common/openshift-cluster/upgrades/roles
new file mode 120000
index 000000000..4bdbcbad3
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/roles
@@ -0,0 +1 @@
+../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/library b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/library
new file mode 120000
index 000000000..53bed9684
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/library
@@ -0,0 +1 @@
+../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
new file mode 100644
index 000000000..63c8ef756
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
@@ -0,0 +1,112 @@
+---
+- name: Evaluate groups
+ include: ../../evaluate_groups.yml
+
+- name: Re-Run cluster configuration to apply latest configuration changes
+ include: ../../config.yml
+
+- name: Upgrade masters
+ hosts: oo_masters_to_config
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ tasks:
+ - name: Upgrade master packages
+ action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest"
+ - name: Restart master services
+ service: name="{{ openshift.common.service_type}}-master" state=restarted
+
+- name: Upgrade nodes
+ hosts: oo_nodes_to_config
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ tasks:
+ - name: Upgrade node packages
+ action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest"
+ - name: Restart node services
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+- name: Determine new master version
+ hosts: oo_first_master
+ tasks:
+ - name: Determine new version
+ command: >
+ rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}-master
+ register: _new_version
+
+- name: Ensure AOS 3.0.2 or Origin 1.0.6
+ hosts: oo_first_master
+ tasks:
+ - fail:
+ msg: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
+ when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=') and _new_version.stdout | version_compare('3.0.2','<') )
+
+- name: Update cluster policy
+ hosts: oo_first_master
+ tasks:
+ - name: oadm policy reconcile-cluster-roles --confirm
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-roles --confirm
+
+- name: Upgrade default router
+ hosts: oo_first_master
+ vars:
+ - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
+ - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ tasks:
+ - name: Check for default router
+ command: >
+ {{ oc_cmd }} get -n default dc/router
+ register: _default_router
+ failed_when: false
+ changed_when: false
+ - name: Check for allowHostNetwork and allowHostPorts
+ when: _default_router.rc == 0
+ shell: >
+ {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
+ register: _scc
+ - name: Grant allowHostNetwork and allowHostPorts
+ when:
+ - _default_router.rc == 0
+ - "'false' in _scc.stdout"
+ command: >
+ {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
+ - name: Update deployment config to 1.0.4/3.0.1 spec
+ when: _default_router.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/router -p
+ '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
+ - name: Switch to hostNetwork=true
+ when: _default_router.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
+ - name: Update router image to current version
+ when: _default_router.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/router -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
+
+- name: Upgrade default registry
+ hosts: oo_first_master
+ vars:
+ - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
+ - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ tasks:
+ - name: Check for default registry
+ command: >
+ {{ oc_cmd }} get -n default dc/docker-registry
+ register: _default_registry
+ failed_when: false
+ changed_when: false
+ - name: Update registry image to current version
+ when: _default_registry.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/docker-registry -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+
+- name: Update image streams and templates
+ hosts: oo_first_master
+ vars:
+ openshift_examples_import_command: "update"
+ openshift_deployment_type: "{{ deployment_type }}"
+ roles:
+ - openshift_examples
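
For reference, a minimal standalone sketch of the detect-then-guard pattern this minor
upgrade relies on (the group name and version bounds mirror the play above; the playbook
itself is illustrative and not part of the diff):

- name: Illustrative version guard
  hosts: oo_first_master
  tasks:
  - name: Query the installed master package version
    command: rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}-master
    register: _new_version
    changed_when: false
  - name: Abort when the detected version is too old
    fail:
      msg: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
    when: _new_version.stdout | version_compare('1.0.6','<') or
          ( _new_version.stdout | version_compare('3.0','>=') and
            _new_version.stdout | version_compare('3.0.2','<') )
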
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library
new file mode 120000
index 000000000..53bed9684
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library
@@ -0,0 +1 @@
+../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
new file mode 100644
index 000000000..6d7cefc8e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -0,0 +1,648 @@
+---
+###############################################################################
+# Evaluate host groups and gather facts
+###############################################################################
+- name: Evaluate host groups
+ include: ../../evaluate_groups.yml
+
+- name: Load openshift_facts
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_facts
+
+- name: Evaluate additional groups for upgrade
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - name: Evaluate etcd_hosts_to_backup
+ add_host:
+ name: "{{ item }}"
+ groups: etcd_hosts_to_backup
+ with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
+
+
+###############################################################################
+# Pre-upgrade checks
+###############################################################################
+- name: Verify upgrade can proceed
+ hosts: oo_first_master
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}"
+ gather_facts: no
+ tasks:
+ # Pacemaker is currently the only supported upgrade path for multiple masters
+ - fail:
+ msg: "openshift_master_cluster_method must be set to 'pacemaker'"
+ when: openshift_master_ha | bool and ((openshift_master_cluster_method is not defined) or (openshift_master_cluster_method is defined and openshift_master_cluster_method != "pacemaker"))
+
+ - fail:
+ msg: >
+ This upgrade is only supported for origin, openshift-enterprise, and online
+ deployment types
+ when: deployment_type not in ['origin','openshift-enterprise', 'online']
+
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a {{ target_version }} upgrade
+ when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
+
+ # If this script errors out ansible will show the default stdout/stderr
+ # which contains details for the user:
+ - script: ../files/pre-upgrade-check
+
+
+- name: Verify upgrade can proceed
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ vars:
+ target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}"
+ tasks:
+ - name: Clean package cache
+ command: "{{ ansible_pkg_mgr }} clean all"
+
+ - set_fact:
+ g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
+
+ - name: Determine available versions
+ script: ../files/rpm_versions.sh {{ g_new_service_name }} openshift
+ register: g_versions_result
+
+ - set_fact:
+ g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"
+
+ - set_fact:
+ g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
+ when: openshift_pkg_version is not defined
+
+ - set_fact:
+ g_new_version: "{{ openshift_pkg_version | replace('-','') }}"
+ when: openshift_pkg_version is defined
+
+ - fail:
+ msg: This playbook requires Origin 1.0.6 or later
+ when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.0.6','<')
+
+ - fail:
+ msg: Upgrade packages not found
+ when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
+
+ - set_fact:
+ pre_upgrade_complete: True
+
+
+##############################################################################
+# Gate on pre-upgrade checks
+##############################################################################
+- name: Gate on pre-upgrade checks
+ hosts: localhost
+ connection: local
+ become: no
+ vars:
+ pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
+ tasks:
+ - set_fact:
+ pre_upgrade_completed: "{{ hostvars
+ | oo_select_keys(pre_upgrade_hosts)
+ | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
+ - set_fact:
+ pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
+ when: pre_upgrade_failed | length > 0
+
+
+
+###############################################################################
+# Backup etcd
+###############################################################################
+- name: Backup etcd
+ hosts: etcd_hosts_to_backup
+ vars:
+ embedded_etcd: "{{ openshift.master.embedded_etcd }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ # Ensure we persist the etcd role for this host in openshift_facts
+ - openshift_facts:
+ role: etcd
+ local_facts: {}
+ when: "'etcd' not in openshift"
+
+ - stat: path=/var/lib/openshift
+ register: var_lib_openshift
+
+ - stat: path=/var/lib/origin
+ register: var_lib_origin
+
+ - name: Create origin symlink if necessary
+ file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+ when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+
+ # TODO: replace shell module with command and update later checks
+ # We assume to be using the data dir for all backups.
+ - name: Check available disk space for etcd backup
+ shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+ register: avail_disk
+
+ # TODO: replace shell module with command and update later checks
+ - name: Check current embedded etcd disk usage
+ shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+ register: etcd_disk_usage
+ when: embedded_etcd | bool
+
+ - name: Abort if insufficient disk space for etcd backup
+ fail:
+ msg: >
+ {{ etcd_disk_usage.stdout }} KiB disk space required for etcd backup,
+ {{ avail_disk.stdout }} KiB available.
+ when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+ - name: Install etcd (for etcdctl)
+ action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
+
+ - name: Generate etcd backup
+ command: >
+ etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+ --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
+
+ - set_fact:
+ etcd_backup_complete: True
+
+ - name: Display location of etcd backup
+ debug:
+ msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
+
+
+##############################################################################
+# Gate on etcd backup
+##############################################################################
+- name: Gate on etcd backup
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.etcd_hosts_to_backup)
+ | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when: etcd_backup_failed | length > 0
+
+
+
+###############################################################################
+# Upgrade Masters
+###############################################################################
+- name: Create temp directory for syncing certs
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - name: Create local temp directory for syncing certs
+ local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: g_master_mktemp
+ changed_when: False
+
+- name: Update deployment type
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ roles:
+ - openshift_facts
+ post_tasks:
+ - openshift_facts:
+ role: common
+ local_facts:
+ deployment_type: "{{ deployment_type }}"
+
+- name: Update master facts
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_facts
+ post_tasks:
+ - openshift_facts:
+ role: master
+ local_facts:
+ cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
+
+- name: Upgrade master packages and configuration
+ hosts: oo_masters_to_config
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ - name: Upgrade to latest available kernel
+ action: "{{ ansible_pkg_mgr}} name=kernel state=latest"
+
+ - name: Upgrade master packages
+ command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}"
+ when: openshift_pkg_version is not defined
+
+ - name: Upgrade packages
+ command: "{{ ansible_pkg_mgr}} install -y {{ openshift.common.installed_variant_rpms | oo_31_rpm_rename_conversion(openshift_version) | join (' ')}}"
+ when: openshift_pkg_version is defined and deployment_type == 'openshift-enterprise'
+
+ - name: Ensure python-yaml present for config upgrade
+ action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+ when: not openshift.common.is_atomic | bool
+
+ - name: Upgrade master configuration
+ openshift_upgrade_config:
+ from_version: '3.0'
+ to_version: '3.1'
+ role: master
+ config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
+
+ - set_fact:
+ openshift_master_certs_no_etcd:
+ - admin.crt
+ - master.kubelet-client.crt
+ - "{{ 'master.proxy-client.crt' if openshift.common.version_gte_3_1_or_1_1 else omit }}"
+ - master.server.crt
+ - openshift-master.crt
+ - openshift-registry.crt
+ - openshift-router.crt
+ - etcd.server.crt
+ openshift_master_certs_etcd:
+ - master.etcd-client.crt
+
+ - set_fact:
+ openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd)) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else openshift_master_certs_no_etcd }}"
+
+ - name: Check status of master certificates
+ stat:
+ path: "{{ openshift.common.config_base }}/master/{{ item }}"
+ with_items: openshift_master_certs
+ register: g_master_cert_stat_result
+
+ - set_fact:
+ master_certs_missing: "{{ False in (g_master_cert_stat_result.results
+ | oo_collect(attribute='stat.exists')
+ | list ) }}"
+ master_cert_subdir: master-{{ openshift.common.hostname }}
+ master_cert_config_dir: "{{ openshift.common.config_base }}/master"
+
+
+- name: Generate missing master certificates
+ hosts: oo_first_master
+ vars:
+ master_hostnames: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('openshift.common.all_hostnames')
+ | oo_flatten | unique }}"
+ master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs"
+ masters_needing_certs: "{{ hostvars
+ | oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))
+ | oo_filter_list(filter_attr='master_certs_missing') }}"
+ sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+ roles:
+ - openshift_master_certificates
+ post_tasks:
+ - name: Remove generated etcd client certs when using external etcd
+ file:
+ path: "{{ master_generated_certs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
+ state: absent
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
+ with_nested:
+ - masters_needing_certs
+ - - master.etcd-client.crt
+ - master.etcd-client.key
+
+ - name: Create a tarball of the master certs
+ command: >
+ tar -czvf {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz
+ -C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} .
+ with_items: masters_needing_certs
+
+ - name: Retrieve the master cert tarball from the master
+ fetch:
+ src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
+ dest: "{{ sync_tmpdir }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ with_items: masters_needing_certs
+
+
+- name: Sync generated certs, update service config and restart master services
+ hosts: oo_masters_to_config
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+ tasks:
+ - name: Unarchive the tarball on the master
+ unarchive:
+ src: "{{ sync_tmpdir }}/{{ master_cert_subdir }}.tgz"
+ dest: "{{ master_cert_config_dir }}"
+ when: inventory_hostname != groups.oo_first_master.0
+
+ - name: Restart master service
+ service: name="{{ openshift.common.service_type}}-master" state=restarted
+ when: not openshift_master_ha | bool
+
+ - name: Ensure the master service is enabled
+ service: name="{{ openshift.common.service_type}}-master" state=started enabled=yes
+ when: not openshift_master_ha | bool
+
+ - name: Check for configured cluster
+ stat:
+ path: /etc/corosync/corosync.conf
+ register: corosync_conf
+ when: openshift_master_ha | bool
+
+ - name: Destroy cluster
+ command: pcs cluster destroy --all
+ when: openshift_master_ha | bool and corosync_conf.stat.exists == true
+ run_once: true
+
+ - name: Start pcsd
+ service: name=pcsd enabled=yes state=started
+ when: openshift_master_ha | bool
+
+
+- name: Re-create cluster
+ hosts: oo_first_master
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+ omc_cluster_hosts: "{{ groups.oo_masters_to_config | join(' ') }}"
+ roles:
+ - role: openshift_master_cluster
+ when: openshift_master_ha | bool
+
+
+- name: Delete temporary directory on localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - file: name={{ g_master_mktemp.stdout }} state=absent
+ changed_when: False
+
+
+- name: Set master update status to complete
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_update_complete: True
+
+
+##############################################################################
+# Gate on master update complete
+##############################################################################
+- name: Gate on master update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ master_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+ - set_fact:
+ master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
+ when: master_update_failed | length > 0
+
+
+###############################################################################
+# Upgrade Nodes
+###############################################################################
+- name: Upgrade nodes
+ hosts: oo_nodes_to_config
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ - name: Upgrade node packages
+ command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}"
+ when: openshift_pkg_version is not defined
+
+ - name: Upgrade packages
+ command: "{{ ansible_pkg_mgr}} install -y {{ openshift.common.installed_variant_rpms | oo_31_rpm_rename_conversion(openshift_version) | join (' ')}}"
+ when: openshift_pkg_version is defined and deployment_type == 'openshift-enterprise'
+
+ - name: Restart node service
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+ - name: Ensure node service enabled
+ service: name="{{ openshift.common.service_type }}-node" state=started enabled=yes
+
+ - name: Install Ceph storage plugin dependencies
+ action: "{{ ansible_pkg_mgr }} name=ceph-common state=present"
+
+ - name: Install GlusterFS storage plugin dependencies
+ action: "{{ ansible_pkg_mgr }} name=glusterfs-fuse state=present"
+
+ - name: Set sebooleans to allow gluster storage plugin access from containers
+ seboolean:
+ name: "{{ item }}"
+ state: yes
+ persistent: yes
+ when: ansible_selinux and ansible_selinux.status == "enabled"
+ with_items:
+ - virt_use_fusefs
+ - virt_sandbox_use_fusefs
+ register: sebool_result
+ failed_when: "'state' not in sebool_result and 'msg' in sebool_result and 'SELinux boolean {{ item }} does not exist' not in sebool_result.msg"
+
+ - set_fact:
+ node_update_complete: True
+
+
+##############################################################################
+# Gate on nodes update
+##############################################################################
+- name: Gate on nodes update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ node_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_nodes_to_config)
+ | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
+ - set_fact:
+ node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
+ when: node_update_failed | length > 0
+
+
+###############################################################################
+# Post upgrade - Reconcile Cluster Roles and Cluster Role Bindings
+###############################################################################
+- name: Reconcile Cluster Roles and Cluster Role Bindings
+ hosts: oo_masters_to_config
+ vars:
+ origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
+ ent_reconcile_bindings: true
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ tasks:
+ - name: Reconcile Cluster Roles
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-roles --confirm
+ run_once: true
+
+ - name: Reconcile Cluster Role Bindings
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-role-bindings
+ --exclude-groups=system:authenticated
+ --exclude-groups=system:authenticated:oauth
+ --exclude-groups=system:unauthenticated
+ --exclude-users=system:anonymous
+ --additive-only=true --confirm
+ when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+ run_once: true
+
+ - name: Restart master services
+ service: name="{{ openshift.common.service_type}}-master" state=restarted
+ when: not openshift_master_ha | bool
+
+ - name: Restart master cluster
+ command: pcs resource restart master
+ when: openshift_master_ha | bool
+ run_once: true
+
+ - name: Wait for the clustered master service to be available
+ wait_for:
+ host: "{{ openshift_master_cluster_vip }}"
+ port: 8443
+ state: started
+ timeout: 180
+ delay: 90
+ when: openshift_master_ha | bool
+ run_once: true
+
+ - set_fact:
+ reconcile_complete: True
+
+
+##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ reconcile_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+ - set_fact:
+ reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+ when: reconcile_failed | length > 0
+
+
+
+
+###############################################################################
+# Post upgrade - Upgrade default router, default registry and examples
+###############################################################################
+- name: Upgrade default router and default registry
+ hosts: oo_first_master
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ roles:
+ # Create the new templates shipped in 3.1; existing templates are left
+ # unmodified. This prevents the subsequent openshift_examples role definition
+ # from failing when it tries to replace templates that do not already exist.
+ # We could potentially have done a replace --force to create and update in
+ # one step.
+ - openshift_examples
+ # Update the existing templates
+ - role: openshift_examples
+ openshift_examples_import_command: replace
+ pre_tasks:
+ - name: Collect all routers
+ command: >
+ {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
+ register: all_routers
+ failed_when: false
+ changed_when: false
+
+ - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
+ when: all_routers.rc == 0
+
+ - set_fact: haproxy_routers=[]
+ when: all_routers.rc != 0
+
+ - name: Check for allowHostNetwork and allowHostPorts
+ when: all_routers.rc == 0
+ shell: >
+ {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
+ register: _scc
+
+ - name: Grant allowHostNetwork and allowHostPorts
+ when:
+ - all_routers.rc == 0
+ - "'false' in _scc.stdout"
+ command: >
+ {{ oc_cmd }} patch scc/privileged -p
+ '{"allowHostPorts":true,"allowHostNetwork":true}' --api-version=v1
+
+ - name: Update deployment config to 1.0.4/3.0.1 spec
+ when: all_routers.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
+ '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
+ --api-version=v1
+ with_items: haproxy_routers
+
+ - name: Switch to hostNetwork=true
+ when: all_routers.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
+ --api-version=v1
+ with_items: haproxy_routers
+
+ - name: Update router image to current version
+ when: all_routers.rc == 0 and not openshift.common.version_gte_3_1_1_or_1_1_1
+ command: >
+ {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
+ --api-version=v1
+ with_items: haproxy_routers
+
+ - name: Update router image to current version
+ when: all_routers.rc == 0 and openshift.common.version_gte_3_1_1_or_1_1_1
+ command: >
+ {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
+ --api-version=v1
+ with_items: haproxy_routers
+
+ - name: Check for default registry
+ command: >
+ {{ oc_cmd }} get -n default dc/docker-registry
+ register: _default_registry
+ failed_when: false
+ changed_when: false
+
+ - name: Update registry image to current version
+ when: _default_registry.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/docker-registry -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+ --api-version=v1
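
The localhost "gate" plays repeated throughout this upgrade all follow one shape: collect
the hosts that set a completion fact, diff that list against the expected group, and fail
if anything is missing. A condensed sketch of that pattern, using the repository's
oo_select_keys and oo_collect filter plugins (the fact name step_complete is illustrative):

- name: Gate on an arbitrary completion fact
  hosts: localhost
  connection: local
  become: no
  tasks:
  - set_fact:
      step_completed: "{{ hostvars
                          | oo_select_keys(groups.oo_masters_to_config)
                          | oo_collect('inventory_hostname', {'step_complete': true}) }}"
  - fail:
      msg: "Upgrade cannot continue. The following hosts did not finish: {{ groups.oo_masters_to_config | difference(step_completed) | join(',') }}"
    when: groups.oo_masters_to_config | difference(step_completed) | length > 0
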
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library
new file mode 120000
index 000000000..53bed9684
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library
@@ -0,0 +1 @@
+../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
new file mode 100644
index 000000000..196393b2a
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
@@ -0,0 +1,57 @@
+---
+###############################################################################
+# Post upgrade - Upgrade default router, default registry and examples
+###############################################################################
+- name: Upgrade default router and default registry
+ hosts: oo_first_master
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ roles:
+ # Create the new templates shipped in 3.1.z; existing templates are left
+ # unmodified. This prevents the subsequent openshift_examples role definition
+ # from failing when it tries to replace templates that do not already exist.
+ # We could potentially have done a replace --force to create and update in
+ # one step.
+ - openshift_examples
+ # Update the existing templates
+ - role: openshift_examples
+ openshift_examples_import_command: replace
+ pre_tasks:
+ - name: Collect all routers
+ command: >
+ {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
+ register: all_routers
+ failed_when: false
+ changed_when: false
+
+ - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
+ when: all_routers.rc == 0
+
+ - set_fact: haproxy_routers=[]
+ when: all_routers.rc != 0
+
+ - name: Update router image to current version
+ when: all_routers.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
+ --api-version=v1
+ with_items: haproxy_routers
+
+ - name: Check for default registry
+ command: >
+ {{ oc_cmd }} get -n default dc/docker-registry
+ register: _default_registry
+ failed_when: false
+ changed_when: false
+
+ - name: Update registry image to current version
+ when: _default_registry.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/docker-registry -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+ --api-version=v1
+
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
new file mode 100644
index 000000000..66935e061
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
@@ -0,0 +1,88 @@
+---
+###############################################################################
+# Evaluate host groups and gather facts
+###############################################################################
+- name: Load openshift_facts
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_facts
+
+###############################################################################
+# Pre-upgrade checks
+###############################################################################
+- name: Verify upgrade can proceed
+ hosts: oo_first_master
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: >
+ This upgrade is only supported for origin, openshift-enterprise, and online
+ deployment types
+ when: deployment_type not in ['origin','openshift-enterprise', 'online']
+
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a {{ target_version }} upgrade
+ when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
+
+- name: Verify upgrade can proceed
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ vars:
+ target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
+ tasks:
+ - name: Clean package cache
+ command: "{{ ansible_pkg_mgr }} clean all"
+ when: not openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
+
+ - name: Determine available versions
+ script: ../files/rpm_versions.sh {{ g_new_service_name }}
+ register: g_versions_result
+
+ - set_fact:
+ g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"
+
+ - set_fact:
+ g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
+
+ - fail:
+ msg: This playbook requires Origin 1.1 or later
+ when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
+
+ - fail:
+ msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
+ when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
+
+ - fail:
+ msg: Upgrade packages not found
+ when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
+
+ - set_fact:
+ pre_upgrade_complete: True
+
+
+##############################################################################
+# Gate on pre-upgrade checks
+##############################################################################
+- name: Gate on pre-upgrade checks
+ hosts: localhost
+ connection: local
+ become: no
+ vars:
+ pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
+ tasks:
+ - set_fact:
+ pre_upgrade_completed: "{{ hostvars
+ | oo_select_keys(pre_upgrade_hosts)
+ | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
+ - set_fact:
+ pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
+ when: pre_upgrade_failed | length > 0
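
The pre-upgrade play above assumes ../files/rpm_versions.sh prints a small YAML document
that from_yaml can parse into curr_version and avail_version keys; the concrete values
below are made up to show how the split('-', 1).0 step strips the RPM release:

# Assumed script output:
#   ---
#   curr_version: 3.1.1.6-1.git.0.abcdef.el7
#   avail_version: 3.1.1.911-1.git.0.fedcba.el7
- set_fact:
    g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"
- set_fact:
    # 3.1.1.911-1.git.0.fedcba.el7 -> 3.1.1.911
    g_new_version: "{{ g_aos_versions.avail_version.split('-', 1).0 }}"
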
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
new file mode 100644
index 000000000..54bb251f7
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
@@ -0,0 +1,140 @@
+---
+###############################################################################
+# The restart playbook should be run after this playbook completes.
+###############################################################################
+
+###############################################################################
+# Upgrade Masters
+###############################################################################
+- name: Upgrade master packages and configuration
+ hosts: oo_masters_to_config
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ tasks:
+ - name: Upgrade master packages
+ command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}"
+ when: not openshift.common.is_containerized | bool
+
+ - name: Ensure python-yaml present for config upgrade
+ action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+ when: not openshift.common.is_containerized | bool
+
+# Currently 3.1.1 does not have any new configuration settings
+#
+# - name: Upgrade master configuration
+# openshift_upgrade_config:
+# from_version: '3.0'
+# to_version: '3.1'
+# role: master
+# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
+
+- name: Set master update status to complete
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_update_complete: True
+
+##############################################################################
+# Gate on master update complete
+##############################################################################
+- name: Gate on master update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ master_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+ - set_fact:
+ master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
+ when: master_update_failed | length > 0
+
+###############################################################################
+# Upgrade Nodes
+###############################################################################
+- name: Upgrade nodes
+ hosts: oo_nodes_to_config
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ - name: Upgrade node packages
+ command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}"
+ when: not openshift.common.is_containerized | bool
+
+ - name: Restart node service
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+ - set_fact:
+ node_update_complete: True
+
+##############################################################################
+# Gate on nodes update
+##############################################################################
+- name: Gate on nodes update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ node_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_nodes_to_config)
+ | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
+ - set_fact:
+ node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
+ when: node_update_failed | length > 0
+
+###############################################################################
+# Reconcile Cluster Roles and Cluster Role Bindings
+###############################################################################
+- name: Reconcile Cluster Roles and Cluster Role Bindings
+ hosts: oo_masters_to_config
+ vars:
+ origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
+ ent_reconcile_bindings: true
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ tasks:
+ - name: Reconcile Cluster Roles
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-roles --confirm
+ run_once: true
+
+ - name: Reconcile Cluster Role Bindings
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-role-bindings
+ --exclude-groups=system:authenticated
+ --exclude-groups=system:authenticated:oauth
+ --exclude-groups=system:unauthenticated
+ --exclude-users=system:anonymous
+ --additive-only=true --confirm
+ when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+ run_once: true
+
+ - set_fact:
+ reconcile_complete: True
+
+##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ reconcile_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+ - set_fact:
+ reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+ when: reconcile_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
new file mode 100644
index 000000000..696994688
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
@@ -0,0 +1,9 @@
+- name: Update system_units
+ script: ../files/ensure_system_units_have_version.sh {{ openshift.common.service_type }} {{ openshift.common.deployment_type }} {{ g_new_version }}
+
+- name: Verify the correct version was configured
+ command: grep {{ verify_upgrade_version }} {{ item }}
+ with_items:
+ - /etc/sysconfig/openvswitch
+ - /etc/sysconfig/{{ openshift.common.service_type }}*
+ when: verify_upgrade_version is defined
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
new file mode 100644
index 000000000..d9177e8a0
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
@@ -0,0 +1,14 @@
+- name: Check if Docker is installed
+ command: rpm -q docker
+ register: pkg_check
+ failed_when: pkg_check.rc > 1
+ changed_when: no
+
+- name: Upgrade Docker
+ command: "{{ ansible_pkg_mgr}} update -y docker"
+ when: pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.9','<')
+ register: docker_upgrade
+
+- name: Restart Docker
+ service: name=docker state=restarted
+ when: docker_upgrade | changed
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library
new file mode 120000
index 000000000..53bed9684
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library
@@ -0,0 +1 @@
+../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
new file mode 100644
index 000000000..3fd97ac14
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
@@ -0,0 +1,57 @@
+---
+###############################################################################
+# Post upgrade - Upgrade default router, default registry and examples
+###############################################################################
+- name: Upgrade default router and default registry
+ hosts: oo_first_master
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ roles:
+ # Create the new templates shipped in 3.2; existing templates are left
+ # unmodified. This prevents the subsequent openshift_examples role definition
+ # from failing when it tries to replace templates that do not already exist.
+ # We could potentially have done a replace --force to create and update in
+ # one step.
+ - openshift_examples
+ # Update the existing templates
+ - role: openshift_examples
+ openshift_examples_import_command: replace
+ pre_tasks:
+ - name: Collect all routers
+ command: >
+ {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
+ register: all_routers
+ failed_when: false
+ changed_when: false
+
+ - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
+ when: all_routers.rc == 0
+
+ - set_fact: haproxy_routers=[]
+ when: all_routers.rc != 0
+
+ - name: Update router image to current version
+ when: all_routers.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
+ --api-version=v1
+ with_items: haproxy_routers
+
+ - name: Check for default registry
+ command: >
+ {{ oc_cmd }} get -n default dc/docker-registry
+ register: _default_registry
+ failed_when: false
+ changed_when: false
+
+ - name: Update registry image to current version
+ when: _default_registry.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/docker-registry -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+ --api-version=v1
+
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
new file mode 100644
index 000000000..c9afca559
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
@@ -0,0 +1,252 @@
+---
+###############################################################################
+# Evaluate host groups and gather facts
+###############################################################################
+- name: Load openshift_facts
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_facts
+
+- name: Evaluate additional groups for upgrade
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - name: Evaluate etcd_hosts_to_backup
+ add_host:
+ name: "{{ item }}"
+ groups: etcd_hosts_to_backup
+ with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
+
+###############################################################################
+# Pre-upgrade checks
+###############################################################################
+- name: Verify upgrade can proceed
+ hosts: oo_first_master
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: >
+ This upgrade is only supported for atomic-enterprise, origin, openshift-enterprise, and online
+ deployment types
+ when: deployment_type not in ['atomic-enterprise', 'origin','openshift-enterprise', 'online']
+
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a {{ target_version }} upgrade
+ when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
+
+ - fail:
+ msg: >
+ openshift_image_tag is {{ openshift_image_tag }} which is not a
+ valid version for a {{ target_version }} upgrade
+ when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(target_version ,'<')
+
+- name: Verify upgrade can proceed
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ vars:
+ target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
+ roles:
+ - openshift_cli
+ tasks:
+ - name: Clean package cache
+ command: "{{ ansible_pkg_mgr }} clean all"
+ when: not openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
+ when: not openshift.common.is_containerized | bool
+
+ - name: Determine available versions
+ script: ../files/rpm_versions.sh {{ g_new_service_name }}
+ register: g_rpm_versions_result
+ when: not openshift.common.is_containerized | bool
+
+ - set_fact:
+ g_aos_versions: "{{ g_rpm_versions_result.stdout | from_yaml }}"
+ when: not openshift.common.is_containerized | bool
+
+ - name: Determine available versions
+ script: ../files/openshift_container_versions.sh {{ openshift.common.service_type }}
+ register: g_containerized_versions_result
+ when: openshift.common.is_containerized | bool
+
+ - set_fact:
+ g_aos_versions: "{{ g_containerized_versions_result.stdout | from_yaml }}"
+ when: openshift.common.is_containerized | bool
+
+ - set_fact:
+ g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
+ when: openshift_pkg_version is not defined
+
+ - set_fact:
+ g_new_version: "{{ openshift_pkg_version | replace('-','') }}"
+ when: openshift_pkg_version is defined
+
+ - set_fact:
+ g_new_version: "{{ openshift_image_tag | replace('v','') }}"
+ when: openshift_image_tag is defined
+
+ - fail:
+ msg: Expected upgrade version {{ verify_upgrade_version }} but detected version {{ g_new_version }}
+ when: verify_upgrade_version is defined and g_new_version != verify_upgrade_version
+
+ - name: Update systemd units
+ script: ../files/ensure_system_units_have_version.sh {{ openshift.common.service_type }} {{ openshift.common.deployment_type }} {{ g_aos_versions.curr_version }}
+ when: openshift.common.is_containerized | bool
+
+ # Note: the version number is hardcoded here in hopes of catching potential
+ # bugs in how g_aos_versions.curr_version is set
+ - name: Verify the correct version is installed for upgrade
+ shell: grep 3.1.1.6 {{ item }}
+ with_items:
+ - /etc/sysconfig/openvswitch
+ - /etc/sysconfig/{{ openshift.common.service_type }}*
+ when: verify_upgrade_version is defined
+
+ - fail:
+ msg: This playbook requires Origin 1.1 or later
+ when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
+
+ - fail:
+ msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
+ when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
+
+ # TODO: this may only make sense for RPM installs. We probably need another check for containerized installs.
+ - fail:
+ msg: Upgrade packages not found
+ when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
+
+ - name: Determine available Docker
+ script: ../files/rpm_versions.sh docker
+ register: g_docker_version_result
+ when: not openshift.common.is_atomic | bool
+
+ - name: Determine available Docker
+ shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
+ register: g_atomic_docker_version_result
+ when: openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}"
+ when: not openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+ when: openshift.common.is_atomic | bool
+
+ - fail:
+ msg: This playbook requires access to Docker 1.9 or later
+ when: not openshift.common.is_atomic | bool
+ and (g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.9','<'))
+
+ # TODO: add check to upgrade ostree to get latest Docker
+
+ - set_fact:
+ pre_upgrade_complete: True
+
+
+##############################################################################
+# Gate on pre-upgrade checks
+##############################################################################
+- name: Gate on pre-upgrade checks
+ hosts: localhost
+ connection: local
+ become: no
+ vars:
+ pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
+ tasks:
+ - set_fact:
+ pre_upgrade_completed: "{{ hostvars
+ | oo_select_keys(pre_upgrade_hosts)
+ | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
+ - set_fact:
+ pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
+ when: pre_upgrade_failed | length > 0
+
+###############################################################################
+# Backup etcd
+###############################################################################
+- name: Backup etcd
+ hosts: etcd_hosts_to_backup
+ vars:
+ embedded_etcd: "{{ openshift.master.embedded_etcd }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ # Ensure we persist the etcd role for this host in openshift_facts
+ - openshift_facts:
+ role: etcd
+ local_facts: {}
+ when: "'etcd' not in openshift"
+
+ - stat: path=/var/lib/openshift
+ register: var_lib_openshift
+
+ - stat: path=/var/lib/origin
+ register: var_lib_origin
+
+ - name: Create origin symlink if necessary
+ file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+ when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+
+ # TODO: replace shell module with command and update later checks
+ # We assume to be using the data dir for all backups.
+ - name: Check available disk space for etcd backup
+ shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+ register: avail_disk
+
+ # TODO: replace shell module with command and update later checks
+ - name: Check current embedded etcd disk usage
+ shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+ register: etcd_disk_usage
+ when: embedded_etcd | bool
+
+ - name: Abort if insufficient disk space for etcd backup
+ fail:
+ msg: >
+ {{ etcd_disk_usage.stdout }} KiB disk space required for etcd backup,
+ {{ avail_disk.stdout }} KiB available.
+ when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+ - name: Install etcd (for etcdctl)
+ action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
+ when: not openshift.common.is_atomic | bool
+
+ - name: Generate etcd backup
+ command: >
+ etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+ --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
+
+ - set_fact:
+ etcd_backup_complete: True
+
+ - name: Display location of etcd backup
+ debug:
+ msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
+
+
+##############################################################################
+# Gate on etcd backup
+##############################################################################
+- name: Gate on etcd backup
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.etcd_hosts_to_backup)
+ | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when: etcd_backup_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
new file mode 100644
index 000000000..7a2718e1b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
@@ -0,0 +1,6 @@
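+# component and g_new_version are supplied by the including play,
+# e.g. include: rpm_upgrade.yml component=master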
+- name: Upgrade packages
+ command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-{{ component }}-{{ g_new_version }}"
+
+- name: Ensure python-yaml present for config upgrade
+ action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+ when: not openshift.common.is_atomic | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
new file mode 100644
index 000000000..d84d9f674
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
@@ -0,0 +1,159 @@
+---
+###############################################################################
+# The restart playbook should be run after this playbook completes.
+###############################################################################
+
+- name: Upgrade docker
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - include: docker_upgrade.yml
+ when: not openshift.common.is_atomic | bool
+
+###############################################################################
+# Upgrade Masters
+###############################################################################
+- name: Upgrade master
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - include: rpm_upgrade.yml component=master
+ when: not openshift.common.is_containerized | bool
+
+ - include: containerized_upgrade.yml
+ when: openshift.common.is_containerized | bool
+
+# - name: Upgrade master configuration
+# openshift_upgrade_config:
+# from_version: '3.1'
+# to_version: '3.2'
+# role: master
+# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
+
+- name: Set master update status to complete
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_update_complete: True
+
+##############################################################################
+# Gate on master update complete
+##############################################################################
+- name: Gate on master update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ master_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+ - set_fact:
+ master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
+ when: master_update_failed | length > 0
+
+###############################################################################
+# Upgrade Nodes
+###############################################################################
+- name: Upgrade nodes
+ hosts: oo_nodes_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - include: rpm_upgrade.yml
+ vars:
+ component: "node"
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ when: not openshift.common.is_containerized | bool
+
+ - include: containerized_upgrade.yml
+ when: openshift.common.is_containerized | bool
+
+ # This will restart the node
+ - name: Restart node service
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+ - set_fact:
+ node_update_complete: True
+
+##############################################################################
+# Gate on nodes update
+##############################################################################
+- name: Gate on nodes update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ node_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_nodes_to_config)
+ | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
+ - set_fact:
+ node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
+ when: node_update_failed | length > 0
+
+###############################################################################
+# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
+###############################################################################
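+# Reconciliation updates the default cluster roles, role bindings and SCCs to match
+# the new version; each command only needs to run once against a single master.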
+- name: Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
+ hosts: oo_masters_to_config
+ roles:
+ - { role: openshift_cli, openshift_image_tag: "v{{ g_new_version }}" }
+ vars:
+ origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
+ ent_reconcile_bindings: true
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ tasks:
+ - name: Verify that the correct command-line tools are available
+ shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary }}
+ when: openshift.common.is_containerized | bool and verify_upgrade_version is defined
+
+ - name: Reconcile Cluster Roles
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-roles --confirm
+ run_once: true
+
+ - name: Reconcile Cluster Role Bindings
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-role-bindings
+ --exclude-groups=system:authenticated
+ --exclude-groups=system:authenticated:oauth
+ --exclude-groups=system:unauthenticated
+ --exclude-users=system:anonymous
+ --additive-only=true --confirm
+ when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+ run_once: true
+
+ - name: Reconcile Security Context Constraints
+ command: >
+ {{ openshift.common.admin_binary }} policy reconcile-sccs --confirm
+ run_once: true
+
+ - set_fact:
+ reconcile_complete: True
+
+##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ reconcile_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+ - set_fact:
+ reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+ when: reconcile_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml
new file mode 100644
index 000000000..fd82997b9
--- /dev/null
+++ b/playbooks/common/openshift-cluster/validate_hostnames.yml
@@ -0,0 +1,26 @@
+---
+- include: evaluate_groups.yml
+
+- name: Gather and set facts for node hosts
+ hosts: oo_nodes_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ hostname: "{{ openshift_hostname | default(None) }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
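+ # Look up the first IPv4 address the configured hostname resolves to; the warning
+ # below fires if that address is not owned by this host.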
+ - shell:
+ getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }'
+ register: lookupip
+ changed_when: false
+ failed_when: false
+ - name: Warn user about bad openshift_hostname values
+ pause:
+ prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort."
+ seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
+ when: lookupip.stdout not in ansible_all_ipv4_addresses
diff --git a/playbooks/common/openshift-docker/config.yml b/playbooks/common/openshift-docker/config.yml
new file mode 100644
index 000000000..092d5533c
--- /dev/null
+++ b/playbooks/common/openshift-docker/config.yml
@@ -0,0 +1,9 @@
+- name: Configure docker hosts
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ vars:
+ docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
+ docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
+ docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
+ roles:
+ - openshift_facts
+ - openshift_docker
diff --git a/playbooks/common/openshift-docker/filter_plugins b/playbooks/common/openshift-docker/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/common/openshift-docker/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-docker/lookup_plugins b/playbooks/common/openshift-docker/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/common/openshift-docker/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-docker/roles b/playbooks/common/openshift-docker/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/common/openshift-docker/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 952960652..93eb157cb 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -13,6 +13,9 @@
hostname: "{{ openshift_hostname | default(None) }}"
public_hostname: "{{ openshift_public_hostname | default(None) }}"
deployment_type: "{{ openshift_deployment_type }}"
+ - role: etcd
+ local_facts:
+ etcd_image: "{{ osm_etcd_image | default(None) }}"
- name: Check status of etcd certificates
stat:
path: "{{ item }}"
@@ -22,7 +25,7 @@
- /etc/etcd/ca.crt
register: g_etcd_server_cert_stat_result
- set_fact:
- etcd_server_certs_missing: "{{ g_etcd_server_cert_stat_result.results | map(attribute='stat.exists')
+ etcd_server_certs_missing: "{{ g_etcd_server_cert_stat_result.results | oo_collect(attribute='stat.exists')
| list | intersect([false])}}"
etcd_cert_subdir: etcd-{{ openshift.common.hostname }}
etcd_cert_config_dir: /etc/etcd
@@ -31,7 +34,7 @@
- name: Create temp directory for syncing certs
hosts: localhost
connection: local
- sudo: false
+ become: no
gather_facts: no
tasks:
- name: Create local temp directory for syncing certs
@@ -66,8 +69,32 @@
validate_checksum: yes
with_items: etcd_needing_server_certs
-- name: Configure etcd hosts
- hosts: oo_etcd_to_config
+# Configure the first etcd host on its own to avoid conflicts in choosing a leader
+# if other members come online too quickly.
+- name: Configure first etcd host
+ hosts: oo_first_etcd
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}"
+ etcd_url_scheme: https
+ etcd_peer_url_scheme: https
+ etcd_peers_group: oo_etcd_to_config
+ pre_tasks:
+ - name: Ensure certificate directory exists
+ file:
+ path: "{{ etcd_cert_config_dir }}"
+ state: directory
+ - name: Unarchive the tarball on the etcd host
+ unarchive:
+ src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"
+ dest: "{{ etcd_cert_config_dir }}"
+ when: etcd_server_certs_missing
+ roles:
+ - etcd
+ - role: nickhammond.logrotate
+
+# Configure the remaining etcd hosts, skipping the first one we dealt with above.
+- name: Configure remaining etcd hosts
+ hosts: oo_etcd_to_config:!oo_first_etcd
vars:
sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}"
etcd_url_scheme: https
@@ -90,7 +117,7 @@
- name: Delete temporary directory on localhost
hosts: localhost
connection: local
- sudo: false
+ become: no
gather_facts: no
tasks:
- file: name={{ g_etcd_mktemp.stdout }} state=absent
diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml
index 0bf69b22f..fd2bc24ae 100644
--- a/playbooks/common/openshift-etcd/service.yml
+++ b/playbooks/common/openshift-etcd/service.yml
@@ -1,6 +1,8 @@
---
- name: Populate g_service_masters host group if needed
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
tasks:
- fail: msg="new_cluster_state is required to be injected in this playbook"
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 64cf7a65b..3c7d94c96 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -2,6 +2,25 @@
- name: Set master facts and determine if external etcd certs need to be generated
hosts: oo_masters_to_config
pre_tasks:
+ - name: Check for RPM generated config marker file .config_managed
+ stat:
+ path: /etc/origin/.config_managed
+ register: rpmgenerated_config
+
+ - name: Remove RPM generated config files if present
+ file:
+ path: "/etc/origin/{{ item }}"
+ state: absent
+ when: rpmgenerated_config.stat.exists == true and deployment_type in ['openshift-enterprise', 'atomic-enterprise']
+ with_items:
+ - master
+ - node
+ - .config_managed
+
+ - set_fact:
+ openshift_master_pod_eviction_timeout: "{{ lookup('oo_option', 'openshift_master_pod_eviction_timeout') | default(none, true) }}"
+ when: openshift_master_pod_eviction_timeout is not defined
+
- set_fact:
openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
openshift_master_etcd_hosts: "{{ hostvars
@@ -9,6 +28,11 @@
| default([]))
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
+
+ - set_fact:
+ openshift_master_debug_level: "{{ lookup('oo_option', 'openshift_master_debug_level') | default(openshift.common.debug_level, true) }}"
+ when: openshift_master_debug_level is not defined
+
roles:
- openshift_facts
post_tasks:
@@ -19,22 +43,32 @@
- role: common
local_facts:
hostname: "{{ openshift_hostname | default(None) }}"
+ ip: "{{ openshift_ip | default(None) }}"
public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ public_ip: "{{ openshift_public_ip | default(None) }}"
deployment_type: "{{ openshift_deployment_type }}"
- role: master
local_facts:
api_port: "{{ openshift_master_api_port | default(None) }}"
api_url: "{{ openshift_master_api_url | default(None) }}"
api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
+ controllers_port: "{{ openshift_master_controllers_port | default(None) }}"
public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
- cluster_defer_ha: "{{ openshift_master_cluster_defer_ha | default(None) }}"
console_path: "{{ openshift_master_console_path | default(None) }}"
console_port: "{{ openshift_master_console_port | default(None) }}"
console_url: "{{ openshift_master_console_url | default(None) }}"
console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+ portal_net: "{{ openshift_master_portal_net | default(None) }}"
+ ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
+ master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
+ - openshift_facts:
+ role: hosted
+ openshift_env:
+ openshift_hosted_registry_storage_kind: 'nfs'
+ when: openshift_hosted_registry_storage_kind is not defined and groups.oo_nfs_to_config is defined and groups.oo_nfs_to_config | length > 0
- name: Check status of external etcd certificates
stat:
path: "{{ openshift.common.config_base }}/master/{{ item }}"
@@ -44,7 +78,7 @@
register: g_external_etcd_cert_stat_result
- set_fact:
etcd_client_certs_missing: "{{ g_external_etcd_cert_stat_result.results
- | map(attribute='stat.exists')
+ | oo_collect(attribute='stat.exists')
| list | intersect([false])}}"
etcd_cert_subdir: openshift-master-{{ openshift.common.hostname }}
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
@@ -54,7 +88,7 @@
- name: Create temp directory for syncing certs
hosts: localhost
connection: local
- sudo: false
+ become: no
gather_facts: no
tasks:
- name: Create local temp directory for syncing certs
@@ -68,6 +102,7 @@
etcd_generated_certs_dir: /etc/etcd/generated_certs
etcd_needing_client_certs: "{{ hostvars
| oo_select_keys(groups['oo_masters_to_config'])
+ | default([])
| oo_filter_list(filter_attr='etcd_client_certs_missing') }}"
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
roles:
@@ -116,12 +151,13 @@
when: etcd_client_certs_missing is defined and etcd_client_certs_missing
- name: Determine if master certificates need to be generated
- hosts: oo_masters_to_config
+ hosts: oo_first_master:oo_masters_to_config
tasks:
- set_fact:
openshift_master_certs_no_etcd:
- admin.crt
- master.kubelet-client.crt
+ - "{{ 'master.proxy-client.crt' if openshift.common.version_gte_3_1_or_1_1 else omit }}"
- master.server.crt
- openshift-master.crt
- openshift-registry.crt
@@ -129,6 +165,7 @@
- etcd.server.crt
openshift_master_certs_etcd:
- master.etcd-client.crt
+
- set_fact:
openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd)) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else openshift_master_certs_no_etcd }}"
@@ -138,11 +175,16 @@
with_items: openshift_master_certs
register: g_master_cert_stat_result
- set_fact:
- master_certs_missing: "{{ g_master_cert_stat_result.results
- | map(attribute='stat.exists')
- | list | intersect([false])}}"
+ master_certs_missing: "{{ False in (g_master_cert_stat_result.results
+ | oo_collect(attribute='stat.exists')
+ | list ) }}"
master_cert_subdir: master-{{ openshift.common.hostname }}
master_cert_config_dir: "{{ openshift.common.config_base }}/master"
+ - set_fact:
+ openshift_infra_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_nodes_with_label('region', 'infra')
+ | oo_collect('inventory_hostname') }}"
+ when: openshift_infra_nodes is not defined and groups.oo_nodes_to_config | default([]) | length > 0
- name: Configure master certificates
hosts: oo_first_master
@@ -151,6 +193,10 @@
masters_needing_certs: "{{ hostvars
| oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))
| oo_filter_list(filter_attr='master_certs_missing') }}"
+ master_hostnames: "{{ hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('openshift.common.all_hostnames')
+ | oo_flatten | unique }}"
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
roles:
- openshift_master_certificates
@@ -172,6 +218,7 @@
args:
creates: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
with_items: masters_needing_certs
+
- name: Retrieve the master cert tarball from the master
fetch:
src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
@@ -181,46 +228,138 @@
validate_checksum: yes
with_items: masters_needing_certs
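+# Stand up an haproxy load balancer in front of the master API; the haproxy role is
+# only applied when the first master reports openshift.master.ha (more than one
+# master by default).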
+- name: Configure load balancers
+ hosts: oo_lb_to_config
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+ haproxy_limit_nofile: 100000
+ haproxy_global_maxconn: 20000
+ haproxy_default_maxconn: 20000
+ haproxy_frontend_port: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}"
+ haproxy_frontends:
+ - name: atomic-openshift-api
+ mode: tcp
+ options:
+ - tcplog
+ binds:
+ - "*:{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}"
+ default_backend: atomic-openshift-api
+ haproxy_backends:
+ - name: atomic-openshift-api
+ mode: tcp
+ option: tcplog
+ balance: source
+ servers: "{{ hostvars | oo_select_keys(groups['oo_masters']) | oo_haproxy_backend_masters }}"
+ roles:
+ - role: openshift_facts
+ - role: haproxy
+ when: hostvars[groups.oo_first_master.0].openshift.master.ha | bool
+
+- name: Check for cached session secrets
+ hosts: oo_first_master
+ roles:
+ - role: openshift_facts
+ post_tasks:
+ - openshift_facts:
+ role: master
+ local_facts:
+ session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}"
+ session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}"
+
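+# New session secrets are only generated when none were cached above, so re-running
+# the playbook does not invalidate existing sessions.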
+- name: Generate master session secrets
+ hosts: oo_first_master
+ vars:
+ g_session_secrets_present: "{{ (openshift.master.session_auth_secrets | default([]) and openshift.master.session_encryption_secrets | default([])) | length > 0 }}"
+ g_session_auth_secrets: "{{ [ 24 | oo_generate_secret ] }}"
+ g_session_encryption_secrets: "{{ [ 24 | oo_generate_secret ] }}"
+ roles:
+ - role: openshift_facts
+ tasks:
+ - openshift_facts:
+ role: master
+ local_facts:
+ session_auth_secrets: "{{ g_session_auth_secrets }}"
+ session_encryption_secrets: "{{ g_session_encryption_secrets }}"
+ when: not g_session_secrets_present | bool
+
+- name: Parse named certificates
+ hosts: localhost
+ connection: local
+ become: no
+ vars:
+ internal_hostnames: "{{ hostvars[groups.oo_first_master.0].openshift.common.internal_hostnames }}"
+ named_certificates: "{{ hostvars[groups.oo_first_master.0].openshift_master_named_certificates | default([]) }}"
+ named_certificates_dir: "{{ hostvars[groups.oo_first_master.0].master_cert_config_dir }}/named_certificates/"
+ tasks:
+ - set_fact:
+ parsed_named_certificates: "{{ named_certificates | oo_parse_named_certificates(named_certificates_dir, internal_hostnames) }}"
+ when: named_certificates | length > 0
+
+- name: Deploy named certificates
+ hosts: oo_masters_to_config
+ vars:
+ named_certs_dir: "{{ master_cert_config_dir }}/named_certificates/"
+ named_certs_specified: "{{ openshift_master_named_certificates is defined }}"
+ overwrite_named_certs: "{{ openshift_master_overwrite_named_certificates | default(false) }}"
+ roles:
+ - role: openshift_facts
+ post_tasks:
+ - openshift_facts:
+ role: master
+ local_facts:
+ named_certificates: "{{ hostvars.localhost.parsed_named_certificates | default([]) }}"
+ additive_facts_to_overwrite:
+ - "{{ 'master.named_certificates' if overwrite_named_certs | bool else omit }}"
+ - name: Clear named certificates
+ file:
+ path: "{{ named_certs_dir }}"
+ state: absent
+ when: overwrite_named_certs | bool
+ - name: Ensure named certificate directory exists
+ file:
+ path: "{{ named_certs_dir }}"
+ state: directory
+ when: named_certs_specified | bool
+ - name: Land named certificates
+ copy: src="{{ item.certfile }}" dest="{{ named_certs_dir }}"
+ with_items: openshift_master_named_certificates
+ when: named_certs_specified | bool
+ - name: Land named certificate keys
+ copy: src="{{ item.keyfile }}" dest="{{ named_certs_dir }}"
+ with_items: openshift_master_named_certificates
+ when: named_certs_specified | bool
+
- name: Configure master instances
hosts: oo_masters_to_config
+ any_errors_fatal: true
+ serial: 1
vars:
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- embedded_etcd: "{{ openshift.master.embedded_etcd }}"
+ openshift_master_ha: "{{ openshift.master.ha }}"
+ openshift_master_count: "{{ openshift.master.master_count }}"
+ openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
+ openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
pre_tasks:
- name: Ensure certificate directory exists
file:
path: "{{ openshift.common.config_base }}/master"
state: directory
- when: master_certs_missing and 'oo_first_master' not in group_names
+ when: master_certs_missing | bool and 'oo_first_master' not in group_names
- name: Unarchive the tarball on the master
unarchive:
src: "{{ sync_tmpdir }}/{{ master_cert_subdir }}.tgz"
dest: "{{ master_cert_config_dir }}"
- when: master_certs_missing and 'oo_first_master' not in group_names
+ when: master_certs_missing | bool and 'oo_first_master' not in group_names
roles:
- openshift_master
- role: nickhammond.logrotate
- - role: fluentd_master
- when: openshift.common.use_fluentd | bool
+ - role: nuage_master
+ when: openshift.common.use_nuage | bool
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
changed_when: False
-- name: Additional master configuration
- hosts: oo_first_master
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- omc_cluster_hosts: "{{ groups.oo_masters_to_config | join(' ')}}"
- roles:
- - role: openshift_master_cluster
- when: openshift_master_ha | bool
- - role: openshift_examples
- when: deployment_type in ['enterprise','openshift-enterprise','origin']
- - role: openshift_cluster_metrics
- when: openshift.common.use_cluster_metrics | bool
-
# Additional instance config for online deployments
- name: Additional instance config
hosts: oo_masters_deployment_type_online
@@ -231,17 +370,8 @@
- name: Delete temporary directory on localhost
hosts: localhost
connection: local
- sudo: false
+ become: no
gather_facts: no
tasks:
- file: name={{ g_master_mktemp.stdout }} state=absent
changed_when: False
-
-- name: Configure service accounts
- hosts: oo_first_master
-
- vars:
- accounts: ["router", "registry"]
-
- roles:
- - openshift_serviceaccounts
diff --git a/playbooks/common/openshift-master/library/modify_yaml.py b/playbooks/common/openshift-master/library/modify_yaml.py
new file mode 100755
index 000000000..a4be10ca3
--- /dev/null
+++ b/playbooks/common/openshift-master/library/modify_yaml.py
@@ -0,0 +1,95 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+''' modify_yaml ansible module '''
+
+import yaml
+
+DOCUMENTATION = '''
+---
+module: modify_yaml
+short_description: Modify yaml key value pairs
+author: Andrew Butcher
+requirements: [ ]
+'''
+EXAMPLES = '''
+- modify_yaml:
+ dest: /etc/origin/master/master-config.yaml
+ yaml_key: 'kubernetesMasterConfig.masterCount'
+ yaml_value: 2
+'''
+
+def main():
+ ''' Modify key (supplied in jinja2 dot notation) in yaml file, setting
+ the key to the desired value.
+ '''
+
+ # disabling pylint errors for global-variable-undefined and invalid-name
+ # for 'global module' usage, since it is required to use ansible_facts
+ # pylint: disable=global-variable-undefined, invalid-name,
+ # redefined-outer-name
+ global module
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ dest=dict(required=True),
+ yaml_key=dict(required=True),
+ yaml_value=dict(required=True),
+ backup=dict(required=False, default=True, type='bool'),
+ ),
+ supports_check_mode=True,
+ )
+
+ dest = module.params['dest']
+ yaml_key = module.params['yaml_key']
+ yaml_value = module.safe_eval(module.params['yaml_value'])
+ backup = module.params['backup']
+
+ # Represent null values as an empty string.
+ # pylint: disable=missing-docstring, unused-argument
+ def none_representer(dumper, data):
+ return yaml.ScalarNode(tag=u'tag:yaml.org,2002:null', value=u'')
+ yaml.add_representer(type(None), none_representer)
+
+ try:
+ changes = []
+
+ yaml_file = open(dest)
+ yaml_data = yaml.safe_load(yaml_file.read())
+ yaml_file.close()
+
+ ptr = yaml_data
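+ # Walk the dotted key path, creating intermediate dicts as needed; only the
+ # final key receives the new value.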
+ for key in yaml_key.split('.'):
+ if key not in ptr and key != yaml_key.split('.')[-1]:
+ ptr[key] = {}
+ ptr = ptr[key]
+ elif key == yaml_key.split('.')[-1]:
+ if (key in ptr and module.safe_eval(ptr[key]) != yaml_value) or (key not in ptr):
+ ptr[key] = yaml_value
+ changes.append((yaml_key, yaml_value))
+ else:
+ ptr = ptr[key]
+
+ if len(changes) > 0:
+ if backup:
+ module.backup_local(dest)
+ yaml_file = open(dest, 'w')
+ yaml_string = yaml.dump(yaml_data, default_flow_style=False)
+ yaml_string = yaml_string.replace('\'\'', '""')
+ yaml_file.write(yaml_string)
+ yaml_file.close()
+
+ return module.exit_json(changed=(len(changes) > 0), changes=changes)
+
+ # ignore broad-except error to avoid stack trace to ansible user
+ # pylint: disable=broad-except
+ except Exception, e:
+ return module.fail_json(msg=str(e))
+
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
new file mode 100644
index 000000000..02449e40d
--- /dev/null
+++ b/playbooks/common/openshift-master/restart.yml
@@ -0,0 +1,151 @@
+---
+- include: ../openshift-cluster/evaluate_groups.yml
+
+- name: Validate configuration for rolling restart
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - fail:
+ msg: "openshift_rolling_restart_mode must be set to either 'services' or 'system'"
+ when: openshift_rolling_restart_mode is defined and openshift_rolling_restart_mode not in ["services", "system"]
+ - openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
+ - role: master
+ local_facts:
+ cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
+
+# Create a temp file on localhost, then check whether it exists on each system that
+# will be rebooted. If it does, ansible is running from a machine that is about to
+# be restarted, so we warn the user before proceeding.
+- name: Create temp file on localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - local_action: command mktemp
+ register: mktemp
+ changed_when: false
+
+- name: Check if temp file exists on any masters
+ hosts: oo_masters_to_config
+ tasks:
+ - stat: path="{{ hostvars.localhost.mktemp.stdout }}"
+ register: exists
+ changed_when: false
+
+- name: Cleanup temp file on localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent
+ changed_when: false
+
+- name: Warn if restarting the system where ansible is running
+ hosts: oo_masters_to_config
+ tasks:
+ - pause:
+ prompt: >
+ Warning: Running playbook from a host that will be restarted!
+ Press CTRL+C and A to abort playbook execution. You may
+ continue by pressing ENTER but the playbook will stop
+ executing after this system has been restarted and services
+ must be verified manually. To only restart services, set
+ openshift_rolling_restart_mode=services in host
+ inventory and relaunch the playbook.
+ when: exists.stat.exists and openshift.common.rolling_restart_mode == 'system'
+ - set_fact:
+ current_host: "{{ exists.stat.exists }}"
+ when: openshift.common.rolling_restart_mode == 'system'
+
+- name: Determine which masters are currently active
+ hosts: oo_masters_to_config
+ any_errors_fatal: true
+ tasks:
+ - name: Check master service status
+ command: >
+ systemctl is-active {{ openshift.common.service_type }}-master
+ register: active_check_output
+ when: openshift.master.cluster_method | default(None) == 'pacemaker'
+ failed_when: false
+ changed_when: false
+ - set_fact:
+ is_active: "{{ active_check_output.stdout == 'active' }}"
+ when: openshift.master.cluster_method | default(None) == 'pacemaker'
+
+- name: Evaluate master groups
+ hosts: localhost
+ become: no
+ tasks:
+ - fail:
+ msg: >
+ Did not receive active status from any masters. Please verify pacemaker cluster.
+ when: "{{ hostvars[groups.oo_first_master.0].openshift.master.cluster_method | default(None) == 'pacemaker' and 'True' not in (hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('is_active')
+ | list) }}"
+ - name: Evaluate oo_active_masters
+ add_host:
+ name: "{{ item }}"
+ groups: oo_active_masters
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_masters_to_config | default([]) }}"
+ when: (hostvars[item]['is_active'] | default(false)) | bool
+ - name: Evaluate oo_current_masters
+ add_host:
+ name: "{{ item }}"
+ groups: oo_current_masters
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_masters_to_config | default([]) }}"
+ when: (hostvars[item]['current_host'] | default(false)) | bool
+
+- name: Validate pacemaker cluster
+ hosts: oo_active_masters
+ tasks:
+ - name: Retrieve pcs status
+ command: pcs status
+ register: pcs_status_output
+ changed_when: false
+ - fail:
+ msg: >
+ Pacemaker cluster validation failed. One or more nodes are not online.
+ when: not (pcs_status_output.stdout | validate_pcs_cluster(groups.oo_masters_to_config)) | bool
+
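+# Restart order: masters that are neither active nor running ansible first, then the
+# active (pacemaker) masters, and finally the host ansible is running from.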
+- name: Restart masters
+ hosts: oo_masters_to_config:!oo_active_masters:!oo_current_masters
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ serial: 1
+ tasks:
+ - include: restart_hosts.yml
+ when: openshift.common.rolling_restart_mode == 'system'
+ - include: restart_services.yml
+ when: openshift.common.rolling_restart_mode == 'services'
+
+- name: Restart active masters
+ hosts: oo_active_masters
+ serial: 1
+ tasks:
+ - include: restart_hosts_pacemaker.yml
+ when: openshift.common.rolling_restart_mode == 'system'
+ - include: restart_services_pacemaker.yml
+ when: openshift.common.rolling_restart_mode == 'services'
+
+- name: Restart current masters
+ hosts: oo_current_masters
+ serial: 1
+ tasks:
+ - include: restart_hosts.yml
+ when: openshift.common.rolling_restart_mode == 'system'
+ - include: restart_services.yml
+ when: openshift.common.rolling_restart_mode == 'services'
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
new file mode 100644
index 000000000..ff206f5a2
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -0,0 +1,39 @@
+- name: Restart master system
+ # https://github.com/ansible/ansible/issues/10616
+ shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
+ async: 1
+ poll: 0
+ ignore_errors: true
+ become: yes
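+# async: 1 with poll: 0 fires the shutdown and returns immediately, so the task is
+# not killed when the connection to the rebooting host drops (see the issue above).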
+# When cluster_method != pacemaker we can ensure the api_port is
+# available.
+- name: Wait for master API to come back online
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ inventory_hostname }}"
+ state=started
+ delay=10
+ port="{{ openshift.master.api_port }}"
+ when: openshift.master.cluster_method != 'pacemaker'
+- name: Wait for master to start
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ inventory_hostname }}"
+ state=started
+ delay=10
+ port=22
+ when: openshift.master.cluster_method == 'pacemaker'
+- name: Wait for master to become available
+ command: pcs status
+ register: pcs_status_output
+ until: pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname]) | bool
+ retries: 15
+ delay: 2
+ changed_when: false
+ when: openshift.master.cluster_method == 'pacemaker'
+- fail:
+ msg: >
+ Pacemaker cluster validation failed. {{ inventory_hostname }} is not online.
+ when: openshift.master.cluster_method == 'pacemaker' and not (pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname])) | bool
diff --git a/playbooks/common/openshift-master/restart_hosts_pacemaker.yml b/playbooks/common/openshift-master/restart_hosts_pacemaker.yml
new file mode 100644
index 000000000..c9219e8de
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_hosts_pacemaker.yml
@@ -0,0 +1,25 @@
+- name: Fail over master resource
+ command: >
+ pcs resource move master {{ hostvars | oo_select_keys(groups['oo_masters_to_config']) | oo_collect('openshift.common.hostname', {'is_active': 'False'}) | list | first }}
+- name: Wait for master API to come back online
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ openshift.master.cluster_hostname }}"
+ state=started
+ delay=10
+ port="{{ openshift.master.api_port }}"
+- name: Restart master system
+ # https://github.com/ansible/ansible/issues/10616
+ shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
+ async: 1
+ poll: 0
+ ignore_errors: true
+ become: yes
+- name: Wait for master to start
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ inventory_hostname }}"
+ state=started
+ delay=10
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
new file mode 100644
index 000000000..5e539cd65
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -0,0 +1,27 @@
+- name: Restart master
+ service:
+ name: "{{ openshift.common.service_type }}-master"
+ state: restarted
+ when: not openshift_master_ha | bool
+- name: Restart master API
+ service:
+ name: "{{ openshift.common.service_type }}-master-api"
+ state: restarted
+ when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+- name: Wait for master API to come back online
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ inventory_hostname }}"
+ state=started
+ delay=10
+ port="{{ openshift.master.api_port }}"
+ when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+- name: Restart master controllers
+ service:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: restarted
+ # Ignore errors since it is possible that type != simple for
+ # pre-3.1.1 installations.
+ ignore_errors: true
+ when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
diff --git a/playbooks/common/openshift-master/restart_services_pacemaker.yml b/playbooks/common/openshift-master/restart_services_pacemaker.yml
new file mode 100644
index 000000000..e738f3fb6
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_services_pacemaker.yml
@@ -0,0 +1,10 @@
+- name: Restart master services
+ command: pcs resource restart master
+- name: Wait for master API to come back online
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ openshift.master.cluster_hostname }}"
+ state=started
+ delay=10
+ port="{{ openshift.master.api_port }}"
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
new file mode 100644
index 000000000..ccb1d23f1
--- /dev/null
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -0,0 +1,55 @@
+---
+- include: ../openshift-cluster/evaluate_groups.yml
+
+- name: Gather facts
+ hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config
+ roles:
+ - openshift_facts
+
+- name: Update master count
+ hosts: oo_masters:!oo_masters_to_config
+ serial: 1
+ roles:
+ - openshift_facts
+ post_tasks:
+ - openshift_facts:
+ role: master
+ local_facts:
+ ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
+ master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
+ - name: Update master count
+ modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'kubernetesMasterConfig.masterCount'
+ yaml_value: "{{ openshift.master.master_count }}"
+ notify:
+ - restart master api
+ - restart master controllers
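+ # 'restart master api' notifies 'verify api server', so each master is confirmed
+ # healthy before serial: 1 moves on to the next one.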
+ handlers:
+ - name: restart master api
+ service: name={{ openshift.common.service_type }}-master-api state=restarted
+ notify: verify api server
+ - name: restart master controllers
+ service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ - name: verify api server
+ command: >
+ curl -k --silent {{ openshift.master.api_url }}/healthz/ready
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
+
+- name: Configure docker hosts
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ vars:
+ docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
+ docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
+ docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
+ roles:
+ - openshift_facts
+ - openshift_docker
+
+- include: ../openshift-master/config.yml
+
+- include: ../openshift-node/config.yml
diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml
index 27e1e66f9..f60c5a2b5 100644
--- a/playbooks/common/openshift-master/service.yml
+++ b/playbooks/common/openshift-master/service.yml
@@ -2,6 +2,8 @@
- name: Populate g_service_masters host group if needed
hosts: localhost
gather_facts: no
+ connection: local
+ become: no
tasks:
- fail: msg="new_cluster_state is required to be injected in this playbook"
when: new_cluster_state is not defined
diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml
new file mode 100644
index 000000000..ba7530ed7
--- /dev/null
+++ b/playbooks/common/openshift-nfs/config.yml
@@ -0,0 +1,6 @@
+---
+- name: Configure nfs hosts
+ hosts: oo_nfs_to_config
+ roles:
+ - role: openshift_facts
+ - role: openshift_storage_nfs
diff --git a/playbooks/common/openshift-nfs/filter_plugins b/playbooks/common/openshift-nfs/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/common/openshift-nfs/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-nfs/lookup_plugins b/playbooks/common/openshift-nfs/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/common/openshift-nfs/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-nfs/roles b/playbooks/common/openshift-nfs/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/common/openshift-nfs/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-nfs/service.yml b/playbooks/common/openshift-nfs/service.yml
new file mode 100644
index 000000000..20c8ca248
--- /dev/null
+++ b/playbooks/common/openshift-nfs/service.yml
@@ -0,0 +1,18 @@
+---
+- name: Populate g_service_nfs host group if needed
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - fail: msg="new_cluster_state is required to be injected in this playbook"
+ when: new_cluster_state is not defined
+
+ - name: Evaluate g_service_nfs
+ add_host: name={{ item }} groups=g_service_nfs
+ with_items: oo_host_group_exp | default([])
+
+- name: Change state on nfs instance(s)
+ hosts: g_service_nfs
+ connection: ssh
+ gather_facts: no
+ tasks:
+ - service: name=nfs-server state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index a14ca8e11..c62167bd3 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -1,6 +1,10 @@
---
- name: Gather and set facts for node hosts
hosts: oo_nodes_to_config
+ pre_tasks:
+ - set_fact:
+ openshift_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') | default(openshift.common.debug_level, true) }}"
+ when: openshift_node_debug_level is not defined
roles:
- openshift_facts
tasks:
@@ -16,6 +20,7 @@
hostname: "{{ openshift_hostname | default(None) }}"
public_hostname: "{{ openshift_public_hostname | default(None) }}"
deployment_type: "{{ openshift_deployment_type }}"
+ use_flannel: "{{ openshift_use_flannel | default(None) }}"
- role: node
local_facts:
labels: "{{ openshift_node_labels | default(None) }}"
@@ -33,16 +38,32 @@
- server.crt
register: stat_result
- set_fact:
- certs_missing: "{{ stat_result.results | map(attribute='stat.exists')
+ certs_missing: "{{ stat_result.results | oo_collect(attribute='stat.exists')
| list | intersect([false])}}"
node_subdir: node-{{ openshift.common.hostname }}
config_dir: "{{ openshift.common.config_base }}/generated-configs/node-{{ openshift.common.hostname }}"
node_cert_dir: "{{ openshift.common.config_base }}/node"
+ - name: Check status of flannel external etcd certificates
+ stat:
+ path: "{{ openshift.common.config_base }}/node/{{ item }}"
+ with_items:
+ - node.etcd-client.crt
+ - node.etcd-ca.crt
+ register: g_external_etcd_flannel_cert_stat_result
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)
+ - set_fact:
+ etcd_client_flannel_certs_missing: "{{ g_external_etcd_flannel_cert_stat_result.results
+ | oo_collect(attribute='stat.exists')
+ | list | intersect([false])}}"
+ etcd_cert_subdir: openshift-node-{{ openshift.common.hostname }}
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
+ etcd_cert_prefix: node.etcd-
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)
- name: Create temp directory for syncing certs
hosts: localhost
connection: local
- sudo: false
+ become: no
gather_facts: no
tasks:
- name: Create local temp directory for syncing certs
@@ -50,6 +71,65 @@
register: mktemp
changed_when: False
+- name: Configure flannel etcd certificates
+ hosts: oo_first_etcd
+ vars:
+ etcd_generated_certs_dir: /etc/etcd/generated_certs
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ pre_tasks:
+ - set_fact:
+ etcd_needing_client_certs: "{{ hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_filter_list(filter_attr='etcd_client_flannel_certs_missing') | default([]) }}"
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+ roles:
+ - role: etcd_certificates
+ when: openshift_use_flannel | default(false) | bool
+ post_tasks:
+ - name: Create a tarball of the etcd flannel certs
+ command: >
+ tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz
+ -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
+ args:
+ creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
+ with_items: etcd_needing_client_certs
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+ - name: Retrieve the etcd cert tarballs
+ fetch:
+ src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
+ dest: "{{ sync_tmpdir }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ with_items: etcd_needing_client_certs
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+
+- name: Copy the external etcd flannel certs to the nodes
+ hosts: oo_nodes_to_config
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ tasks:
+ - name: Ensure certificate directory exists
+ file:
+ path: "{{ openshift.common.config_base }}/node"
+ state: directory
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+ - name: Unarchive the tarball on the master
+ unarchive:
+ src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"
+ dest: "{{ etcd_cert_config_dir }}"
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+ - file:
+ path: "{{ etcd_cert_config_dir }}/{{ item }}"
+ owner: root
+ group: root
+ mode: 0600
+ with_items:
+ - node.etcd-client.crt
+ - node.etcd-client.key
+ - node.etcd-ca.crt
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+
- name: Create node certificates
hosts: oo_first_master
vars:
@@ -79,17 +159,15 @@
validate_checksum: yes
with_items: nodes_needing_certs
-- name: Configure node instances
+- name: Deploy node certificates
hosts: oo_nodes_to_config
vars:
sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
- openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
- pre_tasks:
+ tasks:
- name: Ensure certificate directory exists
file:
path: "{{ node_cert_dir }}"
state: directory
-
# TODO: notify restart node
# possibly test service started time against certificate/config file
# timestamps in node to trigger notify
@@ -98,11 +176,51 @@
src: "{{ sync_tmpdir }}/{{ node_subdir }}.tgz"
dest: "{{ node_cert_dir }}"
when: certs_missing
+
+- name: Evaluate node groups
+ hosts: localhost
+ become: no
+ connection: local
+ tasks:
+ - name: Evaluate oo_containerized_master_nodes
+ add_host:
+ name: "{{ item }}"
+ groups: oo_containerized_master_nodes
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
+ when: hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
+
+- name: Configure node instances
+ hosts: oo_containerized_master_nodes
+ serial: 1
+ vars:
+ openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+ openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
+ roles:
+ - openshift_node
+
+- name: Configure node instances
+ hosts: oo_nodes_to_config:!oo_containerized_master_nodes
+ vars:
+ openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+ openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
roles:
- openshift_node
+
+- name: Additional node config
+ hosts: oo_nodes_to_config
+ vars:
+ # TODO: Prefix flannel role variables.
+ openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+ etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+ roles:
+ - role: flannel
+ when: openshift.common.use_flannel | bool
+ - role: nuage_node
+ when: openshift.common.use_nuage | bool
- role: nickhammond.logrotate
- - role: fluentd_node
- when: openshift.common.use_fluentd | bool
tasks:
- name: Create group for deployment type
group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
@@ -111,7 +229,7 @@
- name: Delete temporary directory on localhost
hosts: localhost
connection: local
- sudo: false
+ become: no
gather_facts: no
tasks:
- file: name={{ mktemp.stdout }} state=absent
@@ -133,6 +251,19 @@
| oo_collect('openshift.common.hostname') }}"
openshift_node_vars: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}"
pre_tasks:
-
+ # Necessary because on a node that is also a master, the master is restarted
+ # after the node restarts docker, and it can take up to 60 seconds for systemd
+ # to start the master again.
+ - name: Wait for master API to become available before proceeding
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl -k --silent {{ openshift.master.api_url }}/healthz/ready
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
+ when: openshift.common.is_containerized | bool
roles:
- openshift_manage_node
diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml
new file mode 100644
index 000000000..d36f7acea
--- /dev/null
+++ b/playbooks/common/openshift-node/scaleup.yml
@@ -0,0 +1,14 @@
+---
+- include: ../openshift-cluster/evaluate_groups.yml
+
+- name: Configure docker hosts
+ hosts: oo_nodes_to_config
+ vars:
+ docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
+ docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
+ docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
+ roles:
+ - openshift_facts
+ - openshift_docker
+
+- include: ../openshift-node/config.yml
diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml
index 5cf83e186..0f07add2a 100644
--- a/playbooks/common/openshift-node/service.yml
+++ b/playbooks/common/openshift-node/service.yml
@@ -1,6 +1,8 @@
---
- name: Populate g_service_nodes host group if needed
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
tasks:
- fail: msg="new_cluster_state is required to be injected in this playbook"
diff --git a/playbooks/gce/openshift-cluster/add_nodes.yml b/playbooks/gce/openshift-cluster/add_nodes.yml
new file mode 100644
index 000000000..765e03fdc
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/add_nodes.yml
@@ -0,0 +1,43 @@
+---
+- name: Launch instance(s)
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ vars:
+ oo_extend_env: True
+ tasks:
+ - fail:
+ msg: Deployment type not supported for gce provider yet
+ when: deployment_type == 'enterprise'
+
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
+ vars:
+ type: "compute"
+ count: "{{ num_nodes }}"
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+ g_sub_host_type: "{{ sub_host_type }}"
+ gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}"
+ gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}"
+
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
+ vars:
+ type: "infra"
+ count: "{{ num_infra }}"
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+ g_sub_host_type: "{{ sub_host_type }}"
+ gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}"
+ gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}"
+
+- include: scaleup.yml
+- include: list.yml
diff --git a/playbooks/gce/openshift-cluster/cluster_hosts.yml b/playbooks/gce/openshift-cluster/cluster_hosts.yml
new file mode 100644
index 000000000..a7baea915
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/cluster_hosts.yml
@@ -0,0 +1,21 @@
+---
+g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
+ | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
+
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
+
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
+
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
+
+g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
+
+g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}"
+
+g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
+
+g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}"
+
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
+
+g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"
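A minimal sketch of how the intersections above resolve, assuming hypothetical host and group names; groups_stub stands in for the real inventory `groups` mapping built from GCE instance tags:

---
# Hedged sketch: hypothetical inventory groups and the resulting host lists.
- hosts: localhost
  connection: local
  gather_facts: no
  vars:
    cluster_id: mycluster
    cluster_env: dev
    groups_stub:
      tag_clusterid-mycluster: [master-0, infra-0, compute-0]
      tag_environment-dev: [master-0, infra-0, compute-0, unrelated-host]
      tag_host-type-master: [master-0]
      tag_host-type-node: [infra-0, compute-0]
      tag_sub-host-type-infra: [infra-0]
  tasks:
    - set_fact:
        all_hosts: "{{ groups_stub['tag_clusterid-' ~ cluster_id]
                       | intersect(groups_stub['tag_environment-' ~ cluster_env]) }}"
    - debug:
        msg: "all={{ all_hosts }} masters={{ all_hosts | intersect(groups_stub['tag_host-type-master']) }}"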
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index fd5dfcc72..ba37a3a1f 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -1,24 +1,21 @@
---
-# TODO: fix firewall related bug with GCE and origin, since GCE is overriding
-# /etc/sysconfig/iptables
-
-- hosts: localhost
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - set_fact:
- g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
-
- include: ../../common/openshift-cluster/config.yml
+ vars_files:
+ - ../../gce/openshift-cluster/vars.yml
+ - ../../gce/openshift-cluster/cluster_hosts.yml
vars:
- g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}"
- g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}"
- g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"
- g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
- g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+ g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: 2
+ openshift_debug_level: "{{ debug_level }}"
openshift_deployment_type: "{{ deployment_type }}"
openshift_hostname: "{{ gce_private_ip }}"
+ openshift_registry_selector: 'type=infra'
+ openshift_router_selector: 'type=infra'
+ openshift_infra_nodes: "{{ g_infra_hosts }}"
+ openshift_master_cluster_method: 'native'
+ openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
+ os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
+ openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
+ openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
index 7a3b80da0..7532a678b 100644
--- a/playbooks/gce/openshift-cluster/launch.yml
+++ b/playbooks/gce/openshift-cluster/launch.yml
@@ -2,6 +2,7 @@
- name: Launch instance(s)
hosts: localhost
connection: local
+ become: no
gather_facts: no
vars_files:
- vars.yml
@@ -9,15 +10,28 @@
- fail: msg="Deployment type not supported for gce provider yet"
when: deployment_type == 'enterprise'
- - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ etcd_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+ g_sub_host_type: "default"
+ gce_machine_type: "{{ lookup('env', 'gce_machine_etcd_type') | default(lookup('env', 'gce_machine_type'), true) }}"
+ gce_machine_image: "{{ lookup('env', 'gce_machine_etcd_image') | default(lookup('env', 'gce_machine_image'), true) }}"
+
+
+ - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ master_names }}"
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
g_sub_host_type: "default"
+ gce_machine_type: "{{ lookup('env', 'gce_machine_master_type') | default(lookup('env', 'gce_machine_type'), true) }}"
+ gce_machine_image: "{{ lookup('env', 'gce_machine_master_image') | default(lookup('env', 'gce_machine_image'), true) }}"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "compute"
count: "{{ num_nodes }}"
@@ -27,34 +41,27 @@
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
g_sub_host_type: "{{ sub_host_type }}"
+ gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}"
+ gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "infra"
count: "{{ num_infra }}"
- include: tasks/launch_instances.yml
vars:
- instances: "{{ infra_names }}"
+ instances: "{{ node_names }}"
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
g_sub_host_type: "{{ sub_host_type }}"
+ gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}"
+ gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}"
- - set_fact:
- a_infra: "{{ infra_names[0] }}"
- - add_host: name={{ a_infra }} groups=service_master
+ - add_host:
+ name: "{{ master_names.0 }}"
+ groups: service_master
+ when: master_names is defined and master_names.0 is defined
- include: update.yml
-- name: Deploy OpenShift Services
- hosts: service_master
- connection: ssh
- gather_facts: yes
- roles:
- - openshift_registry
- - openshift_router
-
-- include: ../../common/openshift-cluster/create_services.yml
- vars:
- g_svc_master: "{{ service_master }}"
-
- include: list.yml
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
index 5ba0f5a48..992033d16 100644
--- a/playbooks/gce/openshift-cluster/list.yml
+++ b/playbooks/gce/openshift-cluster/list.yml
@@ -1,24 +1,33 @@
---
- name: Generate oo_list_hosts group
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
vars_files:
- vars.yml
tasks:
- - set_fact: scratch_group=tag_env-{{ cluster_id }}
+ - set_fact: scratch_group=tag_clusterid-{{ cluster_id }}
when: cluster_id != ''
- set_fact: scratch_group=all
when: cluster_id == ''
- add_host:
name: "{{ item }}"
groups: oo_list_hosts
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+ with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true))
-- name: List instance(s)
+- name: List Hosts
hosts: oo_list_hosts
+
+- name: List Hosts
+ hosts: localhost
+ become: no
+ connection: local
gather_facts: no
+ vars_files:
+ - vars.yml
tasks:
- debug:
- msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"
+ msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/gce/openshift-cluster/service.yml b/playbooks/gce/openshift-cluster/service.yml
index 2d0f2ab95..914f38c1f 100644
--- a/playbooks/gce/openshift-cluster/service.yml
+++ b/playbooks/gce/openshift-cluster/service.yml
@@ -1,28 +1,29 @@
---
- name: Call same systemctl command for openshift on all instance(s)
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
vars_files:
- vars.yml
+ - cluster_hosts.yml
tasks:
- fail: msg="cluster_id is required to be injected in this playbook"
when: cluster_id is not defined
- - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node
- add_host:
name: "{{ item }}"
groups: g_service_nodes
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+ with_items: "{{ node_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"
- - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master
- add_host:
name: "{{ item }}"
groups: g_service_masters
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+ with_items: "{{ master_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"
- include: ../../common/openshift-node/service.yml
- include: ../../common/openshift-master/service.yml
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index 6307ecc27..8ebf71cd4 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -1,42 +1,59 @@
---
-# TODO: when we are ready to go to ansible 1.9+ support only, we can update to
-# the gce task to use the disk_auto_delete parameter to avoid having to delete
-# the disk as a separate step on termination
- name: Launch instance(s)
gce:
instance_names: "{{ instances }}"
- machine_type: "{{ lookup('env', 'gce_machine_type') | default('n1-standard-1', true) }}"
- image: "{{ lookup('env', 'gce_machine_image') | default(deployment_vars[deployment_type].image, true) }}"
+ machine_type: "{{ gce_machine_type | default(deployment_vars[deployment_type].machine_type, true) }}"
+ image: "{{ gce_machine_image | default(deployment_vars[deployment_type].image, true) }}"
service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
project_id: "{{ lookup('env', 'gce_project_id') }}"
+ zone: "{{ lookup('env', 'zone') }}"
+ network: "{{ lookup('env', 'network') }}"
+ # service_account_permissions is unsupported in Ansible 1.9.+
+ #service_account_permissions: "datastore,logging-write"
tags:
- created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}
- - env-{{ cluster }}
+ - environment-{{ cluster_env }}
+ - clusterid-{{ cluster_id }}
- host-type-{{ type }}
- - sub-host-type-{{ sub_host_type }}
- - env-host-type-{{ cluster }}-openshift-{{ type }}
+ - sub-host-type-{{ g_sub_host_type }}
+ when: instances |length > 0
register: gce
+- set_fact:
+ node_label:
+ # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
+ region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}"
+ type: "{{ g_sub_host_type }}"
+ when: instances |length > 0 and type == "node"
+
+- set_fact:
+ node_label:
+ # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
+ region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}"
+ type: "{{ type }}"
+ when: instances |length > 0 and type != "node"
+
- name: Add new instances to groups and set variables needed
add_host:
hostname: "{{ item.name }}"
ansible_ssh_host: "{{ item.public_ip }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
gce_public_ip: "{{ item.public_ip }}"
gce_private_ip: "{{ item.private_ip }}"
- with_items: gce.instance_data
+ openshift_node_labels: "{{ node_label }}"
+ with_items: gce.instance_data | default([], true)
- name: Wait for ssh
wait_for: port=22 host={{ item.public_ip }}
- with_items: gce.instance_data
+ with_items: gce.instance_data | default([], true)
- name: Wait for user setup
command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup"
register: result
until: result.rc == 0
- retries: 20
- delay: 10
- with_items: gce.instance_data
+ retries: 30
+ delay: 5
+ with_items: gce.instance_data | default([], true)
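The node_label region above is derived by stripping the trailing zone suffix with regex_replace. A self-contained sketch of that parsing, using a placeholder zone value:

---
# Hedged sketch of the zone -> region parsing; 'us-central1-a' is a made-up zone.
- hosts: localhost
  connection: local
  gather_facts: no
  vars:
    example_zone: us-central1-a
  tasks:
    - debug:
        msg: "{{ example_zone | regex_replace('^(.*)-.*$', '\\1') }}"  # prints us-central1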
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
index 098b0df73..d835c53ba 100644
--- a/playbooks/gce/openshift-cluster/terminate.yml
+++ b/playbooks/gce/openshift-cluster/terminate.yml
@@ -1,25 +1,18 @@
---
- name: Terminate instance(s)
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
vars_files:
- vars.yml
tasks:
- - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node
- add_host:
name: "{{ item }}"
- groups: oo_hosts_to_terminate, oo_nodes_to_terminate
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ groups: oo_hosts_to_terminate
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
-
- - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master
- - add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_terminate, oo_masters_to_terminate
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+ with_items: (groups['tag_clusterid-' ~ cluster_id] | default([])) | difference(['localhost'])
- name: Unsubscribe VMs
hosts: oo_hosts_to_terminate
@@ -27,19 +20,40 @@
- vars.yml
roles:
- role: rhel_unsubscribe
- when: deployment_type == "enterprise" and
+ when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
ansible_distribution == "RedHat" and
lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
default('no', True) | lower in ['no', 'false']
-- include: ../openshift-node/terminate.yml
- vars:
- gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
- gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
- gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+- name: Terminate instance(s)
+ hosts: localhost
+ become: no
+ connection: local
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+
+ - name: Terminate instances that were previously launched
+ local_action:
+ module: gce
+ state: 'absent'
+ name: "{{ item }}"
+ service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+ pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+ project_id: "{{ lookup('env', 'gce_project_id') }}"
+ zone: "{{ lookup('env', 'zone') }}"
+ with_items: groups['oo_hosts_to_terminate'] | default([], true)
+ when: item is defined
-- include: ../openshift-master/terminate.yml
- vars:
- gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
- gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
- gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+#- include: ../openshift-node/terminate.yml
+# vars:
+# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+# gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+#
+#- include: ../openshift-master/terminate.yml
+# vars:
+# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+# gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml
index 8096aa654..2dc540978 100644
--- a/playbooks/gce/openshift-cluster/update.yml
+++ b/playbooks/gce/openshift-cluster/update.yml
@@ -1,19 +1,20 @@
---
- name: Populate oo_hosts_to_update group
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
vars_files:
- vars.yml
+ - cluster_hosts.yml
tasks:
- name: Evaluate oo_hosts_to_update
add_host:
name: "{{ item }}"
groups: oo_hosts_to_update
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: (groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([]))
- | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([]))
- | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-etcd"] | default([]))
+ with_items: "{{ g_all_hosts | default([]) }}"
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
index ae33083b9..d173213fc 100644
--- a/playbooks/gce/openshift-cluster/vars.yml
+++ b/playbooks/gce/openshift-cluster/vars.yml
@@ -1,15 +1,23 @@
---
+debug_level: 2
+
+deployment_rhel7_ent_base:
+ image: "{{ lookup('oo_option', 'image_name') | default('rhel-7', True) }}"
+ machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}"
+ ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}"
+ sudo: yes
+
deployment_vars:
origin:
- image: centos-7
- ssh_user:
+ image: "{{ lookup('oo_option', 'image_name') | default('centos-7', True) }}"
+ machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}"
+ ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}"
sudo: yes
online:
image: libra-rhel7
+ machine_type: n1-standard-1
ssh_user: root
sudo: no
- enterprise:
- image: rhel-7
- ssh_user:
- sudo: yes
-
+ enterprise: "{{ deployment_rhel7_ent_base }}"
+ openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
+ atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
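The enterprise variants now share a single base dict through a Jinja reference. A minimal sketch with placeholder values, showing that the alias resolves to the shared values at use time:

---
# Hedged sketch: all enterprise deployment types resolve to the same base dict.
- hosts: localhost
  connection: local
  gather_facts: no
  vars:
    deployment_rhel7_ent_base:
      image: rhel-7          # placeholder
      ssh_user: cloud-user   # placeholder
      sudo: yes
    deployment_vars:
      enterprise: "{{ deployment_rhel7_ent_base }}"
      openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
      atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
  tasks:
    - debug:
        msg: "{{ deployment_vars['atomic-enterprise'].ssh_user }}"  # prints cloud-user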
diff --git a/playbooks/gce/openshift-cluster/wip.yml b/playbooks/gce/openshift-cluster/wip.yml
deleted file mode 100644
index 51a521a6b..000000000
--- a/playbooks/gce/openshift-cluster/wip.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: WIP
- hosts: localhost
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - name: Evaluate oo_masters_for_deploy
- add_host:
- name: "{{ item }}"
- groups: oo_masters_for_deploy
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
-
-- name: Deploy OpenShift Services
- hosts: oo_masters_for_deploy
- connection: ssh
- gather_facts: yes
- user: root
- vars_files:
- - vars.yml
- roles:
- - openshift_registry
- - openshift_router
diff --git a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
new file mode 100644
index 000000000..a7baea915
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
@@ -0,0 +1,21 @@
+---
+g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
+ | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
+
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
+
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
+
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
+
+g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
+
+g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}"
+
+g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
+
+g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}"
+
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
+
+g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index c208eee81..0e003ef67 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -2,23 +2,22 @@
# TODO: need to figure out a plan for setting hostname, currently the default
# is localhost, so no hostname value (or public_hostname) value is getting
# assigned
-
-- hosts: localhost
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - set_fact:
- g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
-
- include: ../../common/openshift-cluster/config.yml
+ vars_files:
+ - ../../libvirt/openshift-cluster/vars.yml
+ - ../../libvirt/openshift-cluster/cluster_hosts.yml
vars:
- g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}"
- g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}"
- g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"
- g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
- g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+ g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: 2
+ openshift_debug_level: "{{ debug_level }}"
openshift_deployment_type: "{{ deployment_type }}"
+ openshift_registry_selector: 'type=infra'
+ openshift_router_selector: 'type=infra'
+ openshift_infra_nodes: "{{ g_infra_hosts }}"
+ openshift_master_cluster_method: 'native'
+ openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
+ os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
+ openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
+ openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
index d3e768de5..3a48c82bc 100644
--- a/playbooks/libvirt/openshift-cluster/launch.yml
+++ b/playbooks/libvirt/openshift-cluster/launch.yml
@@ -1,6 +1,8 @@
---
- name: Launch instance(s)
hosts: localhost
+ become: no
+ connection: local
gather_facts: no
vars_files:
- vars.yml
@@ -11,13 +13,14 @@
image_url: "{{ deployment_vars[deployment_type].image.url }}"
image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}"
image_name: "{{ deployment_vars[deployment_type].image.name }}"
+ image_compression: "{{ deployment_vars[deployment_type].image.compression }}"
tasks:
- fail: msg="Deployment type not supported for libvirt provider yet"
when: deployment_type == 'online'
- include: tasks/configure_libvirt.yml
- - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ etcd_names }}"
@@ -25,7 +28,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "default"
- - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ master_names }}"
@@ -33,7 +36,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "default"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "compute"
count: "{{ num_nodes }}"
@@ -44,7 +47,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "{{ sub_host_type }}"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "infra"
count: "{{ num_infra }}"
diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
index eaedc4d0d..6cb81ee79 100644
--- a/playbooks/libvirt/openshift-cluster/list.yml
+++ b/playbooks/libvirt/openshift-cluster/list.yml
@@ -1,11 +1,13 @@
---
- name: Generate oo_list_hosts group
hosts: localhost
+ become: no
+ connection: local
gather_facts: no
vars_files:
- vars.yml
tasks:
- - set_fact: scratch_group=tag_env-{{ cluster_id }}
+ - set_fact: scratch_group=tag_clusterid-{{ cluster_id }}
when: cluster_id != ''
- set_fact: scratch_group=all
when: cluster_id == ''
@@ -18,6 +20,14 @@
- name: List Hosts
hosts: oo_list_hosts
+
+- name: List Hosts
+ hosts: localhost
+ become: no
+ connection: local
+ gather_facts: no
+ vars_files:
+ - vars.yml
tasks:
- debug:
- msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}}'
+ msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/libvirt/openshift-cluster/service.yml b/playbooks/libvirt/openshift-cluster/service.yml
index ae095f5a2..cd07c8701 100644
--- a/playbooks/libvirt/openshift-cluster/service.yml
+++ b/playbooks/libvirt/openshift-cluster/service.yml
@@ -5,6 +5,8 @@
- name: Call same systemctl command for openshift on all instance(s)
hosts: localhost
+ become: no
+ connection: local
gather_facts: no
vars_files:
- vars.yml
@@ -18,7 +20,7 @@
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
groups: g_service_masters
- with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
+ with_items: "{{ g_master_hosts | default([]) }}"
- name: Evaluate g_service_nodes
add_host:
@@ -26,7 +28,7 @@
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
groups: g_service_nodes
- with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])
+ with_items: "{{ g_node_hosts | default([]) }}"
- include: ../../common/openshift-node/service.yml
- include: ../../common/openshift-master/service.yml
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
index 8a67d713f..397158b9e 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
@@ -4,13 +4,17 @@
dest: "{{ libvirt_storage_pool_path }}"
state: directory
+# Permissions need to be set on the directory itself and on anything created inside it, so the acl module is called both without and with 'default' set.
- acl:
- default: yes
+ default: "{{ item }}"
entity: kvm
etype: group
name: "{{ libvirt_storage_pool_path }}"
permissions: rwx
state: present
+ with_items:
+ - no
+ - yes
- name: Test if libvirt storage pool for openshift already exists
command: "virsh -c {{ libvirt_uri }} pool-info {{ libvirt_storage_pool }}"
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 2a0c90b46..b00352539 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -13,8 +13,27 @@
get_url:
url: '{{ image_url }}'
sha256sum: '{{ image_sha256 }}'
- dest: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}'
+ dest: '{{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | reject("equalto", "") | join(".") }}'
when: '{{ ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"] }}'
+ register: downloaded_image
+
+- name: Uncompress xz compressed base cloud image
+ command: 'unxz -kf {{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
+ args:
+ creates: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}'
+ when: image_compression in ["xz"] and downloaded_image.changed
+
+- name: Uncompress tgz compressed base cloud image
+ command: 'tar zxvf {{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
+ args:
+ creates: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}'
+ when: image_compression in ["tgz"] and downloaded_image.changed
+
+- name: Uncompress gzip compressed base cloud image
+ command: 'gunzip {{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
+ args:
+ creates: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}'
+ when: image_compression in ["gz"] and downloaded_image.changed
- name: Create the cloud-init config drive path
file:
@@ -64,7 +83,7 @@
register: nb_allocated_ips
until: nb_allocated_ips.stdout == '{{ instances | length }}'
retries: 60
- delay: 1
+ delay: 3
when: instances | length != 0
- name: Collect IP addresses of the VMs
@@ -75,13 +94,24 @@
- set_fact:
ips: "{{ scratch_ip.results | default([]) | oo_collect('stdout') }}"
+- set_fact:
+ node_label:
+ type: "{{ g_sub_host_type }}"
+ when: instances | length > 0 and type == "node"
+
+- set_fact:
+ node_label:
+ type: "{{ type }}"
+ when: instances | length > 0 and type != "node"
+
- name: Add new instances
add_host:
hostname: '{{ item.0 }}'
ansible_ssh_host: '{{ item.1 }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}'
+ groups: "tag_environment-{{ cluster_env }}, tag_host-type-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}, tag_clusterid-{{ cluster_id }}"
+ openshift_node_labels: "{{ node_label }}"
with_together:
- instances
- ips
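The download destination above appends the compression suffix only when one is set, since the empty string is rejected before the join. A hedged sketch with a placeholder image name:

---
# Hedged sketch of how the destination filename is built from name + optional compression suffix.
- hosts: localhost
  connection: local
  gather_facts: no
  vars:
    image_name: CentOS-7-x86_64-GenericCloud.qcow2   # placeholder
  tasks:
    - debug:
        msg: "{{ [image_name, item] | reject('equalto', '') | join('.') }}"
      with_items:
        - "xz"   # -> CentOS-7-x86_64-GenericCloud.qcow2.xz
        - ""     # -> CentOS-7-x86_64-GenericCloud.qcow2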
diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml
index df200e374..0ca8e0974 100644
--- a/playbooks/libvirt/openshift-cluster/templates/domain.xml
+++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml
@@ -3,9 +3,10 @@
<memory unit='GiB'>1</memory>
<metadata xmlns:ansible="https://github.com/ansible/ansible">
<ansible:tags>
- <ansible:tag>env-{{ cluster }}</ansible:tag>
- <ansible:tag>env-host-type-{{ cluster }}-openshift-{{ type }}</ansible:tag>
+ <ansible:tag>environment-{{ cluster_env }}</ansible:tag>
+ <ansible:tag>clusterid-{{ cluster }}</ansible:tag>
<ansible:tag>host-type-{{ type }}</ansible:tag>
+ <ansible:tag>sub-host-type-{{ g_sub_host_type }}</ansible:tag>
</ansible:tags>
</metadata>
<currentMemory unit='GiB'>1</currentMemory>
diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml
index 86dcd62bb..050bc7ab9 100644
--- a/playbooks/libvirt/openshift-cluster/templates/network.xml
+++ b/playbooks/libvirt/openshift-cluster/templates/network.xml
@@ -8,7 +8,7 @@
<!-- TODO: query for first available virbr interface available -->
<bridge name='virbr3' stp='on' delay='0'/>
<!-- TODO: make overridable -->
- <domain name='example.com'/>
+ <domain name='example.com' localOnly='yes' />
<dns>
<!-- TODO: automatically add host entries -->
</dns>
diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data
index 77b788109..ead881f78 100644
--- a/playbooks/libvirt/openshift-cluster/templates/user-data
+++ b/playbooks/libvirt/openshift-cluster/templates/user-data
@@ -3,7 +3,6 @@ disable_root: true
hostname: {{ item[0] }}
fqdn: {{ item[0] }}.example.com
-manage_etc_hosts: true
users:
- default
@@ -19,5 +18,11 @@ system_info:
ssh_authorized_keys:
- {{ lookup('file', '~/.ssh/id_rsa.pub') }}
-bootcmd:
+write_files:
+ - path: /etc/sudoers.d/00-openshift-no-requiretty
+ permissions: 440
+ content: |
+ Defaults:openshift !requiretty
+
+runcmd:
- NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart
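The new write_files stanza renders on the guest as /etc/sudoers.d/00-openshift-no-requiretty (mode 440) containing the single line `Defaults:openshift !requiretty`; disabling requiretty for the openshift user lets sudo succeed over Ansible's non-interactive (no-tty) SSH sessions.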
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
index 8f00812a9..f4749c28d 100644
--- a/playbooks/libvirt/openshift-cluster/terminate.yml
+++ b/playbooks/libvirt/openshift-cluster/terminate.yml
@@ -3,11 +3,13 @@
- name: Terminate instance(s)
hosts: localhost
+ become: no
+ connection: local
gather_facts: no
vars_files:
- vars.yml
tasks:
- - set_fact: cluster_group=tag_env-{{ cluster_id }}
+ - set_fact: cluster_group=tag_clusterid-{{ cluster_id }}
- add_host:
name: "{{ item }}"
groups: oo_hosts_to_terminate
@@ -21,13 +23,15 @@
- vars.yml
roles:
- role: rhel_unsubscribe
- when: deployment_type == "enterprise" and
+ when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
ansible_distribution == "RedHat" and
lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
default('no', True) | lower in ['no', 'false']
- name: Terminate instance(s)
hosts: localhost
+ become: no
+ connection: local
gather_facts: no
vars_files:
- vars.yml
diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml
index d09832c16..2dc540978 100644
--- a/playbooks/libvirt/openshift-cluster/update.yml
+++ b/playbooks/libvirt/openshift-cluster/update.yml
@@ -1,9 +1,12 @@
---
- name: Populate oo_hosts_to_update group
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
vars_files:
- vars.yml
+ - cluster_hosts.yml
tasks:
- name: Evaluate oo_hosts_to_update
add_host:
@@ -11,9 +14,7 @@
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: (groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([]))
- | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([]))
- | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-etcd"] | default([]))
+ with_items: "{{ g_all_hosts | default([]) }}"
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml
index c77a0797e..f28245f88 100644
--- a/playbooks/libvirt/openshift-cluster/vars.yml
+++ b/playbooks/libvirt/openshift-cluster/vars.yml
@@ -3,16 +3,33 @@ libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-open
libvirt_storage_pool: 'openshift-ansible'
libvirt_network: openshift-ansible
libvirt_uri: 'qemu:///system'
+debug_level: 2
+
+# Automatic download of the qcow2 image for RHEL cannot be done directly from the Red Hat portal because it requires authentication.
+# The default image_url below for the enterprise and openshift-enterprise deployment types therefore won't work as-is.
+deployment_rhel7_ent_base:
+ image:
+ url: "{{ lookup('oo_option', 'image_url') |
+ default('https://access.cdn.redhat.com//content/origin/files/sha256/25/25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0/rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
+ name: "{{ lookup('oo_option', 'image_name') |
+ default('rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
+ sha256: "{{ lookup('oo_option', 'image_sha256') |
+ default('25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0', True) }}"
+ compression: ""
+ ssh_user: openshift
+ sudo: yes
deployment_vars:
origin:
image:
url: "{{ lookup('oo_option', 'image_url') |
- default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2', True) }}"
+ default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1602.qcow2.xz', True) }}"
+ compression: "{{ lookup('oo_option', 'image_compression') |
+ default('xz', True) }}"
name: "{{ lookup('oo_option', 'image_name') |
default('CentOS-7-x86_64-GenericCloud.qcow2', True) }}"
sha256: "{{ lookup('oo_option', 'image_sha256') |
- default('e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab', True) }}"
+ default('dd0f5e610e7c5ffacaca35ed7a78a19142a588f4543da77b61c1fb0d74400471', True) }}"
ssh_user: openshift
sudo: yes
online:
@@ -22,18 +39,6 @@ deployment_vars:
sha256:
ssh_user: root
sudo: no
- enterprise:
- image:
- url: "{{ lookup('oo_option', 'image_url') |
- default('https://access.cdn.redhat.com//content/origin/files/sha256/ff/ff8198653cfd9c39411fc57077451ac291b3a605d305e905932fd6d5b1890bf3/rhel-guest-image-7.1-20150224.0.x86_64.qcow2', True) }}"
- name: "{{ lookup('oo_option', 'image_name') |
- default('rhel-guest-image-7.1-20150224.0.x86_64.qcow2', True) }}"
- sha256: "{{ lookup('oo_option', 'image_sha256') |
- default('ff8198653cfd9c39411fc57077451ac291b3a605d305e905932fd6d5b1890bf3', True) }}"
- ssh_user: openshift
- sudo: yes
-# origin:
-# fedora:
-# url: "http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2"
-# name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2
-# sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86
+ enterprise: "{{ deployment_rhel7_ent_base }}"
+ openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
+ atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/playbooks/openstack/openshift-cluster/cluster_hosts.yml b/playbooks/openstack/openshift-cluster/cluster_hosts.yml
new file mode 100644
index 000000000..119b376aa
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/cluster_hosts.yml
@@ -0,0 +1,21 @@
+---
+g_all_hosts: "{{ groups['tag_clusterid_' ~ cluster_id] | default([])
+ | intersect(groups['tag_environment_' ~ cluster_env] | default([])) }}"
+
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_etcd'] | default([])) }}"
+
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])) }}"
+
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}"
+
+g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}"
+
+g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_master'] | default([])) }}"
+
+g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}"
+
+g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_node'] | default([])) }}"
+
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}"
+
+g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute'] | default([])) }}"
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
index a5ee2d6a5..093beaf03 100644
--- a/playbooks/openstack/openshift-cluster/config.yml
+++ b/playbooks/openstack/openshift-cluster/config.yml
@@ -1,20 +1,20 @@
-- hosts: localhost
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - set_fact:
- g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
-
+---
- include: ../../common/openshift-cluster/config.yml
+ vars_files:
+ - ../../openstack/openshift-cluster/vars.yml
+ - ../../openstack/openshift-cluster/cluster_hosts.yml
vars:
- g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
- g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
- g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
- g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
- g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+ g_nodeonmaster: true
+ g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: 2
+ openshift_debug_level: "{{ debug_level }}"
openshift_deployment_type: "{{ deployment_type }}"
- openshift_hostname: "{{ ansible_default_ipv4.address }}"
+ openshift_registry_selector: 'type=infra'
+ openshift_router_selector: 'type=infra'
+ openshift_infra_nodes: "{{ g_infra_hosts }}"
+ openshift_master_cluster_method: 'native'
+ openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
+ os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
+ openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
+ openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
diff --git a/playbooks/openstack/openshift-cluster/dns.yml b/playbooks/openstack/openshift-cluster/dns.yml
new file mode 100644
index 000000000..5e7671a48
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/dns.yml
@@ -0,0 +1,47 @@
+- name: Populate oo_dns_hosts_to_update group
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ - cluster_hosts.yml
+ tasks:
+ - name: Evaluate oo_dns_hosts_to_update
+ add_host:
+ name: "{{ item }}"
+ groups: oo_dns_hosts_to_update
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: "{{ groups[cluster_id ~ '-dns'] }}"
+
+ - name: Evaluate oo_hosts_to_add_in_dns
+ add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_add_in_dns
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: "{{ groups['tag_clusterid_' ~ cluster_id] }}"
+
+- name: Gather facts
+ hosts: oo_hosts_to_add_in_dns
+ vars_files:
+ - vars.yml
+ - cluster_hosts.yml
+
+- name: Configure the DNS
+ hosts: oo_dns_hosts_to_update
+ vars_files:
+ - vars.yml
+ - cluster_hosts.yml
+ roles:
+ - role: rhel_subscribe
+ when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
+ ansible_distribution == "RedHat" and
+ lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
+ default('no', True) | lower in ['no', 'false']
+
+ - { role: dns,
+ dns_forwarders: "{{ openstack_network_dns }}",
+ dns_zones: [ novalocal, openstacklocal ],
+ dns_all_hosts: "{{ g_all_hosts }}" }
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index 40e4ab22c..af774aa32 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -4,15 +4,20 @@ description: OpenShift cluster
parameters:
+ cluster_env:
+ type: string
+ label: Cluster environment
+ description: Environment of the cluster
+
cluster_id:
type: string
label: Cluster ID
description: Identifier of the cluster
- cidr:
+ subnet_24_prefix:
type: string
- label: CIDR
- description: CIDR of the network of the cluster
+ label: Subnet /24 prefix
+ description: /24 subnet prefix of the network of the cluster (dot-separated number triplet)
dns_nameservers:
type: comma_delimited_list
@@ -25,12 +30,6 @@ parameters:
description: Name of the external network
default: external
- floating_ip_pool:
- type: string
- label: Floating IP pool
- description: Floating IP pools
- default: external
-
ssh_public_key:
type: string
label: SSH public key
@@ -43,6 +42,11 @@ parameters:
description: Source of legitimate ssh connections
default: 0.0.0.0/0
+ num_etcd:
+ type: number
+ label: Number of etcd nodes
+ description: Number of etcd nodes
+
num_masters:
type: number
label: Number of masters
@@ -58,6 +62,11 @@ parameters:
label: Number of infrastructure nodes
description: Number of infrastructure nodes
+ etcd_image:
+ type: string
+ label: Etcd image
+ description: Name of the image for the etcd servers
+
master_image:
type: string
label: Master image
@@ -73,6 +82,16 @@ parameters:
label: Infra image
description: Name of the image for the infra node servers
+ dns_image:
+ type: string
+ label: DNS image
+ description: Name of the image for the DNS server
+
+ etcd_flavor:
+ type: string
+ label: Etcd flavor
+ description: Flavor of the etcd servers
+
master_flavor:
type: string
label: Master flavor
@@ -88,8 +107,25 @@ parameters:
label: Infra flavor
description: Flavor of the infra node servers
+ dns_flavor:
+ type: string
+ label: DNS flavor
+ description: Flavor of the DNS server
+
outputs:
+ etcd_names:
+ description: Name of the etcds
+ value: { get_attr: [ etcd, name ] }
+
+ etcd_ips:
+ description: IPs of the etcds
+ value: { get_attr: [ etcd, private_ip ] }
+
+ etcd_floating_ips:
+ description: Floating IPs of the etcds
+ value: { get_attr: [ etcd, floating_ip ] }
+
master_names:
description: Name of the masters
value: { get_attr: [ masters, name ] }
@@ -126,6 +162,26 @@ outputs:
description: Floating IPs of the nodes
value: { get_attr: [ infra_nodes, floating_ip ] }
+ dns_name:
+ description: Name of the DNS
+ value:
+ get_attr:
+ - dns
+ - name
+
+ dns_floating_ip:
+ description: Floating IP of the DNS
+ value:
+ get_attr:
+ - dns
+ - addresses
+ - str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: { get_param: cluster_id }
+ - 1
+ - addr
+
resources:
net:
@@ -146,8 +202,27 @@ resources:
params:
cluster_id: { get_param: cluster_id }
network: { get_resource: net }
- cidr: { get_param: cidr }
- dns_nameservers: { get_param: dns_nameservers }
+ cidr:
+ str_replace:
+ template: subnet_24_prefix.0/24
+ params:
+ subnet_24_prefix: { get_param: subnet_24_prefix }
+ allocation_pools:
+ - start:
+ str_replace:
+ template: subnet_24_prefix.3
+ params:
+ subnet_24_prefix: { get_param: subnet_24_prefix }
+ end:
+ str_replace:
+ template: subnet_24_prefix.254
+ params:
+ subnet_24_prefix: { get_param: subnet_24_prefix }
+ dns_nameservers:
+ - str_replace:
+ template: subnet_24_prefix.2
+ params:
+ subnet_24_prefix: { get_param: subnet_24_prefix }
router:
type: OS::Neutron::Router
@@ -220,6 +295,37 @@ resources:
port_range_min: 24224
port_range_max: 24224
+ etcd-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-etcd-secgrp
+ params:
+ cluster_id: { get_param: cluster_id }
+ description:
+ str_replace:
+ template: Security group for cluster_id etcd cluster
+ params:
+ cluster_id: { get_param: cluster_id }
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: { get_param: ssh_incoming }
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 2379
+ port_range_max: 2379
+ remote_mode: remote_group_id
+ remote_group_id: { get_resource: master-secgrp }
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 2380
+ port_range_max: 2380
+ remote_mode: remote_group_id
+
node-secgrp:
type: OS::Neutron::SecurityGroup
properties:
@@ -240,16 +346,16 @@ resources:
port_range_max: 22
remote_ip_prefix: { get_param: ssh_incoming }
- direction: ingress
- protocol: udp
- port_range_min: 4789
- port_range_max: 4789
- remote_mode: remote_group_id
- - direction: ingress
protocol: tcp
port_range_min: 10250
port_range_max: 10250
remote_mode: remote_group_id
remote_group_id: { get_resource: master-secgrp }
+ - direction: ingress
+ protocol: udp
+ port_range_min: 4789
+ port_range_max: 4789
+ remote_mode: remote_group_id
infra-secgrp:
type: OS::Neutron::SecurityGroup
@@ -274,6 +380,76 @@ resources:
port_range_min: 443
port_range_max: 443
+ dns-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-dns-secgrp
+ params:
+ cluster_id: { get_param: cluster_id }
+ description:
+ str_replace:
+ template: Security group for cluster_id cluster DNS
+ params:
+ cluster_id: { get_param: cluster_id }
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: { get_param: ssh_incoming }
+ - direction: ingress
+ protocol: udp
+ port_range_min: 53
+ port_range_max: 53
+ remote_mode: remote_group_id
+ remote_group_id: { get_resource: etcd-secgrp }
+ - direction: ingress
+ protocol: udp
+ port_range_min: 53
+ port_range_max: 53
+ remote_mode: remote_group_id
+ remote_group_id: { get_resource: master-secgrp }
+ - direction: ingress
+ protocol: udp
+ port_range_min: 53
+ port_range_max: 53
+ remote_mode: remote_group_id
+ remote_group_id: { get_resource: node-secgrp }
+
+ etcd:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: { get_param: num_etcd }
+ resource_def:
+ type: heat_stack_server.yaml
+ properties:
+ name:
+ str_replace:
+ template: cluster_id-k8s_type-%index%
+ params:
+ cluster_id: { get_param: cluster_id }
+ k8s_type: etcd
+ cluster_env: { get_param: cluster_env }
+ cluster_id: { get_param: cluster_id }
+ type: etcd
+ image: { get_param: etcd_image }
+ flavor: { get_param: etcd_flavor }
+ key_name: { get_resource: keypair }
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
+ secgrp:
+ - { get_resource: etcd-secgrp }
+ floating_network: { get_param: external_net }
+ net_name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: { get_param: cluster_id }
+ depends_on:
+ - interface
+
masters:
type: OS::Heat::ResourceGroup
properties:
@@ -287,22 +463,25 @@ resources:
params:
cluster_id: { get_param: cluster_id }
k8s_type: master
- cluster_id: { get_param: cluster_id }
- type: master
- image: { get_param: master_image }
- flavor: { get_param: master_flavor }
- key_name: { get_resource: keypair }
- net: { get_resource: net }
- subnet: { get_resource: subnet }
+ cluster_env: { get_param: cluster_env }
+ cluster_id: { get_param: cluster_id }
+ type: master
+ image: { get_param: master_image }
+ flavor: { get_param: master_flavor }
+ key_name: { get_resource: keypair }
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
secgrp:
- { get_resource: master-secgrp }
- floating_network: { get_param: floating_ip_pool }
+ - { get_resource: node-secgrp }
+ floating_network: { get_param: external_net }
net_name:
str_replace:
template: openshift-ansible-cluster_id-net
params:
cluster_id: { get_param: cluster_id }
- depends_on: interface
+ depends_on:
+ - interface
compute_nodes:
type: OS::Heat::ResourceGroup
@@ -318,23 +497,25 @@ resources:
cluster_id: { get_param: cluster_id }
k8s_type: node
sub_host_type: compute
- cluster_id: { get_param: cluster_id }
- type: node
- subtype: compute
- image: { get_param: node_image }
- flavor: { get_param: node_flavor }
- key_name: { get_resource: keypair }
- net: { get_resource: net }
- subnet: { get_resource: subnet }
+ cluster_env: { get_param: cluster_env }
+ cluster_id: { get_param: cluster_id }
+ type: node
+ subtype: compute
+ image: { get_param: node_image }
+ flavor: { get_param: node_flavor }
+ key_name: { get_resource: keypair }
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
secgrp:
- { get_resource: node-secgrp }
- floating_network: { get_param: floating_ip_pool }
+ floating_network: { get_param: external_net }
net_name:
str_replace:
template: openshift-ansible-cluster_id-net
params:
cluster_id: { get_param: cluster_id }
- depends_on: interface
+ depends_on:
+ - interface
infra_nodes:
type: OS::Heat::ResourceGroup
@@ -350,21 +531,89 @@ resources:
cluster_id: { get_param: cluster_id }
k8s_type: node
sub_host_type: infra
- cluster_id: { get_param: cluster_id }
- type: node
- subtype: infra
- image: { get_param: infra_image }
- flavor: { get_param: infra_flavor }
- key_name: { get_resource: keypair }
- net: { get_resource: net }
- subnet: { get_resource: subnet }
+ cluster_env: { get_param: cluster_env }
+ cluster_id: { get_param: cluster_id }
+ type: node
+ subtype: infra
+ image: { get_param: infra_image }
+ flavor: { get_param: infra_flavor }
+ key_name: { get_resource: keypair }
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
secgrp:
- { get_resource: node-secgrp }
- { get_resource: infra-secgrp }
- floating_network: { get_param: floating_ip_pool }
+ floating_network: { get_param: external_net }
net_name:
str_replace:
template: openshift-ansible-cluster_id-net
params:
cluster_id: { get_param: cluster_id }
- depends_on: interface
+ depends_on:
+ - interface
+
+ dns:
+ type: OS::Nova::Server
+ properties:
+ name:
+ str_replace:
+ template: cluster_id-dns
+ params:
+ cluster_id: { get_param: cluster_id }
+ key_name: { get_resource: keypair }
+ image: { get_param: dns_image }
+ flavor: { get_param: dns_flavor }
+ networks:
+ - port: { get_resource: dns-port }
+ user_data: { get_resource: dns-config }
+ user_data_format: RAW
+
+ dns-port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_resource: net }
+ fixed_ips:
+ - subnet: { get_resource: subnet }
+ ip_address:
+ str_replace:
+ template: subnet_24_prefix.2
+ params:
+ subnet_24_prefix: { get_param: subnet_24_prefix }
+ security_groups:
+ - { get_resource: dns-secgrp }
+
+ dns-floating-ip:
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network: { get_param: external_net }
+ port_id: { get_resource: dns-port }
+
+ dns-config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: { get_file: user-data }
+ - config:
+ str_replace:
+ template: |
+ #cloud-config
+ write_files:
+ - path: /etc/sysconfig/network-scripts/ifcfg-eth0
+ content: |
+ DEVICE="eth0"
+ BOOTPROTO="dhcp"
+ DNS1="$dns1"
+ DNS2="$dns2"
+ PEERDNS="no"
+ ONBOOT="yes"
+ runcmd:
+ - [ "/usr/bin/systemctl", "restart", "network" ]
+ params:
+ $dns1:
+ get_param:
+ - dns_nameservers
+ - 0
+ $dns2:
+ get_param:
+ - dns_nameservers
+ - 1
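For reference, with a hypothetical subnet_24_prefix of "192.168.23" the str_replace templates above render to the following subnet properties (the dns-port is pinned to the same .2 address that the subnet hands out as its nameserver; .1 is typically taken by the router):

# Rendered values for subnet_24_prefix: "192.168.23" (hypothetical prefix)
cidr: 192.168.23.0/24
allocation_pools:
  - start: 192.168.23.3      # .2 is reserved for the DNS server's fixed IP
    end: 192.168.23.254
dns_nameservers:
  - 192.168.23.2             # matches the ip_address given to dns-port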
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
index 9dcab3e60..f83f2c984 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
@@ -9,6 +9,11 @@ parameters:
label: Name
description: Name
+ cluster_env:
+ type: string
+ label: Cluster environment
+ description: Environment of the cluster
+
cluster_id:
type: string
label: Cluster ID
@@ -105,14 +110,9 @@ resources:
user_data: { get_file: user-data }
user_data_format: RAW
metadata:
- env: { get_param: cluster_id }
+ environment: { get_param: cluster_env }
+ clusterid: { get_param: cluster_id }
host-type: { get_param: type }
- env-host-type:
- str_replace:
- template: cluster_id-openshift-type
- params:
- cluster_id: { get_param: cluster_id }
- type: { get_param: type }
sub-host-type: { get_param: subtype }
port:
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index 651aef40b..0afcad72e 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -1,6 +1,7 @@
---
- name: Launch instance(s)
hosts: localhost
+ become: no
connection: local
gather_facts: no
vars_files:
@@ -28,22 +29,28 @@
- name: Create or Update OpenStack Stack
command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }}
+ --timeout 3
+ -P cluster_env={{ cluster_env }}
-P cluster_id={{ cluster_id }}
- -P cidr={{ openstack_network_cidr }}
+ -P subnet_24_prefix={{ openstack_subnet_24_prefix }}
-P dns_nameservers={{ openstack_network_dns | join(",") }}
-P external_net={{ openstack_network_external_net }}
- -P floating_ip_pool={{ openstack_floating_ip_pool }}
-P ssh_public_key="{{ openstack_ssh_public_key }}"
-P ssh_incoming={{ openstack_ssh_access_from }}
+ -P num_etcd={{ num_etcd }}
-P num_masters={{ num_masters }}
-P num_nodes={{ num_nodes }}
-P num_infra={{ num_infra }}
+ -P etcd_image={{ deployment_vars[deployment_type].image }}
-P master_image={{ deployment_vars[deployment_type].image }}
-P node_image={{ deployment_vars[deployment_type].image }}
-P infra_image={{ deployment_vars[deployment_type].image }}
+ -P dns_image={{ deployment_vars[deployment_type].image }}
+ -P etcd_flavor={{ openstack_flavor["etcd"] }}
-P master_flavor={{ openstack_flavor["master"] }}
-P node_flavor={{ openstack_flavor["node"] }}
-P infra_flavor={{ openstack_flavor["infra"] }}
+ -P dns_flavor=m1.small
openshift-ansible-{{ cluster_id }}-stack'
- name: Wait for OpenStack Stack readiness
@@ -51,8 +58,41 @@
register: stack_show_status_result
until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS']
retries: 30
- delay: 1
- failed_when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
+ delay: 5
+
+ - name: Display the stack resources
+ command: 'heat resource-list openshift-ansible-{{ cluster_id }}-stack'
+ register: stack_resource_list_result
+ when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
+
+ - name: Display the stack status
+ command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack'
+ register: stack_show_result
+ when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
+
+ - name: Delete the stack
+ command: 'heat stack-delete openshift-ansible-{{ cluster_id }}-stack'
+ when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
+
+ - fail:
+ msg: |
+
+ +--------------------------------------+
+ |   ^                                  |
+ |  /!\ Failed to create the heat stack |
+ | /___\                                |
+ +--------------------------------------+
+
+ Here is the list of stack resources and their status:
+ {{ stack_resource_list_result.stdout }}
+
+ Here is the status of the stack:
+ {{ stack_show_result.stdout }}
+
+   ^    Failed to create the heat stack
+  /!\
+ /___\  Please check the `stack_status_reason` line in the output above to see why.
+ when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
- name: Read OpenStack Stack outputs
command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack'
@@ -61,13 +101,29 @@
- set_fact:
parsed_outputs: "{{ stack_show_result | oo_parse_heat_stack_outputs }}"
+ - name: Add new etcd instances groups and variables
+ add_host:
+ hostname: '{{ item[0] }}'
+ ansible_ssh_host: '{{ item[2] }}'
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: 'tag_environment_{{ cluster_env }}, tag_host-type_etcd, tag_sub-host-type_default, tag_clusterid_{{ cluster_id }}'
+ openshift_node_labels:
+ type: "etcd"
+ with_together:
+ - parsed_outputs.etcd_names
+ - parsed_outputs.etcd_ips
+ - parsed_outputs.etcd_floating_ips
+
- name: Add new master instances groups and variables
add_host:
hostname: '{{ item[0] }}'
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- groups: 'tag_env_{{ cluster_id }}, tag_host-type_master, tag_env-host-type_{{ cluster_id }}-openshift-master, tag_sub-host-type_default'
+ groups: 'tag_environment_{{ cluster_env }}, tag_host-type_master, tag_sub-host-type_default, tag_clusterid_{{ cluster_id }}'
+ openshift_node_labels:
+ type: "master"
with_together:
- parsed_outputs.master_names
- parsed_outputs.master_ips
@@ -79,7 +135,9 @@
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_env-host-type_{{ cluster_id }}-openshift-node, tag_sub-host-type_compute'
+ groups: 'tag_environment_{{ cluster_env }}, tag_host-type_node, tag_sub-host-type_compute, tag_clusterid_{{ cluster_id }}'
+ openshift_node_labels:
+ type: "compute"
with_together:
- parsed_outputs.node_names
- parsed_outputs.node_ips
@@ -91,12 +149,22 @@
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_env-host-type_{{ cluster_id }}-openshift-node, tag_sub-host-type_infra'
+ groups: 'tag_environment_{{ cluster_env }}, tag_host-type_node, tag_sub-host-type_infra, tag_clusterid_{{ cluster_id }}'
+ openshift_node_labels:
+ type: "infra"
with_together:
- parsed_outputs.infra_names
- parsed_outputs.infra_ips
- parsed_outputs.infra_floating_ips
+ - name: Add DNS groups and variables
+ add_host:
+ hostname: '{{ parsed_outputs.dns_name }}'
+ ansible_ssh_host: '{{ parsed_outputs.dns_floating_ip }}'
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: '{{ cluster_id }}-dns'
+
- name: Wait for ssh
wait_for:
host: '{{ item }}'
@@ -105,6 +173,7 @@
- parsed_outputs.master_floating_ips
- parsed_outputs.node_floating_ips
- parsed_outputs.infra_floating_ips
+ - parsed_outputs.dns_floating_ip
- name: Wait for user setup
command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup'
@@ -116,6 +185,7 @@
- parsed_outputs.master_floating_ips
- parsed_outputs.node_floating_ips
- parsed_outputs.infra_floating_ips
+ - parsed_outputs.dns_floating_ip
- include: update.yml
diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml
index a75e350c7..123ebd323 100644
--- a/playbooks/openstack/openshift-cluster/list.yml
+++ b/playbooks/openstack/openshift-cluster/list.yml
@@ -1,11 +1,13 @@
---
- name: Generate oo_list_hosts group
hosts: localhost
+ become: no
+ connection: local
gather_facts: no
vars_files:
- vars.yml
tasks:
- - set_fact: scratch_group=tag_env_{{ cluster_id }}
+ - set_fact: scratch_group=tag_clusterid_{{ cluster_id }}
when: cluster_id != ''
- set_fact: scratch_group=all
when: cluster_id == ''
@@ -19,6 +21,14 @@
- name: List Hosts
hosts: oo_list_hosts
+
+- name: List Hosts
+ hosts: localhost
+ become: no
+ connection: local
+ gather_facts: no
+ vars_files:
+ - vars.yml
tasks:
- debug:
- msg: 'public:{{ansible_ssh_host}} private:{{ansible_default_ipv4.address}}'
+ msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml
index 62df2be73..a1fb41b53 100644
--- a/playbooks/openstack/openshift-cluster/terminate.yml
+++ b/playbooks/openstack/openshift-cluster/terminate.yml
@@ -1,17 +1,17 @@
- name: Terminate instance(s)
hosts: localhost
+ become: no
connection: local
gather_facts: no
vars_files:
- vars.yml
tasks:
- - set_fact: cluster_group=tag_env_{{ cluster_id }}
- add_host:
name: "{{ item }}"
groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups[cluster_group] | default([])
+ with_items: (groups['tag_environment_' ~ cluster_env]|default([])) | intersect(groups['tag_clusterid_' ~ cluster_id ]|default([]))
- name: Unsubscribe VMs
hosts: oo_hosts_to_terminate
@@ -19,12 +19,13 @@
- vars.yml
roles:
- role: rhel_unsubscribe
- when: deployment_type == "enterprise" and
+ when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
ansible_distribution == "RedHat" and
lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
default('no', True) | lower in ['no', 'false']
- hosts: localhost
+ become: no
connection: local
gather_facts: no
vars_files:
@@ -42,6 +43,6 @@
register: stack_show_result
until: stack_show_result.stdout != 'DELETE_IN_PROGRESS'
retries: 60
- delay: 1
+ delay: 5
failed_when: '"Stack not found" not in stack_show_result.stderr and
stack_show_result.stdout != "DELETE_COMPLETE"'
diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml
index e006aa74a..16027b15c 100644
--- a/playbooks/openstack/openshift-cluster/update.yml
+++ b/playbooks/openstack/openshift-cluster/update.yml
@@ -1,9 +1,14 @@
---
+- include: dns.yml
+
- name: Populate oo_hosts_to_update group
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
vars_files:
- vars.yml
+ - cluster_hosts.yml
tasks:
- name: Evaluate oo_hosts_to_update
add_host:
@@ -11,9 +16,7 @@
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: (groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([]))
- | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([]))
- | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-etcd"] | default([]))
+ with_items: "{{ g_all_hosts | default([]) }}"
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index 262d3f4ed..ee26d223e 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -1,12 +1,11 @@
---
+debug_level: 2
openstack_infra_heat_stack: "{{ lookup('oo_option', 'infra_heat_stack' ) |
default('files/heat_stack.yaml', True) }}"
-openstack_network_cidr: "{{ lookup('oo_option', 'net_cidr' ) |
- default('192.168.' + ( ( 1048576 | random % 256 ) | string() ) + '.0/24', True) }}"
+openstack_subnet_24_prefix: "{{ lookup('oo_option', 'subnet_24_prefix' ) |
+ default('192.168.' + ( ( 1048576 | random % 256 ) | string() ), True) }}"
openstack_network_external_net: "{{ lookup('oo_option', 'external_net' ) |
default('external', True) }}"
-openstack_floating_ip_pool: "{{ lookup('oo_option', 'floating_ip_pool' ) |
- default('external', True) }}"
openstack_network_dns: "{{ lookup('oo_option', 'dns' ) |
default('8.8.8.8,8.8.4.4', True) | oo_split() }}"
openstack_ssh_public_key: "{{ lookup('file', lookup('oo_option', 'public_key') |
@@ -14,10 +13,16 @@ openstack_ssh_public_key: "{{ lookup('file', lookup('oo_option', 'public_k
openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') |
default('0.0.0.0/0', True) }}"
openstack_flavor:
+ etcd: "{{ lookup('oo_option', 'etcd_flavor' ) | default('m1.small', True) }}"
master: "{{ lookup('oo_option', 'master_flavor' ) | default('m1.small', True) }}"
infra: "{{ lookup('oo_option', 'infra_flavor' ) | default('m1.small', True) }}"
node: "{{ lookup('oo_option', 'node_flavor' ) | default('m1.medium', True) }}"
+deployment_rhel7_ent_base:
+ image: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.2-20151102.0.x86_64', True) }}"
+ ssh_user: openshift
+ sudo: yes
+
deployment_vars:
origin:
image: "{{ lookup('oo_option', 'image_name') | default('centos-70-raw', True) }}"
@@ -27,7 +32,6 @@ deployment_vars:
image:
ssh_user: root
sudo: no
- enterprise:
- image: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.1-20150224.0.x86_64', True) }}"
- ssh_user: openshift
- sudo: yes
+ enterprise: "{{ deployment_rhel7_ent_base }}"
+ openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
+ atomic-enterprise: "{{ deployment_rhel7_ent_base }}"