-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 5
-rw-r--r--  playbooks/container-runtime/private/config.yml | 6
-rw-r--r--  playbooks/gcp/openshift-cluster/build_image.yml | 6
-rw-r--r--  playbooks/init/base_packages.yml | 5
-rw-r--r--  playbooks/openshift-etcd/scaleup.yml | 1
-rw-r--r--  playbooks/openshift-master/scaleup.yml | 1
-rw-r--r--  playbooks/openshift-node/scaleup.yml | 1
-rw-r--r--  playbooks/openshift-prometheus/private/uninstall.yml | 2
-rw-r--r--  roles/openshift_aws/tasks/seal_ami.yml | 15
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 25
-rw-r--r--  roles/openshift_node/defaults/main.yml | 2
-rw-r--r--  roles/openshift_node/tasks/bootstrap.yml | 4
-rw-r--r--  roles/openshift_prometheus/tasks/uninstall_prometheus.yaml (renamed from roles/openshift_prometheus/tasks/uninstall.yaml) | 0
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml | 2
-rw-r--r--  roles/openshift_web_console/files/console-template.yaml | 15
-rw-r--r--  roles/openshift_web_console/tasks/update_console_config.yml | 7
-rw-r--r--  roles/openshift_web_console/vars/openshift-enterprise.yml | 2
18 files changed, 61 insertions(+), 39 deletions(-)
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index ba783638d..a9a35b028 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -6,6 +6,7 @@
g_new_node_hosts: []
- import_playbook: ../../../init/basic_facts.yml
+- import_playbook: ../../../init/base_packages.yml
- import_playbook: ../../../init/cluster_facts.yml
- name: Ensure firewall is not switched during upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 8792295c6..9c7677f1b 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -15,6 +15,7 @@
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_base_packages_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
## Check to see if they're running 3.7 and if so upgrade them to 3.8 on control plane
## If they've specified pkg_version or image_tag preserve that for later use
@@ -125,8 +126,8 @@
- name: Restart master controllers to force new leader election mode
service:
name: "{{ openshift_service_type }}-master-controllers"
- state: restart
- when: openshift.common.rolling_restart_mode == 'service'
+ state: restarted
+ when: openshift.common.rolling_restart_mode == 'services'
- name: Re-enable master controllers to force new leader election mode
service:
name: "{{ openshift_service_type }}-master-controllers"
diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml
index 5396df20a..d5312de15 100644
--- a/playbooks/container-runtime/private/config.yml
+++ b/playbooks/container-runtime/private/config.yml
@@ -12,6 +12,12 @@
- role: container_runtime
tasks:
- import_role:
+ name: openshift_excluder
+ tasks_from: enable.yml
+ vars:
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_enable_openshift_excluder: false
+ - import_role:
name: container_runtime
tasks_from: package_docker.yml
when:
diff --git a/playbooks/gcp/openshift-cluster/build_image.yml b/playbooks/gcp/openshift-cluster/build_image.yml
index 787de8ebc..0daf61122 100644
--- a/playbooks/gcp/openshift-cluster/build_image.yml
+++ b/playbooks/gcp/openshift-cluster/build_image.yml
@@ -62,6 +62,12 @@
timeout: 120
with_items: "{{ gce.instance_data }}"
+- name: Wait for full SSH connection
+ hosts: nodes
+ gather_facts: no
+ tasks:
+ - wait_for_connection:
+
- hosts: nodes
tasks:
- name: Set facts
diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml
index 81f4dd183..addb4f44d 100644
--- a/playbooks/init/base_packages.yml
+++ b/playbooks/init/base_packages.yml
@@ -1,8 +1,9 @@
---
-# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+# l_base_packages_hosts may be passed in via prerequisites.yml during scaleup plays
+# and upgrade_control_plane.yml upgrade plays.
- name: Install packages necessary for installer
- hosts: "{{ l_scale_up_hosts | default('oo_all_hosts') }}"
+ hosts: "{{ l_base_packages_hosts | default('oo_all_hosts') }}"
any_errors_fatal: true
tasks:
- when:
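As the renamed variable's comment notes, callers scope this play by passing l_base_packages_hosts through their import_playbook vars; the scaleup hunks further down in this diff do exactly that. A minimal sketch of the calling side (the group name here is illustrative):

  - import_playbook: ../prerequisites.yml
    vars:
      l_base_packages_hosts: "oo_nodes_to_config"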
diff --git a/playbooks/openshift-etcd/scaleup.yml b/playbooks/openshift-etcd/scaleup.yml
index 656454fe3..1f8cb7391 100644
--- a/playbooks/openshift-etcd/scaleup.yml
+++ b/playbooks/openshift-etcd/scaleup.yml
@@ -32,6 +32,7 @@
l_build_container_groups_hosts: "oo_new_etcd_to_config"
l_etcd_scale_up_hosts: "oo_hosts_containerized_managed_true"
l_scale_up_hosts: "oo_new_etcd_to_config"
+ l_base_packages_hosts: "oo_new_etcd_to_config"
l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_new_etcd_to_config"
l_sanity_check_hosts: "{{ groups['oo_new_etcd_to_config'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config']) }}"
when:
diff --git a/playbooks/openshift-master/scaleup.yml b/playbooks/openshift-master/scaleup.yml
index 09e205afc..0ca5d1a61 100644
--- a/playbooks/openshift-master/scaleup.yml
+++ b/playbooks/openshift-master/scaleup.yml
@@ -32,6 +32,7 @@
- import_playbook: ../prerequisites.yml
vars:
l_scale_up_hosts: "oo_nodes_to_config:oo_masters_to_config"
+ l_base_packages_hosts: "oo_nodes_to_config:oo_masters_to_config"
l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config"
l_sanity_check_hosts: "{{ groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) }}"
diff --git a/playbooks/openshift-node/scaleup.yml b/playbooks/openshift-node/scaleup.yml
index 9cc7263b7..bda251fa5 100644
--- a/playbooks/openshift-node/scaleup.yml
+++ b/playbooks/openshift-node/scaleup.yml
@@ -27,6 +27,7 @@
- import_playbook: ../prerequisites.yml
vars:
l_scale_up_hosts: "oo_nodes_to_config"
+ l_base_packages_hosts: "oo_nodes_to_config"
l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config"
l_sanity_check_hosts: "{{ groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) }}"
diff --git a/playbooks/openshift-prometheus/private/uninstall.yml b/playbooks/openshift-prometheus/private/uninstall.yml
index 2df39c2a8..b01f7f988 100644
--- a/playbooks/openshift-prometheus/private/uninstall.yml
+++ b/playbooks/openshift-prometheus/private/uninstall.yml
@@ -5,4 +5,4 @@
- name: Run the Prometheus Uninstall Role Tasks
include_role:
name: openshift_prometheus
- tasks_from: uninstall
+ tasks_from: uninstall_prometheus
diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml
index 74877d5c7..c1cb37a3b 100644
--- a/roles/openshift_aws/tasks/seal_ami.yml
+++ b/roles/openshift_aws/tasks/seal_ami.yml
@@ -10,6 +10,19 @@
delay: 3
until: instancesout.instances|length > 0
+- name: fetch the ami used to create the instance
+ ec2_ami_find:
+ region: "{{ openshift_aws_region }}"
+ ami_id: "{{ instancesout.instances[0]['image_id'] }}"
+ register: original_ami_out
+ retries: 20
+ delay: 3
+ until: original_ami_out.results|length > 0
+
+- name: combine the tags of the original ami with newly created ami
+ set_fact:
+ l_openshift_aws_ami_tags: "{{ original_ami_out.results[0]['tags'] | combine(openshift_aws_ami_tags) }}"
+
- name: bundle ami
ec2_ami:
instance_id: "{{ instancesout.instances.0.instance_id }}"
@@ -17,7 +30,7 @@
state: present
description: "This was provisioned {{ ansible_date_time.iso8601 }}"
name: "{{ openshift_aws_ami_name }}"
- tags: "{{ openshift_aws_ami_tags }}"
+ tags: "{{ l_openshift_aws_ami_tags }}"
wait: yes
register: amioutput
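The new set_fact relies on Ansible's combine filter, where keys from the right-hand dict (openshift_aws_ami_tags) override the original AMI's tags on collision. A minimal sketch of that merge behavior, using made-up tag values:

  - set_fact:
      l_example_tags: "{{ {'Name': 'base-ami', 'team': 'ops'} | combine({'Name': 'sealed-ami'}) }}"
    # l_example_tags becomes {'Name': 'sealed-ami', 'team': 'ops'}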
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index ce27e238f..a92b63979 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -1,25 +1,16 @@
---
-- set_fact:
- openshift_master_certs_no_etcd:
- - admin.crt
- - master.kubelet-client.crt
- - master.proxy-client.crt
- - master.server.crt
- - openshift-master.crt
- - openshift-registry.crt
- - openshift-router.crt
- - etcd.server.crt
- openshift_master_certs_etcd:
- - master.etcd-client.crt
-
-- set_fact:
- openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd )) if openshift_master_etcd_hosts | length > 0 else openshift_master_certs_no_etcd }}"
-
- name: Check status of master certificates
stat:
path: "{{ openshift_master_config_dir }}/{{ item }}"
with_items:
- - "{{ openshift_master_certs }}"
+ - admin.crt
+ - ca.crt
+ - ca-bundle.crt
+ - master.kubelet-client.crt
+ - master.proxy-client.crt
+ - master.server.crt
+ - openshift-master.crt
+ - service-signer.crt
register: g_master_cert_stat_result
when: not openshift_certificates_redeploy | default(false) | bool
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 9f887891b..64ab07bb5 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -112,7 +112,7 @@ l_is_openvswitch_system_container: "{{ (openshift_use_openvswitch_system_contain
openshift_image_tag: ''
default_r_openshift_node_image_prep_packages:
-- "{{ openshift_service_type }}-master"
+#- "{{ openshift_service_type }}-master"
- "{{ openshift_service_type }}-node"
- "{{ openshift_service_type }}-docker-excluder"
- "{{ openshift_service_type }}-sdn-ovs"
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
index 1a6f209e0..f9f042eeb 100644
--- a/roles/openshift_node/tasks/bootstrap.yml
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -43,13 +43,13 @@
# line: "{% raw %}ExecStart=/usr/bin/openshift start node --bootstrap --kubeconfig=${KUBECONFIG} $OPTIONS{% endraw %}"
# regexp: "^ExecStart=.*"
-- name: "disable {{ openshift_service_type }}-node and {{ openshift_service_type }}-master services"
+- name: "disable {{ openshift_service_type }}-node" # and {{ openshift_service_type }}-master services"
systemd:
name: "{{ item }}"
enabled: no
with_items:
- "{{ openshift_service_type }}-node.service"
- - "{{ openshift_service_type }}-master.service"
+# - "{{ openshift_service_type }}-master.service"
- name: Check for RPM generated config marker file .config_managed
stat:
diff --git a/roles/openshift_prometheus/tasks/uninstall.yaml b/roles/openshift_prometheus/tasks/uninstall_prometheus.yaml
index d746402db..d746402db 100644
--- a/roles/openshift_prometheus/tasks/uninstall.yaml
+++ b/roles/openshift_prometheus/tasks/uninstall_prometheus.yaml
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index c0a8c53de..303589617 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -1,6 +1,6 @@
---
- name: Create heketi DB volume
- command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --listfile /tmp/heketi-storage.json"
+ command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --image {{ glusterfs_heketi_image }}:{{ glusterfs_heketi_version }} --listfile /tmp/heketi-storage.json"
register: setup_storage
- name: Copy heketi-storage list
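For clarity, a rough sketch of what the templated task might render to once the new --image flag is filled in; the client binary, image name, and tag below are assumptions for illustration, not values taken from this diff:

  - name: Create heketi DB volume
    command: "heketi-cli setup-openshift-heketi-storage --image rhgs3/rhgs-volmanager-rhel7:v3.9 --listfile /tmp/heketi-storage.json"
    register: setup_storage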
diff --git a/roles/openshift_web_console/files/console-template.yaml b/roles/openshift_web_console/files/console-template.yaml
index 547e7a265..5bcfcf73f 100644
--- a/roles/openshift_web_console/files/console-template.yaml
+++ b/roles/openshift_web_console/files/console-template.yaml
@@ -67,10 +67,17 @@ objects:
port: 8443
scheme: HTTPS
livenessProbe:
- httpGet:
- path: /
- port: 8443
- scheme: HTTPS
+ exec:
+ command:
+ - /bin/sh
+ - -i
+ - -c
+ - |-
+ if [[ ! -f /tmp/webconsole-config.hash ]]; then \
+ md5sum /var/webconsole-config/webconsole-config.yaml > /tmp/webconsole-config.hash; \
+ elif [[ $(md5sum /var/webconsole-config/webconsole-config.yaml) != $(cat /tmp/webconsole-config.hash) ]]; then \
+ exit 1; \
+ fi && curl -k -f https://0.0.0.0:8443/console/
resources:
requests:
cpu: 100m
diff --git a/roles/openshift_web_console/tasks/update_console_config.yml b/roles/openshift_web_console/tasks/update_console_config.yml
index 967222ea4..8b967cda3 100644
--- a/roles/openshift_web_console/tasks/update_console_config.yml
+++ b/roles/openshift_web_console/tasks/update_console_config.yml
@@ -5,9 +5,6 @@
# `value` properties in the same format as `yedit` module `edits`. Only
# properties passed are updated. The separator for nested properties is `#`.
#
-# Note that this triggers a redeployment on the console and a brief downtime
-# since it uses a `Recreate` strategy.
-#
# Example usage:
#
# - include_role:
@@ -55,13 +52,9 @@
state: present
from_file:
webconsole-config.yaml: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
- register: update_console_config_map
- name: Remove temp directory
file:
state: absent
name: "{{ mktemp_console.stdout }}"
changed_when: False
-
- - include_tasks: rollout_console.yml
- when: update_console_config_map.changed | bool
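The header comment above refers to an example usage that this hunk does not show. A hedged sketch of what such a call could look like, assuming the role's console_config_edits variable and an illustrative key/value (the hostname is made up):

  - include_role:
      name: openshift_web_console
      tasks_from: update_console_config.yml
    vars:
      console_config_edits:
        - key: clusterInfo#loggingPublicURL
          value: "https://kibana.apps.example.com"
    when: openshift_web_console_install | default(true) | bool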
diff --git a/roles/openshift_web_console/vars/openshift-enterprise.yml b/roles/openshift_web_console/vars/openshift-enterprise.yml
index d0bb61a56..375c22067 100644
--- a/roles/openshift_web_console/vars/openshift-enterprise.yml
+++ b/roles/openshift_web_console/vars/openshift-enterprise.yml
@@ -1,4 +1,4 @@
---
__openshift_web_console_prefix: "registry.access.redhat.com/openshift3/ose-"
-__openshift_web_console_version: "v3.10"
+__openshift_web_console_version: "v3.9"
__openshift_web_console_image_name: "web-console"