-rw-r--r--  .flake8 | 2
-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  files/origin-components/console-config.yaml | 27
-rw-r--r--  openshift-ansible.spec | 64
-rw-r--r--  playbooks/aws/README.md | 14
-rwxr-xr-x  playbooks/aws/openshift-cluster/accept.yml | 41
-rw-r--r--  playbooks/aws/openshift-cluster/uninstall_prerequisites.yml | 6
-rw-r--r--  playbooks/aws/openshift-cluster/uninstall_sec_group.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/uninstall_vpc.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/container-runtime/private/build_container_groups.yml | 2
-rw-r--r--  playbooks/container-runtime/private/config.yml | 6
-rw-r--r--  playbooks/container-runtime/private/setup_storage.yml | 5
-rw-r--r--  playbooks/init/base_packages.yml | 4
-rw-r--r--  playbooks/init/basic_facts.yml (renamed from playbooks/init/facts.yml) | 49
-rw-r--r--  playbooks/init/cluster_facts.yml | 42
-rw-r--r--  playbooks/init/main.yml | 11
-rw-r--r--  playbooks/init/repos.yml | 4
-rw-r--r--  playbooks/init/sanity_checks.yml | 3
-rw-r--r--  playbooks/openshift-etcd/upgrade.yml | 3
-rw-r--r--  playbooks/openshift-master/scaleup.yml | 41
-rw-r--r--  playbooks/openshift-node/scaleup.yml | 24
-rw-r--r--  playbooks/openstack/README.md | 8
-rw-r--r--  playbooks/openstack/openshift-cluster/provision.yml | 4
-rw-r--r--  playbooks/prerequisites.yml | 9
-rw-r--r--  roles/container_runtime/tasks/package_docker.yml | 11
-rw-r--r--  roles/etcd/tasks/auxiliary/drop_etcdctl.yml | 2
-rw-r--r--  roles/openshift_aws/defaults/main.yml | 4
-rw-r--r--  roles/openshift_aws/tasks/accept_nodes.yml | 4
-rw-r--r--  roles/openshift_aws/tasks/uninstall_security_group.yml | 14
-rw-r--r--  roles/openshift_aws/tasks/uninstall_ssh_keys.yml | 9
-rw-r--r--  roles/openshift_aws/tasks/uninstall_vpc.yml | 36
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 5
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 32
-rw-r--r--  roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/annotate_ops_projects.yaml | 1
-rw-r--r--  roles/openshift_logging/tasks/delete_logging.yaml | 10
-rw-r--r--  roles/openshift_logging/tasks/generate_certs.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 5
-rw-r--r--  roles/openshift_logging/tasks/procure_server_certs.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_metrics.yaml | 5
-rw-r--r--  roles/openshift_metrics/tasks/uninstall_metrics.yaml | 10
-rw-r--r--  roles/openshift_openstack/templates/heat_stack.yaml.j2 | 24
-rw-r--r--  roles/openshift_openstack/templates/heat_stack_server.yaml.j2 | 2
-rw-r--r--  roles/openshift_web_console/defaults/main.yml | 3
-rw-r--r--  roles/openshift_web_console/tasks/install.yml | 134
-rw-r--r--  roles/openshift_web_console/tasks/rollout_console.yml | 20
-rw-r--r--  roles/openshift_web_console/tasks/update_console_config.yml | 12
-rw-r--r--  roles/openshift_web_console/vars/default_images.yml | 4
-rw-r--r--  roles/openshift_web_console/vars/openshift-enterprise.yml | 4
-rw-r--r--  roles/os_firewall/tasks/firewalld.yml | 5
-rw-r--r--  roles/template_service_broker/defaults/main.yml | 2
-rw-r--r--  roles/template_service_broker/tasks/install.yml | 21
-rw-r--r--  roles/template_service_broker/tasks/remove.yml | 15
-rw-r--r--  roles/tuned/tasks/main.yml | 7
-rw-r--r--  utils/src/ooinstall/cli_installer.py | 89
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py | 18
-rw-r--r--  utils/test/cli_installer_tests.py | 285
-rw-r--r--  utils/test/fixture.py | 11
-rw-r--r--  utils/test/oo_config_tests.py | 31
-rw-r--r--  utils/test/test_utils.py | 1
69 files changed, 747 insertions(+), 513 deletions(-)
diff --git a/.flake8 b/.flake8
index 99ae3c2f0..cce460d3c 100644
--- a/.flake8
+++ b/.flake8
@@ -1,5 +1,5 @@
[flake8]
# TODO: cleanup flake8 issues with utils/test/*
-exclude=.tox,inventory,utils/test
+exclude=.tox,inventory
max_line_length = 120
ignore = E501,T003
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 4c4a70702..d6dd5a3c8 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.9.0-0.21.0 ./
+3.9.0-0.23.0 ./
diff --git a/files/origin-components/console-config.yaml b/files/origin-components/console-config.yaml
index 901518b28..32a28775f 100644
--- a/files/origin-components/console-config.yaml
+++ b/files/origin-components/console-config.yaml
@@ -6,13 +6,10 @@ clusterInfo:
logoutPublicURL: ""
masterPublicURL: https://127.0.0.1:8443
metricsPublicURL: ""
-# TODO: The new extensions properties cannot be set until
-# origin-web-console-server has been updated with the API changes since
-# `extensions` in the old asset config was an array.
-#extensions:
-# scriptURLs: []
-# stylesheetURLs: []
-# properties: null
+extensions:
+ scriptURLs: []
+ stylesheetURLs: []
+ properties: null
features:
inactivityTimeoutMinutes: 0
servingInfo:
@@ -24,19 +21,3 @@ servingInfo:
maxRequestsInFlight: 0
namedCertificates: null
requestTimeoutSeconds: 0
-
-# START deprecated properties
-# These properties have been renamed and will be removed from the install
-# in a future pull. Keep both the old and new properties for now so that
-# the install is not broken while the origin-web-console image is updated.
-extensionDevelopment: false
-extensionProperties: null
-extensionScripts: null
-extensionStylesheets: null
-extensions: null
-loggingPublicURL: ""
-logoutURL: ""
-masterPublicURL: https://127.0.0.1:8443
-metricsPublicURL: ""
-publicURL: https://127.0.0.1:8443/console/
-# END deprecated properties
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index a7943d5f4..c09e14c66 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.9.0
-Release: 0.21.0%{?dist}
+Release: 0.23.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -202,6 +202,68 @@ Atomic OpenShift Utilities includes
%changelog
+* Tue Jan 23 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.23.0
+- docker_image_availability: enable skopeo to use proxies (lmeyer@redhat.com)
+- Install base_packages earlier (mgugino@redhat.com)
+- allow uninstalling AWS objects created by prerequisite playbook
+ (jdiaz@redhat.com)
+- Bug 1536262: Default console and TSB node selector to
+ openshift_hosted_infra_selector (spadgett@redhat.com)
+- Migrate master-config.yaml asset config (spadgett@redhat.com)
+- Fix master scaleup play (mgugino@redhat.com)
+- use admin credentials for tsb install operations (bparees@redhat.com)
+- Fix etcd-upgrade sanity checks (mgugino@redhat.com)
+- Bug 1536253: Pass `--config` flag on oc commands when installing console
+ (spadgett@redhat.com)
+- Fix enterprise registry-console prefix (sdodson@redhat.com)
+- [release-3.7] Fix enterprise registry console image prefix
+ (sdodson@redhat.com)
+- [release-3.6] Fix enterprise registry console image prefix
+ (sdodson@redhat.com)
+- Bug 1512825 - add mux pod failed for Serial number 02 has already been issued
+ (nhosoi@redhat.com)
+- Remove old console asset config (spadgett@redhat.com)
+- Add support for Amazon EC2 C5 instance types (rteague@redhat.com)
+- Fix provider network support at openstack playbook (ltomasbo@redhat.com)
+
+* Fri Jan 19 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.22.0
+- Fix OpenStack readme (tomas@sedovic.cz)
+- Quick installer: deprecate upgrades (vrutkovs@redhat.com)
+- Fix node scaleup plays (mgugino@redhat.com)
+- Rollout console after template service broker install (spadgett@redhat.com)
+- Use openshift_is_containerized instead of openshift_is_atomic when installing
+ etcd (vrutkovs@redhat.com)
+- Bug 1535947: Fix missing task in metrics, logging uninstall playbooks
+ (spadgett@redhat.com)
+- Make openshift_web_console_prefix defaults like other components
+ (sdodson@redhat.com)
+- Allow for firewalld on atomic host (sdodson@redhat.com)
+- Drop the testing repo var from openstack readme (tomas@sedovic.cz)
+- Add Azure to support openshift_cloudprovider_kind (wehe@redhat.com)
+- bug 1523047. Annotate ops projects with an .operation prefix
+ (jcantril@redhat.com)
+- Pull openshift_image_tag from oo_masters_to_config rather oo_first_master.
+ (abutcher@redhat.com)
+- Ensure atomic_proxies are configured with docker (mgugino@redhat.com)
+- Default install_result when reloading generated facts. (abutcher@redhat.com)
+- health checks: update required pkg versions (lmeyer@redhat.com)
+- health checks: factor out get_required_version (lmeyer@redhat.com)
+- package_version check: reuse get_major_minor_version (lmeyer@redhat.com)
+- Rework default TSB prefix and imagename to match other services
+ (vrutkovs@redhat.com)
+- Add new grafana playbook. (mrsiano@gmail.com)
+- Remove duplication in node acceptance playbook and setup master groups so
+ that we can use the first master's ansible_ssh_user when delegating.
+ (abutcher@redhat.com)
+- Setting default storage_class_names for when calling
+ openshift_logging_elasticsearch role (ewolinet@redhat.com)
+- adding check if secret auth is needed (shawn.hurley21@gmail.com)
+- adding asb auth as a secret. (shawn.hurley21@gmail.com)
+- Ensure we are running oc execs against running pods (ewolinet@redhat.com)
+- Automatic profile setting for tuned 2.9 (jmencak@redhat.com)
+- Fix flake8 errors in utils/test (vrutkovs@redhat.com)
+- kibana checks: use six.moves instead of ImportError (vrutkovs@redhat.com)
+
* Wed Jan 17 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.21.0
- Add call to 3.8 playbook in 3.9 upgrade (sdodson@redhat.com)
- Remove 3.8 and 3.9 specific steps right now (sdodson@redhat.com)
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index d203b9cda..bdc98d1e0 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -198,3 +198,17 @@ At this point your cluster should be ready for workloads. Proceed to deploy app
### Still to come
There are more enhancements that are arriving for provisioning. These will include more playbooks that enhance the provisioning capabilities.
+
+## Uninstall / Deprovisioning
+
+At this time, only deprovisioning of the output of the prerequisites step is provided. You must manually remove things like ELBs and scale groups before attempting to undo the work done by the prerequisites step.
+
+To undo the work done by the prerequisites playbook, simply run the uninstall_prerequisites.yml playbook with the same inventory file and provisioning_vars.yml file that were used during provisioning.
+
+```
+ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_prerequisites.yml
+```
+
+This should result in removal of the security groups and VPC that were created.
+
+NOTE: If you also want to remove the ssh keys that were uploaded (**these ssh keys would be shared if you are running multiple clusters in the same AWS account**, so they are not removed by default), add 'openshift_aws_enable_uninstall_shared_objects: True' to your provisioning_vars.yml file.
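For reference, a provisioning_vars.yml fragment that opts in to shared-object removal might look like this (a minimal sketch; the cluster id and region values are illustrative placeholders):

```yaml
# provisioning_vars.yml (fragment; values are examples only)
openshift_aws_clusterid: mycluster
openshift_aws_region: us-east-1
# Opt in to deleting shared objects such as uploaded ssh keys (off by default):
openshift_aws_enable_uninstall_shared_objects: True
```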
diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml
index e7bed4f6e..46c453333 100755
--- a/playbooks/aws/openshift-cluster/accept.yml
+++ b/playbooks/aws/openshift-cluster/accept.yml
@@ -1,8 +1,7 @@
#!/usr/bin/ansible-playbook
---
-- name: Setup the vpc and the master node group
+- name: Accept nodes
hosts: localhost
- remote_user: root
gather_facts: no
tasks:
- name: Alert user to variables needed - clusterid
@@ -17,37 +16,7 @@
import_role:
name: lib_openshift
- - name: fetch masters
- ec2_instance_facts:
- region: "{{ openshift_aws_region | default('us-east-1') }}"
- filters:
- "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
- "tag:host-type": master
- instance-state-name: running
- register: mastersout
- retries: 20
- delay: 3
- until: "'instances' in mastersout and mastersout.instances|length > 0"
-
- - name: fetch new node instances
- ec2_instance_facts:
- region: "{{ openshift_aws_region | default('us-east-1') }}"
- filters:
- "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
- "tag:host-type": node
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until: "'instances' in instancesout and instancesout.instances|length > 0"
-
- - debug:
- msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
-
- - name: approve nodes
- oc_adm_csr:
- #approve_all: True
- nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
- timeout: 60
- register: nodeout
- delegate_to: "{{ mastersout.instances[0].public_ip_address }}"
+ - name: accept nodes
+ import_role:
+ name: openshift_aws
+ tasks_from: accept_nodes.yml
diff --git a/playbooks/aws/openshift-cluster/uninstall_prerequisites.yml b/playbooks/aws/openshift-cluster/uninstall_prerequisites.yml
new file mode 100644
index 000000000..180c2281a
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_prerequisites.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: uninstall_sec_group.yml
+
+- import_playbook: uninstall_vpc.yml
+
+- import_playbook: uninstall_ssh_keypair.yml
diff --git a/playbooks/aws/openshift-cluster/uninstall_sec_group.yml b/playbooks/aws/openshift-cluster/uninstall_sec_group.yml
new file mode 100644
index 000000000..642e5b169
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_sec_group.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: delete security groups
+ include_role:
+ name: openshift_aws
+ tasks_from: uninstall_security_group.yml
+ when: openshift_aws_create_security_groups | default(True) | bool
diff --git a/playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml b/playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml
new file mode 100644
index 000000000..ec9caa51b
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: remove ssh keypair(s)
+ include_role:
+ name: openshift_aws
+ tasks_from: uninstall_ssh_keys.yml
+ when: openshift_aws_users | default([]) | length > 0
diff --git a/playbooks/aws/openshift-cluster/uninstall_vpc.yml b/playbooks/aws/openshift-cluster/uninstall_vpc.yml
new file mode 100644
index 000000000..4c988bcc5
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_vpc.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: delete vpc
+ include_role:
+ name: openshift_aws
+ tasks_from: uninstall_vpc.yml
+ when: openshift_aws_create_vpc | default(True) | bool
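Each uninstall playbook above is gated on the same inventory flags that drive provisioning, so objects that were never created are skipped. A sketch of the variables these gates consume, with illustrative values:

```yaml
# Illustrative inventory fragment consumed by the uninstall gates above
openshift_aws_create_security_groups: True   # default True; False skips SG deletion
openshift_aws_create_vpc: True               # default True; False skips VPC deletion
openshift_aws_users: []                      # an empty list skips ssh keypair removal
```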
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 8ee83819e..ba783638d 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -5,7 +5,8 @@
g_new_master_hosts: []
g_new_node_hosts: []
-- import_playbook: ../../../init/facts.yml
+- import_playbook: ../../../init/basic_facts.yml
+- import_playbook: ../../../init/cluster_facts.yml
- name: Ensure firewall is not switched during upgrade
hosts: "{{ l_upgrade_no_switch_firewall_hosts | default('oo_all_hosts') }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index eb5f07ae0..d88880140 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -14,7 +14,7 @@
- import_playbook: ../init.yml
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 8d42e4c91..ce069e2d0 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -14,7 +14,7 @@
- import_playbook: ../init.yml
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index 51da45311..3f26a6297 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -14,7 +14,7 @@
- import_playbook: ../init.yml
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
when: not skip_version_info | default(false)
- name: Configure the upgrade target for the common upgrade tasks
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 384eeed4c..0f48725f6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -14,7 +14,7 @@
- import_playbook: ../init.yml
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
## Check to see if they're running 3.7 and if so upgrade them to 3.8 on control plane
## If they've specified pkg_version or image_tag preserve that for later use
diff --git a/playbooks/container-runtime/private/build_container_groups.yml b/playbooks/container-runtime/private/build_container_groups.yml
index 7fd60743c..a2361d50c 100644
--- a/playbooks/container-runtime/private/build_container_groups.yml
+++ b/playbooks/container-runtime/private/build_container_groups.yml
@@ -3,4 +3,4 @@
hosts: oo_all_hosts:!oo_nodes_to_config
tasks:
- group_by:
- key: oo_hosts_containerized_managed_{{ (containerized | default(False)) | ternary('true','false') }}
+ key: oo_hosts_containerized_managed_{{ (openshift_is_containerized | default(False)) | ternary('true','false') }}
diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml
index 7a49adcf0..817a8bf30 100644
--- a/playbooks/container-runtime/private/config.yml
+++ b/playbooks/container-runtime/private/config.yml
@@ -1,7 +1,11 @@
---
+# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+
- import_playbook: build_container_groups.yml
-- hosts: oo_nodes_to_config:oo_hosts_containerized_managed_true
+- hosts: "{{ l_scale_up_hosts | default(l_default_container_runtime_hosts) }}"
+ vars:
+ l_default_container_runtime_hosts: "oo_nodes_to_config:oo_hosts_containerized_managed_true"
roles:
- role: container_runtime
tasks:
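The `l_scale_up_hosts` override used here (and in the other init plays touched below) is supplied via `import_playbook` vars; a minimal sketch of how a scale-up play narrows these plays to only the hosts being added:

```yaml
# Minimal sketch: restrict prerequisite plays to the hosts being added
- import_playbook: ../prerequisites.yml
  vars:
    l_scale_up_hosts: "oo_nodes_to_config"
```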
diff --git a/playbooks/container-runtime/private/setup_storage.yml b/playbooks/container-runtime/private/setup_storage.yml
index a6d396270..65630be62 100644
--- a/playbooks/container-runtime/private/setup_storage.yml
+++ b/playbooks/container-runtime/private/setup_storage.yml
@@ -1,8 +1,11 @@
---
+# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+
- import_playbook: build_container_groups.yml
-- hosts: oo_nodes_to_config:oo_hosts_containerized_managed_true
+- hosts: "{{ l_scale_up_hosts | default(l_default_container_storage_hosts) }}"
vars:
+ l_default_container_storage_hosts: "oo_nodes_to_config:oo_hosts_containerized_managed_true"
l_chg_temp: "{{ hostvars[groups['oo_first_master'][0]]['openshift_containerized_host_groups'] | default([]) }}"
l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
# role: container_runtime is necessary here to bring role default variables
diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml
index 15b3dd492..e1052fb6c 100644
--- a/playbooks/init/base_packages.yml
+++ b/playbooks/init/base_packages.yml
@@ -1,6 +1,8 @@
---
+# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+
- name: Install packages necessary for installer
- hosts: oo_all_hosts
+ hosts: "{{ l_scale_up_hosts | default('oo_all_hosts') }}"
any_errors_fatal: true
tasks:
- when:
diff --git a/playbooks/init/facts.yml b/playbooks/init/basic_facts.yml
index 8e4206948..06a4e7291 100644
--- a/playbooks/init/facts.yml
+++ b/playbooks/init/basic_facts.yml
@@ -4,15 +4,13 @@
any_errors_fatal: true
tasks:
-- name: Initialize host facts
- # l_upgrade_non_node_hosts is passed in via play during control-plane-only
- # upgrades; otherwise oo_all_hosts is used.
- hosts: "{{ l_upgrade_non_node_hosts | default('oo_all_hosts') }}"
+- name: Initialize basic host facts
+ # l_init_fact_hosts is passed in via play during control-plane-only
+ # upgrades and scale-up plays; otherwise oo_all_hosts is used.
+ hosts: "{{ l_init_fact_hosts | default('oo_all_hosts') }}"
+ roles:
+ - role: openshift_facts
tasks:
- - name: load openshift_facts module
- import_role:
- name: openshift_facts
-
# TODO: Should this role be refactored into health_checks??
- name: Run openshift_sanitize_inventory to set variables
import_role:
@@ -58,41 +56,6 @@
- l_atomic_docker_version.stdout | replace('"', '') is version_compare('1.12','>=')
msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
- - name: Gather Cluster facts
- openshift_facts:
- role: common
- local_facts:
- deployment_type: "{{ openshift_deployment_type }}"
- deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
- hostname: "{{ openshift_hostname | default(None) }}"
- ip: "{{ openshift_ip | default(None) }}"
- public_hostname: "{{ openshift_public_hostname | default(None) }}"
- public_ip: "{{ openshift_public_ip | default(None) }}"
- portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
- http_proxy: "{{ openshift_http_proxy | default(None) }}"
- https_proxy: "{{ openshift_https_proxy | default(None) }}"
- no_proxy: "{{ openshift_no_proxy | default(None) }}"
- generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
-
- - name: Set fact of no_proxy_internal_hostnames
- openshift_facts:
- role: common
- local_facts:
- no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
- - name: Initialize openshift.node.sdn_mtu
- openshift_facts:
- role: node
- local_facts:
- sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
-
- name: Initialize special first-master variables
hosts: oo_first_master
roles:
diff --git a/playbooks/init/cluster_facts.yml b/playbooks/init/cluster_facts.yml
new file mode 100644
index 000000000..636679e32
--- /dev/null
+++ b/playbooks/init/cluster_facts.yml
@@ -0,0 +1,42 @@
+---
+- name: Initialize cluster facts
+ # l_init_fact_hosts is passed in via play during control-plane-only
+ # upgrades and scale-up plays; otherwise oo_all_hosts is used.
+ hosts: "{{ l_init_fact_hosts | default('oo_all_hosts') }}"
+ roles:
+ - role: openshift_facts
+ tasks:
+ - name: Gather Cluster facts
+ openshift_facts:
+ role: common
+ local_facts:
+ deployment_type: "{{ openshift_deployment_type }}"
+ deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
+ hostname: "{{ openshift_hostname | default(None) }}"
+ ip: "{{ openshift_ip | default(None) }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ public_ip: "{{ openshift_public_ip | default(None) }}"
+ portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+ http_proxy: "{{ openshift_http_proxy | default(None) }}"
+ https_proxy: "{{ openshift_https_proxy | default(None) }}"
+ no_proxy: "{{ openshift_no_proxy | default(None) }}"
+ generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
+
+ - name: Set fact of no_proxy_internal_hostnames
+ openshift_facts:
+ role: common
+ local_facts:
+ no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+ - name: Initialize openshift.node.sdn_mtu
+ openshift_facts:
+ role: node
+ local_facts:
+ sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml
index 8a3f4682d..9886691e0 100644
--- a/playbooks/init/main.yml
+++ b/playbooks/init/main.yml
@@ -1,4 +1,7 @@
---
+# skip_verison and l_install_base_packages are passed in via prerequisites.yml.
+# skip_sanity_checks is passed in via openshift-node/private/image_prep.yml
+
- name: Initialization Checkpoint Start
hosts: all
gather_facts: false
@@ -15,7 +18,13 @@
- import_playbook: evaluate_groups.yml
-- import_playbook: facts.yml
+- import_playbook: basic_facts.yml
+
+# base_packages needs to be set up for openshift_facts.py to run correctly.
+- import_playbook: base_packages.yml
+ when: l_install_base_packages | default(False) | bool
+
+- import_playbook: cluster_facts.yml
- import_playbook: version.yml
when: not (skip_verison | default(False))
diff --git a/playbooks/init/repos.yml b/playbooks/init/repos.yml
index 667f38ddd..655a7e83a 100644
--- a/playbooks/init/repos.yml
+++ b/playbooks/init/repos.yml
@@ -1,6 +1,8 @@
---
+# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+
- name: Setup yum repositories for all hosts
- hosts: oo_all_hosts
+ hosts: "{{ l_scale_up_hosts | default('oo_all_hosts') }}"
gather_facts: no
tasks:
- name: subscribe instances to Red Hat Subscription Manager
diff --git a/playbooks/init/sanity_checks.yml b/playbooks/init/sanity_checks.yml
index 52bcf42c0..fbbb3f8fb 100644
--- a/playbooks/init/sanity_checks.yml
+++ b/playbooks/init/sanity_checks.yml
@@ -1,4 +1,5 @@
---
+# l_sanity_check_hosts may be passed in during scale-up plays
- name: Verify Requirements
hosts: oo_first_master
roles:
@@ -11,5 +12,5 @@
# Thus, sanity_checks cannot gather new information about any hosts.
- name: Run variable sanity checks
sanity_checks:
- check_hosts: "{{ groups['oo_all_hosts'] }}"
+ check_hosts: "{{ l_sanity_check_hosts | default(groups['oo_all_hosts']) }}"
run_once: True
diff --git a/playbooks/openshift-etcd/upgrade.yml b/playbooks/openshift-etcd/upgrade.yml
index 71606e7e4..77999d92c 100644
--- a/playbooks/openshift-etcd/upgrade.yml
+++ b/playbooks/openshift-etcd/upgrade.yml
@@ -2,6 +2,7 @@
- import_playbook: ../init/main.yml
vars:
skip_verison: True
- l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}"
- import_playbook: private/upgrade_main.yml
diff --git a/playbooks/openshift-master/scaleup.yml b/playbooks/openshift-master/scaleup.yml
index 7d31340a2..09e205afc 100644
--- a/playbooks/openshift-master/scaleup.yml
+++ b/playbooks/openshift-master/scaleup.yml
@@ -1,22 +1,43 @@
---
- import_playbook: ../init/evaluate_groups.yml
-- name: Ensure there are new_masters or new_nodes
+- name: Ensure there are new_masters and new_nodes
hosts: localhost
connection: local
gather_facts: no
tasks:
- fail:
+ # new_masters must be part of new_nodes as well; otherwise if new_nodes
+ # is not present, oo_nodes_to_config will contain all existing nodes.
msg: >
- Detected no new_masters or no new_nodes in inventory. Please
- add hosts to the new_masters and new_nodes host groups to add
- masters.
- when:
- - g_new_master_hosts | default([]) | length == 0
- - g_new_node_hosts | default([]) | length == 0
+ Detected no new_masters and/or no new_nodes in inventory. New
+ masters must be part of both new_masters and new_nodes groups.
+ If you are adding just new_nodes, use the
+ playbooks/openshift-node/scaleup.yml play.
+ when: >
+ g_new_master_hosts | default([]) | length == 0
+ or g_new_node_hosts | default([]) | length == 0
-# Need a better way to do the above check for node without
-# running evaluate_groups and init/main.yml
-- import_playbook: ../init/main.yml
+- name: Ensure each new master is also a new node
+ hosts: oo_masters_to_config
+ connection: local
+ gather_facts: no
+ tasks:
+ - fail:
+ # new_masters must be part of new_nodes as well;
+ msg: >
+ Each host in new_masters must also appear in new_nodes
+ when: inventory_hostname not in groups['oo_nodes_to_config']
+
+- import_playbook: ../prerequisites.yml
+ vars:
+ l_scale_up_hosts: "oo_nodes_to_config:oo_masters_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) }}"
+
+- import_playbook: ../init/version.yml
+ vars:
+ l_openshift_version_set_hosts: "oo_masters_to_config:oo_nodes_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:oo_nodes_to_config"
- import_playbook: private/scaleup.yml
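The constraint enforced above — every host in new_masters must also appear in new_nodes — looks like this in a minimal, hypothetical YAML inventory fragment:

```yaml
# Hypothetical inventory fragment for a master scale-up
all:
  children:
    new_masters:
      hosts:
        master3.example.com:   # must also be listed under new_nodes
    new_nodes:
      hosts:
        master3.example.com:
```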
diff --git a/playbooks/openshift-node/scaleup.yml b/playbooks/openshift-node/scaleup.yml
index cf13692ae..9cc7263b7 100644
--- a/playbooks/openshift-node/scaleup.yml
+++ b/playbooks/openshift-node/scaleup.yml
@@ -12,9 +12,27 @@
new_nodes host group to add nodes.
when:
- g_new_node_hosts | default([]) | length == 0
+ - fail:
+ msg: >
+ Please run playbooks/openshift-master/scaleup.yml if you need to
+ scale up both masters and nodes. This playbook is only needed if
+ you are only adding new nodes and not new masters.
+ when:
+ - g_new_node_hosts | default([]) | length > 0
+ - g_new_master_hosts | default([]) | length > 0
+
+# if g_new_node_hosts is not empty, oo_nodes_to_config will be set to
+# g_new_node_hosts via evaluate_groups.yml
+
+- import_playbook: ../prerequisites.yml
+ vars:
+ l_scale_up_hosts: "oo_nodes_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) }}"
-# Need a better way to do the above check for node without
-# running evaluate_groups and init/main.yml
-- import_playbook: ../init/main.yml
+- import_playbook: ../init/version.yml
+ vars:
+ l_openshift_version_set_hosts: "oo_nodes_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_nodes_to_config"
- import_playbook: private/config.yml
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
index 2eb668dd1..842bb34de 100644
--- a/playbooks/openstack/README.md
+++ b/playbooks/openstack/README.md
@@ -185,15 +185,11 @@ resources:
```bash
$ ansible-playbook --user openshift \
- -i openshift-ansible/playbooks/openstack/inventory.py
+ -i openshift-ansible/playbooks/openstack/inventory.py \
-i inventory \
- openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yml \
- -e openshift_repos_enable_testing=true
+ openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yml
```
-Note, you may want to use the testing repo for development purposes only.
-Normally, `openshift_repos_enable_testing` should not be specified.
-
In addition to *your* inventory with your OpenShift and OpenStack
configuration, we are also supplying the [dynamic inventory][dynamic] from
`openshift-ansible/inventory`. It's a script that will look at the Nova servers
diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml
index a38d7bff7..73c1926a0 100644
--- a/playbooks/openstack/openshift-cluster/provision.yml
+++ b/playbooks/openstack/openshift-cluster/provision.yml
@@ -26,8 +26,8 @@
- name: Gather facts for the new nodes
setup:
-- name: set common facts
- import_playbook: ../../init/facts.yml
+- import_playbook: ../../init/basic_facts.yml
+- import_playbook: ../../init/cluster_facts.yml
# TODO(shadower): consider splitting this up so people can stop here
diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml
index 7802f83d9..0b76ca862 100644
--- a/playbooks/prerequisites.yml
+++ b/playbooks/prerequisites.yml
@@ -1,18 +1,21 @@
---
+# l_scale_up_hosts may be passed in via various scaleup plays.
+
- import_playbook: init/main.yml
vars:
skip_verison: True
+ l_install_base_packages: True
- import_playbook: init/validate_hostnames.yml
when: not (skip_validate_hostnames | default(False))
- import_playbook: init/repos.yml
-- import_playbook: init/base_packages.yml
-
# This is required for container runtime for crio, only needs to run once.
- name: Configure os_firewall
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config
+ hosts: "{{ l_scale_up_hosts | default(l_default_firewall_hosts) }}"
+ vars:
+ l_default_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config"
roles:
- role: os_firewall
diff --git a/roles/container_runtime/tasks/package_docker.yml b/roles/container_runtime/tasks/package_docker.yml
index d6e7e7fed..ed9a2709b 100644
--- a/roles/container_runtime/tasks/package_docker.yml
+++ b/roles/container_runtime/tasks/package_docker.yml
@@ -1,6 +1,17 @@
---
- include_tasks: common/pre.yml
+# In some cases, some services may be run as containers and docker may still
+# be installed via rpm.
+- include_tasks: common/atomic_proxy.yml
+ when:
+ - >
+ (openshift_use_system_containers | default(False)) | bool
+ or (openshift_use_etcd_system_container | default(False)) | bool
+ or (openshift_use_openvswitch_system_container | default(False)) | bool
+ or (openshift_use_node_system_container | default(False)) | bool
+ or (openshift_use_master_system_container | default(False)) | bool
+
- name: Get current installed Docker version
command: "{{ repoquery_installed }} --qf '%{version}' docker"
when: not openshift_is_atomic | bool
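The widened guard fires when any of the system-container toggles is set, covering hosts where some services run as system containers while docker itself stays rpm-installed. An illustrative inventory fragment that would trigger it:

```yaml
# Illustrative: node runs as a system container, docker remains an rpm,
# so the atomic proxy configuration still needs to be applied.
openshift_use_node_system_container: True
openshift_use_system_containers: False
```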
diff --git a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
index 881a8c270..cab835e20 100644
--- a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
+++ b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
@@ -1,7 +1,7 @@
---
- name: Install etcd for etcdctl
package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
- when: not openshift_is_atomic | bool
+ when: not openshift_is_containerized | bool
register: result
until: result is succeeded
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index efd2468b2..a729e8dbd 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -301,3 +301,7 @@ openshift_aws_node_user_data: ''
openshift_aws_node_config_namespace: openshift-node
openshift_aws_masters_groups: masters,etcd,nodes
+
+# By default, don't delete things like the shared IAM instance
+# profile and uploaded ssh keys
+openshift_aws_enable_uninstall_shared_objects: False
diff --git a/roles/openshift_aws/tasks/accept_nodes.yml b/roles/openshift_aws/tasks/accept_nodes.yml
index c2a2cea30..db30fe5c9 100644
--- a/roles/openshift_aws/tasks/accept_nodes.yml
+++ b/roles/openshift_aws/tasks/accept_nodes.yml
@@ -1,4 +1,6 @@
---
+- include_tasks: setup_master_group.yml
+
- name: fetch masters
ec2_instance_facts:
region: "{{ openshift_aws_region | default('us-east-1') }}"
@@ -36,4 +38,4 @@
nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
timeout: 60
register: nodeout
- delegate_to: "{{ mastersout.instances[0].public_ip_address }}"
+ delegate_to: "{{ groups.masters.0 }}"
diff --git a/roles/openshift_aws/tasks/uninstall_security_group.yml b/roles/openshift_aws/tasks/uninstall_security_group.yml
new file mode 100644
index 000000000..55d40e8ec
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_security_group.yml
@@ -0,0 +1,14 @@
+---
+- name: delete the node group sgs
+ oo_ec2_group:
+ state: absent
+ name: "{{ item.value.name}}"
+ region: "{{ openshift_aws_region }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
+
+- name: delete the k8s sgs for the node group
+ oo_ec2_group:
+ state: absent
+ name: "{{ item.value.name }}_k8s"
+ region: "{{ openshift_aws_region }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
diff --git a/roles/openshift_aws/tasks/uninstall_ssh_keys.yml b/roles/openshift_aws/tasks/uninstall_ssh_keys.yml
new file mode 100644
index 000000000..27e42da53
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_ssh_keys.yml
@@ -0,0 +1,9 @@
+---
+- name: Remove the public keys for the user(s)
+ ec2_key:
+ state: absent
+ name: "{{ item.key_name }}"
+ region: "{{ openshift_aws_region }}"
+ with_items: "{{ openshift_aws_users }}"
+ no_log: True
+ when: openshift_aws_enable_uninstall_shared_objects | bool
diff --git a/roles/openshift_aws/tasks/uninstall_vpc.yml b/roles/openshift_aws/tasks/uninstall_vpc.yml
new file mode 100644
index 000000000..ecf39f694
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_vpc.yml
@@ -0,0 +1,36 @@
+---
+- name: Fetch the VPC for the vpc.id
+ ec2_vpc_net_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_clusterid }}"
+ register: vpcout
+- debug:
+ var: vpcout
+ verbosity: 1
+
+- when: vpcout.vpcs | length > 0
+ block:
+ - name: delete the vpc igw
+ ec2_vpc_igw:
+ state: absent
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ register: igw
+
+ - name: delete the vpc subnets
+ ec2_vpc_subnet:
+ state: absent
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ cidr: "{{ item.cidr }}"
+ az: "{{ item.az }}"
+ with_items: "{{ openshift_aws_vpc.subnets[openshift_aws_region] }}"
+
+ - name: Delete AWS VPC
+ ec2_vpc_net:
+ state: absent
+ region: "{{ openshift_aws_region }}"
+ name: "{{ openshift_aws_clusterid }}"
+ cidr_block: "{{ openshift_aws_vpc.cidr }}"
+ register: vpc
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index d7c358a2f..26f0525e9 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1465,6 +1465,11 @@ class OpenShiftFacts(object):
if metadata:
metadata['project']['attributes'].pop('sshKeys', None)
metadata['instance'].pop('serviceAccounts', None)
+ elif bios_vendor == 'Amazon EC2':
+ # Adds support for Amazon EC2 C5 instance types
+ provider = 'aws'
+ metadata_url = 'http://169.254.169.254/latest/meta-data/'
+ metadata = get_provider_metadata(metadata_url)
elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
provider = 'aws'
metadata_url = 'http://169.254.169.254/latest/meta-data/'
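C5 instances run on the Nitro hypervisor, so the Xen-based heuristic in the following branch never matches; detection falls back to the DMI BIOS vendor string. A rough Ansible-level analogue, assuming the standard `ansible_system_vendor` fact (openshift_facts.py itself reads DMI data directly; this is only a sketch):

```yaml
# Rough analogue of the new detection branch, using setup-module facts
- name: Flag AWS Nitro-based instances (e.g. C5)
  set_fact:
    looks_like_aws: "{{ ansible_system_vendor | default('') == 'Amazon EC2' }}"
```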
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index ac6ffbbad..d298fbab2 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -40,7 +40,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# to look for images available remotely without waiting to pull them.
dependencies = ["python-docker-py", "skopeo"]
# command for checking if remote registries have an image, without docker pull
- skopeo_command = "timeout 10 skopeo inspect --tls-verify={tls} {creds} docker://{registry}/{image}"
+ skopeo_command = "{proxyvars} timeout 10 skopeo inspect --tls-verify={tls} {creds} docker://{registry}/{image}"
skopeo_example_command = "skopeo inspect [--tls-verify=false] [--creds=<user>:<pass>] docker://<registry>/<image>"
def __init__(self, *args, **kwargs):
@@ -76,11 +76,20 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
if oreg_auth_user != '' and oreg_auth_password != '':
oreg_auth_user = self.template_var(oreg_auth_user)
oreg_auth_password = self.template_var(oreg_auth_password)
- self.skopeo_command_creds = "--creds={}:{}".format(quote(oreg_auth_user), quote(oreg_auth_password))
+ self.skopeo_command_creds = quote("--creds={}:{}".format(oreg_auth_user, oreg_auth_password))
# record whether we could reach a registry or not (and remember results)
self.reachable_registries = {}
+ # take note of any proxy settings needed
+ proxies = []
+ for var in ['http_proxy', 'https_proxy', 'no_proxy']:
+ # ansible vars are openshift_http_proxy, openshift_https_proxy, openshift_no_proxy
+ value = self.get_var("openshift_" + var, default=None)
+ if value:
+ proxies.append(var.upper() + "=" + quote(self.template_var(value)))
+ self.skopeo_proxy_vars = " ".join(proxies)
+
def is_active(self):
"""Skip hosts with unsupported deployment types."""
deployment_type = self.get_var("openshift_deployment_type")
@@ -249,11 +258,18 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
if not self.reachable_registries[registry]:
continue # do not keep trying unreachable registries
- args = dict(registry=registry, image=image)
- args["tls"] = "false" if registry in self.registries["insecure"] else "true"
- args["creds"] = self.skopeo_command_creds if registry == self.registries["oreg"] else ""
+ args = dict(
+ proxyvars=self.skopeo_proxy_vars,
+ tls="false" if registry in self.registries["insecure"] else "true",
+ creds=self.skopeo_command_creds if registry == self.registries["oreg"] else "",
+ registry=quote(registry),
+ image=quote(image),
+ )
- result = self.execute_module_with_retries("command", {"_raw_params": self.skopeo_command.format(**args)})
+ result = self.execute_module_with_retries("command", {
+ "_uses_shell": True,
+ "_raw_params": self.skopeo_command.format(**args),
+ })
if result.get("rc", 0) == 0 and not result.get("failed"):
return True
if result.get("rc") == 124: # RC 124 == timed out; mark unreachable
@@ -263,6 +279,10 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
def connect_to_registry(self, registry):
"""Use ansible wait_for module to test connectivity from host to registry. Returns bool."""
+ if self.skopeo_proxy_vars != "":
+ # assume we can't connect directly; just waive the test
+ return True
+
# test a simple TCP connection
host, _, port = registry.partition(":")
port = port or 443
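The proxy environment the check now prepends to the skopeo command comes from the installer's standard proxy variables (example values only):

```yaml
# Example proxy settings picked up by docker_image_availability
openshift_http_proxy: http://proxy.example.com:3128
openshift_https_proxy: http://proxy.example.com:3128
openshift_no_proxy: .internal.example.com,172.30.0.1
```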
diff --git a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
index cc3159a32..0786e2d2f 100644
--- a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
@@ -102,7 +102,7 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "openshift3/"
+ value: "registry.access.redhat.com/openshift3/"
- description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
name: IMAGE_BASENAME
value: "registry-console"
diff --git a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
index 9f2e6125d..ccea54aaf 100644
--- a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
@@ -102,7 +102,7 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "openshift3/"
+ value: "registry.access.redhat.com/openshift3/"
- description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
name: IMAGE_BASENAME
value: "registry-console"
diff --git a/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml
index f04ce06d3..15ad4e9af 100644
--- a/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml
@@ -102,7 +102,7 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "openshift3/"
+ value: "registry.access.redhat.com/openshift3/"
- description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
name: IMAGE_BASENAME
value: "registry-console"
diff --git a/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml
index c178cf432..7acefa0f0 100644
--- a/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml
@@ -102,7 +102,7 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "openshift3/"
+ value: "registry.access.redhat.com/openshift3/"
- description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
name: IMAGE_BASENAME
value: "registry-console"
diff --git a/roles/openshift_logging/tasks/annotate_ops_projects.yaml b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
index 4a2ee64f0..6fdba6580 100644
--- a/roles/openshift_logging/tasks/annotate_ops_projects.yaml
+++ b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
@@ -12,6 +12,7 @@
separator: '#'
content:
metadata#annotations#openshift.io/logging.ui.hostname: "{{ openshift_logging_kibana_ops_hostname }}"
+ metadata#annotations#openshift.io/logging.data.prefix: ".operations"
with_items: "{{ __logging_ops_projects.stdout.split(' ') }}"
loop_control:
loop_var: project
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index fbc3e3fd1..ced7397b5 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -131,13 +131,13 @@
when:
not openshift_logging_install_eventrouter | default(false) | bool
-# Update asset config in openshift-web-console namespace
-- name: Remove Kibana route information from web console asset config
+# Update console config in openshift-web-console namespace
+- name: Remove Kibana route information from the web console config
include_role:
name: openshift_web_console
- tasks_from: update_asset_config.yml
+ tasks_from: update_console_config.yml
vars:
- asset_config_edits:
- - key: loggingPublicURL
+ console_config_edits:
+ - key: clusterInfo#loggingPublicURL
value: ""
when: openshift_web_console_install | default(true) | bool
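The `#` in each edit key is the yedit path separator, so `clusterInfo#loggingPublicURL` targets a nested key. After the edit above, the relevant fragment of webconsole-config.yaml would read:

```yaml
# Resulting webconsole-config.yaml fragment
clusterInfo:
  loggingPublicURL: ""
```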
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index 0d7f8c056..a40449bf6 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -19,7 +19,7 @@
command: >
{{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
--key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt
- --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test
+ --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test --overwrite=false
check_mode: no
when:
- not ca_key_file.stat.exists
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index f82e55b98..3afd8680f 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -326,9 +326,4 @@
console_config_edits:
- key: clusterInfo#loggingPublicURL
value: "https://{{ openshift_logging_kibana_hostname }}"
- # Continue to set the old deprecated property until the
- # origin-web-console image is updated for the new name.
- # This will be removed in a future pull.
- - key: loggingPublicURL
- value: "https://{{ openshift_logging_kibana_hostname }}"
when: openshift_web_console_install | default(true) | bool
diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml
index bc817075d..d28d1d160 100644
--- a/roles/openshift_logging/tasks/procure_server_certs.yaml
+++ b/roles/openshift_logging/tasks/procure_server_certs.yaml
@@ -30,7 +30,7 @@
{{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
--key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
--hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key
- --signer-serial={{generated_certs_dir}}/ca.serial.txt
+ --signer-serial={{generated_certs_dir}}/ca.serial.txt --overwrite=false
check_mode: no
when:
- cert_info.hostnames is defined
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index 4a63d081e..0dd5d1621 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -79,11 +79,6 @@
console_config_edits:
- key: clusterInfo#metricsPublicURL
value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics"
- # Continue to set the old deprecated property until the
- # origin-web-console image is updated for the new name.
- # This will be removed in a future pull.
- - key: metricsPublicURL
- value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics"
when: openshift_web_console_install | default(true) | bool
- command: >
diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
index 610c7b4e5..1664e9975 100644
--- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml
+++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
@@ -19,13 +19,13 @@
clusterrolebinding/hawkular-metrics
changed_when: delete_metrics.stdout != 'No resources found'
-# Update asset config in openshift-web-console namespace
-- name: Remove metrics route information from web console asset config
+# Update the web console config in openshift-web-console namespace
+- name: Remove metrics route information from the web console config
include_role:
name: openshift_web_console
- tasks_from: update_asset_config.yml
+ tasks_from: update_console_config.yml
vars:
- asset_config_edits:
- - key: metricsPublicURL
+ console_config_edits:
+ - key: clusterInfo#metricsPublicURL
value: ""
when: openshift_web_console_install | default(true) | bool
diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2
index 1be5d3a62..8e7c6288a 100644
--- a/roles/openshift_openstack/templates/heat_stack.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2
@@ -523,7 +523,7 @@ resources:
floating_network:
if:
- no_floating
- - null
+ - ''
- {{ openshift_openstack_external_network_name }}
{% if openshift_openstack_provider_network_name %}
attach_float_net: false
@@ -589,8 +589,13 @@ resources:
secgrp:
- { get_resource: lb-secgrp }
- { get_resource: common-secgrp }
-{% if not openshift_openstack_provider_network_name %}
- floating_network: {{ openshift_openstack_external_network_name }}
+ floating_network:
+ if:
+ - no_floating
+ - ''
+ - {{ openshift_openstack_external_network_name }}
+{% if openshift_openstack_provider_network_name %}
+ attach_float_net: false
{% endif %}
volume_size: {{ openshift_openstack_lb_volume_size }}
{% if not openshift_openstack_provider_network_name %}
@@ -655,7 +660,7 @@ resources:
floating_network:
if:
- no_floating
- - null
+ - ''
- {{ openshift_openstack_external_network_name }}
{% if openshift_openstack_provider_network_name %}
attach_float_net: false
@@ -725,7 +730,7 @@ resources:
floating_network:
if:
- no_floating
- - null
+ - ''
- {{ openshift_openstack_external_network_name }}
{% if openshift_openstack_provider_network_name %}
attach_float_net: false
@@ -792,8 +797,13 @@ resources:
{% endif %}
- { get_resource: infra-secgrp }
- { get_resource: common-secgrp }
-{% if not openshift_openstack_provider_network_name %}
- floating_network: {{ openshift_openstack_external_network_name }}
+ floating_network:
+ if:
+ - no_floating
+ - ''
+ - {{ openshift_openstack_external_network_name }}
+{% if openshift_openstack_provider_network_name %}
+ attach_float_net: false
{% endif %}
volume_size: {{ openshift_openstack_infra_volume_size }}
{% if openshift_openstack_infra_server_group_policies|length > 0 %}
diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
index 1e73c9e1c..29b09f3c9 100644
--- a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
@@ -102,13 +102,11 @@ parameters:
label: Attach-float-net
description: A switch for floating network port connection
-{% if not openshift_openstack_provider_network_name %}
floating_network:
type: string
default: ''
label: Floating network
description: Network to allocate floating IP from
-{% endif %}
availability_zone:
type: string
diff --git a/roles/openshift_web_console/defaults/main.yml b/roles/openshift_web_console/defaults/main.yml
index 4f395398c..c747f73a8 100644
--- a/roles/openshift_web_console/defaults/main.yml
+++ b/roles/openshift_web_console/defaults/main.yml
@@ -1,3 +1,2 @@
---
-# TODO: This is temporary and will be updated to use taints and tolerations so that the console runs on the masters
-openshift_web_console_nodeselector: {"region":"infra"}
+openshift_web_console_nodeselector: "{{ openshift_hosted_infra_selector | default('region=infra') | map_from_pairs }}"
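`map_from_pairs` is a filter shipped in this repository's lib_utils; it converts a `key=value` selector string into a dict. With the default shown above, the rendered node selector would be:

```yaml
# "region=infra" passed through map_from_pairs yields:
openshift_web_console_nodeselector:
  region: infra
```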
diff --git a/roles/openshift_web_console/tasks/install.yml b/roles/openshift_web_console/tasks/install.yml
index 50e72657f..ead62799a 100644
--- a/roles/openshift_web_console/tasks/install.yml
+++ b/roles/openshift_web_console/tasks/install.yml
@@ -21,12 +21,17 @@
node_selector:
- ""
-- name: Make temp directory for the web console config files
+- name: Make temp directory for web console templates
command: mktemp -d /tmp/console-ansible-XXXXXX
register: mktemp
changed_when: False
-- name: Copy the web console config template to temp directory
+- name: Copy admin client config
+ command: >
cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: false
+
+- name: Copy web console templates to temp directory
copy:
src: "{{ __console_files_location }}/{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
@@ -35,61 +40,102 @@
- "{{ __console_rbac_file }}"
- "{{ __console_config_file }}"
-- name: Update the web console config properties
- yedit:
- src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
- edits:
- - key: clusterInfo#consolePublicURL
- # Must have a trailing slash
- value: "{{ openshift.master.public_console_url }}/"
- - key: clusterInfo#masterPublicURL
- value: "{{ openshift.master.public_api_url }}"
- - key: clusterInfo#logoutPublicURL
- value: "{{ openshift.master.logout_url | default('') }}"
- - key: features#inactivityTimeoutMinutes
- value: "{{ openshift_web_console_inactivity_timeout_minutes | default(0) }}"
-
- # TODO: The new extensions properties cannot be set until
- # origin-web-console-server has been updated with the API changes since
- # `extensions` in the old asset config was an array.
-
- # - key: extensions#scriptURLs
- # value: "{{ openshift_web_console_extension_script_urls | default([]) }}"
- # - key: extensions#stylesheetURLs
- # value: "{{ openshift_web_console_extension_stylesheet_urls | default([]) }}"
- # - key: extensions#properties
- # value: "{{ openshift_web_console_extension_properties | default({}) }}"
-
- # DEPRECATED PROPERTIES
- # These properties have been renamed and will be removed from the install
- # in a future pull. Keep both the old and new properties for now so that
- # the install is not broken while the origin-web-console image is updated.
- - key: publicURL
- # Must have a trailing slash
- value: "{{ openshift.master.public_console_url }}/"
- - key: logoutURL
- value: "{{ openshift.master.logout_url | default('') }}"
- - key: masterPublicURL
- value: "{{ openshift.master.public_api_url }}"
- separator: '#'
- state: present
+# Check whether a webconsole-config config map already exists. If so, reuse its
+# contents so we don't overwrite changes.
+- name: Read the existing web console config map
+ oc_configmap:
+ namespace: openshift-web-console
+ name: webconsole-config
+ state: list
+ register: webconsole_config_map
+
+- set_fact:
+ existing_config_map_data: "{{ webconsole_config_map.results.results[0].data | default({}) }}"
+
+- name: Copy the existing web console config to temp directory
+ copy:
+ content: "{{ existing_config_map_data['webconsole-config.yaml'] }}"
+ dest: "{{ mktemp.stdout }}/{{ __console_config_file }}"
+ when: existing_config_map_data['webconsole-config.yaml'] is defined
+
+# Generate a new config when a config map is not defined.
+- when: existing_config_map_data['webconsole-config.yaml'] is not defined
+ block:
# If the previous master-config.yaml contains an assetConfig section, migrate
# it into the new web console config map.
+ - name: Read existing assetConfig in master-config.yaml
+ slurp:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ register: master_config_output
+
+ - set_fact:
+ config_to_migrate: "{{ master_config_output.content | b64decode | from_yaml }}"
+
+ # Update properties in the config template based on inventory vars when the
+ # asset config does not exist.
+ - name: Set web console config properties from inventory variables
+ yedit:
+ src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
+ edits:
+ - key: clusterInfo#consolePublicURL
+ # Must have a trailing slash
+ value: "{{ openshift.master.public_console_url }}/"
+ - key: clusterInfo#masterPublicURL
+ value: "{{ openshift.master.public_api_url }}"
+ - key: clusterInfo#logoutPublicURL
+ value: "{{ openshift.master.logout_url | default('') }}"
+ - key: features#inactivityTimeoutMinutes
+ value: "{{ openshift_web_console_inactivity_timeout_minutes | default(0) }}"
+ - key: extensions#scriptURLs
+ value: "{{ openshift_web_console_extension_script_urls | default([]) }}"
+ - key: extensions#stylesheetURLs
+ value: "{{ openshift_web_console_extension_stylesheet_urls | default([]) }}"
+ - key: extensions#properties
+ value: "{{ openshift_web_console_extension_properties | default({}) }}"
+ separator: '#'
+ state: present
+ when: config_to_migrate.assetConfig is not defined
+
+ - name: Migrate assetConfig from master-config.yaml
+ yedit:
+ src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
+ edits:
+ - key: clusterInfo#consolePublicURL
+ value: "{{ config_to_migrate.assetConfig.publicURL }}"
+ - key: clusterInfo#masterPublicURL
+ value: "{{ config_to_migrate.assetConfig.masterPublicURL }}"
+ - key: clusterInfo#logoutPublicURL
+ value: "{{ config_to_migrate.assetConfig.logoutURL | default('') }}"
+ - key: clusterInfo#metricsPublicURL
+ value: "{{ config_to_migrate.assetConfig.metricsPublicURL | default('') }}"
+ - key: clusterInfo#loggingPublicURL
+ value: "{{ config_to_migrate.assetConfig.loggingPublicURL | default('') }}"
+ - key: servingInfo#maxRequestsInFlight
+ value: "{{ config_to_migrate.assetConfig.servingInfo.maxRequestsInFlight | default(0) }}"
+ - key: servingInfo#requestTimeoutSeconds
+ value: "{{ config_to_migrate.assetConfig.servingInfo.requestTimeoutSeconds | default(0) }}"
+ separator: '#'
+ state: present
+ when: config_to_migrate.assetConfig is defined
- slurp:
src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
- register: config
+ register: updated_console_config
- name: Reconcile with the web console RBAC file
shell: >
- {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __console_rbac_file }}" | {{ openshift_client_binary }} auth reconcile -f -
+ {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __console_rbac_file }}" --config={{ mktemp.stdout }}/admin.kubeconfig
+ | {{ openshift_client_binary }} auth reconcile --config={{ mktemp.stdout }}/admin.kubeconfig -f -
- name: Apply the web console template file
shell: >
{{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __console_template_file }}"
- --param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"
+ --param API_SERVER_CONFIG="{{ updated_console_config['content'] | b64decode }}"
--param IMAGE="{{ openshift_web_console_prefix }}{{ openshift_web_console_image_name }}:{{ openshift_web_console_version }}"
--param NODE_SELECTOR={{ openshift_web_console_nodeselector | to_json | quote }}
--param REPLICA_COUNT="{{ openshift_web_console_replica_count }}"
- | {{ openshift_client_binary }} apply -f -
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ | {{ openshift_client_binary }} apply --config={{ mktemp.stdout }}/admin.kubeconfig -f -
- name: Verify that the web console is running
command: >
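Throughout this file the client invocations now pass `--config` pointing at the admin kubeconfig copied into the temp directory, so `oc process`, `oc auth reconcile`, and `oc apply` authenticate explicitly instead of relying on whatever credentials the ambient environment provides. Reduced to a sketch with hypothetical paths:

    - name: Process and apply a template with an explicit kubeconfig (sketch)
      shell: >
        oc process -f /tmp/console-example/template.yaml
        --config=/tmp/console-example/admin.kubeconfig
        | oc apply --config=/tmp/console-example/admin.kubeconfig -f -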
diff --git a/roles/openshift_web_console/tasks/rollout_console.yml b/roles/openshift_web_console/tasks/rollout_console.yml
new file mode 100644
index 000000000..75682ba1d
--- /dev/null
+++ b/roles/openshift_web_console/tasks/rollout_console.yml
@@ -0,0 +1,20 @@
+---
+- name: Check if console deployment exists
+ oc_obj:
+ kind: deployments
+ name: webconsole
+ namespace: openshift-web-console
+ state: list
+ register: console_deployment
+
+# There's currently no command to trigger a rollout for a k8s deployment
+# without changing the pod spec. Add an annotation to force a rollout.
+- name: Roll out updated web console deployment
+ oc_edit:
+ kind: deployments
+ name: webconsole
+ namespace: openshift-web-console
+ separator: '#'
+ content:
+ spec#template#metadata#annotations#installer-triggered-rollout: "{{ ansible_date_time.iso8601_micro }}"
+ when: console_deployment.results.results.0 | length > 0
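Bumping a pod-template annotation is a common way to force a new rollout of a Kubernetes deployment when nothing meaningful in the spec has changed: each run stamps a fresh timestamp, the template hash changes, and a new replica set rolls out. The same trigger issued by hand might look like this (a sketch assuming an authenticated `oc` client; the timestamp is a placeholder for `ansible_date_time.iso8601_micro`):

    - name: Manually trigger the same console rollout (sketch)
      command: >
        oc patch deployment webconsole -n openshift-web-console
        -p '{"spec":{"template":{"metadata":{"annotations":
        {"installer-triggered-rollout":"2018-02-01T00:00:00.000000Z"}}}}}'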
diff --git a/roles/openshift_web_console/tasks/update_console_config.yml b/roles/openshift_web_console/tasks/update_console_config.yml
index e347c0193..4d2957977 100644
--- a/roles/openshift_web_console/tasks/update_console_config.yml
+++ b/roles/openshift_web_console/tasks/update_console_config.yml
@@ -58,14 +58,4 @@
changed_when: False
# TODO: Only roll out if the config has changed.
-# There's currently no command to trigger a rollout for a k8s deployment
-# without changing the pod spec. Add an annotation to force a rollout after
-# the config map has been edited.
-- name: Rollout updated web console deployment
- oc_edit:
- kind: deployments
- name: webconsole
- namespace: openshift-web-console
- separator: '#'
- content:
- spec#template#metadata#annotations#installer-triggered-rollout: "{{ ansible_date_time.iso8601_micro }}"
+- include_tasks: rollout_console.yml
diff --git a/roles/openshift_web_console/vars/default_images.yml b/roles/openshift_web_console/vars/default_images.yml
index 7adb8a0d0..42d331ac5 100644
--- a/roles/openshift_web_console/vars/default_images.yml
+++ b/roles/openshift_web_console/vars/default_images.yml
@@ -1,4 +1,4 @@
---
-__openshift_web_console_prefix: "docker.io/openshift/"
+__openshift_web_console_prefix: "docker.io/openshift/origin-"
__openshift_web_console_version: "latest"
-__openshift_web_console_image_name: "origin-web-console"
+__openshift_web_console_image_name: "web-console"
diff --git a/roles/openshift_web_console/vars/openshift-enterprise.yml b/roles/openshift_web_console/vars/openshift-enterprise.yml
index 721ac1d27..375c22067 100644
--- a/roles/openshift_web_console/vars/openshift-enterprise.yml
+++ b/roles/openshift_web_console/vars/openshift-enterprise.yml
@@ -1,4 +1,4 @@
---
-__openshift_web_console_prefix: "registry.access.redhat.com/openshift3/"
+__openshift_web_console_prefix: "registry.access.redhat.com/openshift3/ose-"
__openshift_web_console_version: "v3.9"
-__openshift_web_console_image_name: "ose-web-console"
+__openshift_web_console_image_name: "web-console"
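With the prefix now carrying the `ose-`/`origin-` product segment, install.yml assembles the final image reference as prefix + image name + `:` + version, so the defaults above still resolve to the same images as before the rename:

    # IMAGE="{{ openshift_web_console_prefix }}{{ openshift_web_console_image_name }}:{{ openshift_web_console_version }}"
    #   enterprise => registry.access.redhat.com/openshift3/ose-web-console:v3.9
    #   origin     => docker.io/openshift/origin-web-console:latest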
diff --git a/roles/os_firewall/tasks/firewalld.yml b/roles/os_firewall/tasks/firewalld.yml
index 4eae31596..fa933da51 100644
--- a/roles/os_firewall/tasks/firewalld.yml
+++ b/roles/os_firewall/tasks/firewalld.yml
@@ -2,7 +2,9 @@
- name: Fail - Firewalld is not supported on Atomic Host
fail:
msg: "Firewalld is not supported on Atomic Host"
- when: r_os_firewall_is_atomic | bool
+ when:
+ - r_os_firewall_is_atomic | bool
+ - not openshift_enable_unsupported_configurations | default(false)
- name: Install firewalld packages
package:
@@ -10,6 +12,7 @@
state: present
register: result
until: result is succeeded
+ when: not r_os_firewall_is_atomic | bool
- name: Ensure iptables services are not enabled
systemd:
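The fail task keeps firewalld unsupported on Atomic Host by default but now honors the repo-wide escape hatch, and package installation is skipped there since Atomic cannot install RPMs. Opting in would look like this in group vars (a sketch; set it wherever your inventory defines OSEv3 variables):

    # group_vars sketch: suppress the Atomic Host failure at your own risk
    openshift_enable_unsupported_configurations: true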
diff --git a/roles/template_service_broker/defaults/main.yml b/roles/template_service_broker/defaults/main.yml
index c32872d24..3465832cc 100644
--- a/roles/template_service_broker/defaults/main.yml
+++ b/roles/template_service_broker/defaults/main.yml
@@ -3,4 +3,4 @@
template_service_broker_remove: False
template_service_broker_install: True
openshift_template_service_broker_namespaces: ['openshift']
-template_service_broker_selector: { "region": "infra" }
+template_service_broker_selector: "{{ openshift_hosted_infra_selector | default('region=infra') | map_from_pairs }}"
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
index 604e94602..4e6ad2ae5 100644
--- a/roles/template_service_broker/tasks/install.yml
+++ b/roles/template_service_broker/tasks/install.yml
@@ -22,6 +22,11 @@
register: mktemp
changed_when: False
+- name: Copy admin client config
+ command: >
cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: false
+
- copy:
src: "{{ __tsb_files_location }}/{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
@@ -43,16 +48,18 @@
- name: Apply template file
shell: >
- {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
+ {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig
+ -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
--param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"
--param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}"
--param NODE_SELECTOR={{ template_service_broker_selector | to_json | quote }}
- | {{ openshift_client_binary }} apply -f -
+ | {{ openshift_client_binary }} apply --config={{ mktemp.stdout }}/admin.kubeconfig -f -
# reconcile with rbac
- name: Reconcile with RBAC file
shell: >
- {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | {{ openshift_client_binary }} auth reconcile -f -
+ {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}"
+ | {{ openshift_client_binary }} auth reconcile --config={{ mktemp.stdout }}/admin.kubeconfig -f -
# Check that the TSB is running
- name: Verify that TSB is running
@@ -79,9 +86,15 @@
# Register with broker
- name: Register TSB with broker
shell: >
- {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift_client_binary }} apply -f -
+ {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift_client_binary }} apply --config={{ mktemp.stdout }}/admin.kubeconfig -f -
- file:
state: absent
name: "{{ mktemp.stdout }}"
changed_when: False
+
+- name: Roll out console so it discovers the template service broker is installed
+ include_role:
+ name: openshift_web_console
+ tasks_from: rollout_console.yml
+ when: openshift_web_console_install | default(true) | bool
diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml
index db1b558e4..48dc1327e 100644
--- a/roles/template_service_broker/tasks/remove.yml
+++ b/roles/template_service_broker/tasks/remove.yml
@@ -3,6 +3,11 @@
register: mktemp
changed_when: False
+- name: Copy admin client config
+ command: >
cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: false
+
- copy:
src: "{{ __tsb_files_location }}/{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
@@ -12,11 +17,11 @@
- name: Delete TSB broker
shell: >
- {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift_client_binary }} delete --ignore-not-found -f -
+ {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift_client_binary }} delete --config={{ mktemp.stdout }}/admin.kubeconfig --ignore-not-found -f -
- name: Delete TSB objects
shell: >
- {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift_client_binary }} delete --ignore-not-found -f -
+ {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift_client_binary }} delete --config={{ mktemp.stdout }}/admin.kubeconfig --ignore-not-found -f -
- name: empty out tech preview extension file for service console UI
copy:
@@ -31,3 +36,9 @@
state: absent
name: "{{ mktemp.stdout }}"
changed_when: False
+
+- name: Roll out console so it discovers the template service broker is removed
+ include_role:
+ name: openshift_web_console
+ tasks_from: rollout_console.yml
+ when: openshift_web_console_install | default(true) | bool
diff --git a/roles/tuned/tasks/main.yml b/roles/tuned/tasks/main.yml
index 4a28d47b2..5129f4471 100644
--- a/roles/tuned/tasks/main.yml
+++ b/roles/tuned/tasks/main.yml
@@ -28,7 +28,12 @@
when: item.state == 'file'
- name: Make tuned use the recommended tuned profile on restart
- file: path=/etc/tuned/active_profile state=absent
+ file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - /etc/tuned/active_profile
+ - /etc/tuned/profile_mode
- name: Restart tuned service
systemd:
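Deleting both /etc/tuned/active_profile and /etc/tuned/profile_mode clears any manually pinned profile, so the subsequent restart lets tuned fall back to its recommended profile. A quick post-restart check, purely illustrative and not part of the role:

    - name: Show the profile tuned selected after restart (sketch)
      command: tuned-adm active
      register: tuned_active
      changed_when: false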
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 1226242d0..a85a43bd3 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -938,91 +938,10 @@ def uninstall(ctx):
@click.pass_context
# pylint: disable=too-many-statements,too-many-branches
def upgrade(ctx, latest_minor, next_major):
- oo_cfg = ctx.obj['oo_cfg']
-
- if len(oo_cfg.deployment.hosts) == 0:
- click.echo("No hosts defined in: %s" % oo_cfg.config_path)
- sys.exit(1)
-
- variant = oo_cfg.settings['variant']
- if find_variant(variant)[0] is None:
- click.echo("%s is not a supported variant for upgrade." % variant)
- sys.exit(0)
-
- old_version = oo_cfg.settings['variant_version']
-
- try:
- mapping = UPGRADE_MAPPINGS[old_version]
- except KeyError:
- click.echo('No upgrades available for %s %s' % (variant, old_version))
- sys.exit(0)
-
- message = """
- This tool will help you upgrade your existing OpenShift installation.
- Currently running: %s %s
-"""
- click.echo(message % (variant, old_version))
-
- # Map the dynamic upgrade options to the playbook to run for each.
- # Index offset by 1.
- # List contains tuples of booleans for (latest_minor, next_major)
- selections = []
- if not (latest_minor or next_major):
- i = 0
- if 'minor_playbook' in mapping:
- click.echo("(%s) Update to latest %s" % (i + 1, old_version))
- selections.append((True, False))
- i += 1
- if 'major_playbook' in mapping:
- click.echo("(%s) Upgrade to next release: %s" % (i + 1, mapping['major_version']))
- selections.append((False, True))
- i += 1
-
- response = click.prompt("\nChoose an option from above",
- type=click.Choice(list(map(str, range(1, len(selections) + 1)))))
- latest_minor, next_major = selections[int(response) - 1]
-
- if next_major:
- if 'major_playbook' not in mapping:
- click.echo("No major upgrade supported for %s %s with this version "
- "of atomic-openshift-utils." % (variant, old_version))
- sys.exit(0)
- playbook = mapping['major_playbook']
- new_version = mapping['major_version']
- # Update config to reflect the version we're targeting, we'll write
- # to disk once Ansible completes successfully, not before.
- oo_cfg.settings['variant_version'] = new_version
- if oo_cfg.settings['variant'] == 'enterprise':
- oo_cfg.settings['variant'] = 'openshift-enterprise'
-
- if latest_minor:
- if 'minor_playbook' not in mapping:
- click.echo("No minor upgrade supported for %s %s with this version "
- "of atomic-openshift-utils." % (variant, old_version))
- sys.exit(0)
- playbook = mapping['minor_playbook']
- new_version = old_version
-
- click.echo("OpenShift will be upgraded from %s %s to latest %s %s on the following hosts:\n" % (
- variant, old_version, oo_cfg.settings['variant'], new_version))
- for host in oo_cfg.deployment.hosts:
- click.echo(" * %s" % host.connect_to)
-
- if not ctx.obj['unattended']:
- # Prompt interactively to confirm:
- if not click.confirm("\nDo you want to proceed?"):
- click.echo("Upgrade cancelled.")
- sys.exit(0)
-
- retcode = openshift_ansible.run_upgrade_playbook(oo_cfg.deployment.hosts,
- playbook,
- ctx.obj['verbose'])
- if retcode > 0:
- click.echo("Errors encountered during upgrade, please check %s." %
- oo_cfg.settings['ansible_log_path'])
- else:
- oo_cfg.save_to_disk()
- click.echo("Upgrade completed! Rebooting all hosts is recommended.")
+ click.echo("Upgrades are no longer supported by this version of installer")
+ click.echo("Please see the documentation for manual upgrade:")
+ click.echo("https://docs.openshift.com/container-platform/latest/install_config/upgrading/automated_upgrades.html")
+ sys.exit(1)
@click.command()
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index dda8eb4c6..216664cd0 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -321,21 +321,3 @@ def run_uninstall_playbook(hosts, verbose=False):
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config']
return run_ansible(playbook, inventory_file, facts_env, verbose)
-
-
-def run_upgrade_playbook(hosts, playbook, verbose=False):
- playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
- 'playbooks/byo/openshift-cluster/upgrades/{}'.format(playbook))
-
- # TODO: Upgrade inventory for upgrade?
- inventory_file = generate_inventory(hosts)
- facts_env = os.environ.copy()
- if 'ansible_log_path' in CFG.settings:
- facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
- if 'ansible_config' in CFG.settings:
- facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
- # override the ansible config for our main playbook run
- if 'ansible_quiet_config' in CFG.settings:
- facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config']
-
- return run_ansible(playbook, inventory_file, facts_env, verbose)
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index 673997c42..2259f3416 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -384,6 +384,7 @@ deployment:
storage:
"""
+
class UnattendedCliTests(OOCliFixture):
def setUp(self):
@@ -402,8 +403,9 @@ class UnattendedCliTests(OOCliFixture):
load_facts_mock.return_value = (mock_facts, 0)
run_playbook_mock.return_value = 0
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'),
+ SAMPLE_CONFIG % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
@@ -481,8 +483,9 @@ class UnattendedCliTests(OOCliFixture):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'),
+ SAMPLE_CONFIG % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
@@ -490,16 +493,18 @@ class UnattendedCliTests(OOCliFixture):
load_facts_args = load_facts_mock.call_args[0]
self.assertEquals(os.path.join(self.work_dir, "hosts"),
- load_facts_args[0])
- self.assertEquals(os.path.join(self.work_dir,
- "playbooks/byo/openshift_facts.yml"), load_facts_args[1])
+ load_facts_args[0])
+ self.assertEquals(
+ os.path.join(self.work_dir, "playbooks/byo/openshift_facts.yml"),
+ load_facts_args[1])
env_vars = load_facts_args[2]
- self.assertEquals(os.path.join(self.work_dir,
- '.ansible/callback_facts.yaml'),
+ self.assertEquals(
+ os.path.join(self.work_dir, '.ansible/callback_facts.yaml'),
env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
# If user running test has rpm installed, this might be set to default:
- self.assertTrue('ANSIBLE_CONFIG' not in env_vars or
+ self.assertTrue(
+ 'ANSIBLE_CONFIG' not in env_vars or
env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG)
# Make sure we ran on the expected masters and nodes:
@@ -515,8 +520,9 @@ class UnattendedCliTests(OOCliFixture):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), merged_config)
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'),
+ merged_config)
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
@@ -526,9 +532,9 @@ class UnattendedCliTests(OOCliFixture):
inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assertEquals('root',
- inventory.get('OSEv3:vars', 'ansible_ssh_user'))
+ inventory.get('OSEv3:vars', 'ansible_ssh_user'))
self.assertEquals('openshift-enterprise',
- inventory.get('OSEv3:vars', 'deployment_type'))
+ inventory.get('OSEv3:vars', 'deployment_type'))
# Check the masters:
self.assertEquals(1, len(inventory.items('masters')))
@@ -546,13 +552,13 @@ class UnattendedCliTests(OOCliFixture):
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
- def test_variant_version_latest_assumed(self, load_facts_mock,
- run_playbook_mock):
+ def test_variant_version_latest_assumed(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'),
+ SAMPLE_CONFIG % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
@@ -569,19 +575,18 @@ class UnattendedCliTests(OOCliFixture):
inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assertEquals('openshift-enterprise',
- inventory.get('OSEv3:vars', 'deployment_type'))
+ inventory.get('OSEv3:vars', 'deployment_type'))
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
- def test_variant_version_preserved(self, load_facts_mock,
- run_playbook_mock):
+ def test_variant_version_preserved(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
config = SAMPLE_CONFIG % 'openshift-enterprise'
config = '%s\n%s' % (config, 'variant_version: 3.3')
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), config)
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'), config)
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
@@ -597,7 +602,7 @@ class UnattendedCliTests(OOCliFixture):
inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assertEquals('openshift-enterprise',
- inventory.get('OSEv3:vars', 'deployment_type'))
+ inventory.get('OSEv3:vars', 'deployment_type'))
# unattended with bad config file and no installed hosts (without --force)
@patch('ooinstall.openshift_ansible.run_main_playbook')
@@ -606,25 +611,28 @@ class UnattendedCliTests(OOCliFixture):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), BAD_CONFIG % 'openshift-enterprise')
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'),
+ BAD_CONFIG % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
self.assertEquals(1, result.exit_code)
- self.assertTrue("You must specify either an ip or hostname"
+ self.assertTrue(
+ "You must specify either an ip or hostname"
in result.output)
- #unattended with three masters, one node, and haproxy
+ # unattended with three masters, one node, and haproxy
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_quick_ha_full_run(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), QUICKHA_CONFIG % 'openshift-enterprise')
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'),
+ QUICKHA_CONFIG % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
@@ -636,15 +644,16 @@ class UnattendedCliTests(OOCliFixture):
self.assertEquals(6, len(hosts))
self.assertEquals(6, len(hosts_to_run_on))
- #unattended with two masters, one node, and haproxy
+ # unattended with two masters, one node, and haproxy
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_quick_ha_only_2_masters(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), QUICKHA_2_MASTER_CONFIG % 'openshift-enterprise')
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'),
+ QUICKHA_2_MASTER_CONFIG % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
@@ -653,15 +662,16 @@ class UnattendedCliTests(OOCliFixture):
self.assert_result(result, 1)
self.assertTrue("A minimum of 3 masters are required" in result.output)
- #unattended with three masters, one node, but no load balancer specified:
+ # unattended with three masters, one node, but no load balancer specified:
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_quick_ha_no_lb(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), QUICKHA_CONFIG_NO_LB % 'openshift-enterprise')
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'),
+ QUICKHA_CONFIG_NO_LB % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
@@ -670,15 +680,16 @@ class UnattendedCliTests(OOCliFixture):
self.assert_result(result, 1)
self.assertTrue('No master load balancer specified in config' in result.output)
- #unattended with three masters, one node, and one of the masters reused as load balancer:
+ # unattended with three masters, one node, and one of the masters reused as load balancer:
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_quick_ha_reused_lb(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), QUICKHA_CONFIG_REUSED_LB % 'openshift-enterprise')
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'),
+ QUICKHA_CONFIG_REUSED_LB % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
@@ -686,15 +697,16 @@ class UnattendedCliTests(OOCliFixture):
# This is not a valid configuration:
self.assert_result(result, 1)
- #unattended with preconfigured lb
+ # unattended with preconfigured lb
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_quick_ha_preconfigured_lb(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), QUICKHA_CONFIG_PRECONFIGURED_LB % 'openshift-enterprise')
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'),
+ QUICKHA_CONFIG_PRECONFIGURED_LB % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
@@ -706,6 +718,7 @@ class UnattendedCliTests(OOCliFixture):
self.assertEquals(6, len(hosts))
self.assertEquals(6, len(hosts_to_run_on))
+
class AttendedCliTests(OOCliFixture):
def setUp(self):
@@ -720,17 +733,18 @@ class AttendedCliTests(OOCliFixture):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
- cli_input = build_input(hosts=[
- ('10.0.0.1', True, False),
- ('10.0.0.2', False, False),
- ('10.0.0.3', False, False)],
- ssh_user='root',
- variant_num=1,
- confirm_facts='y',
- storage='10.1.0.1',)
+ cli_input = build_input(
+ hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', False, False),
+ ('10.0.0.3', False, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ storage='10.1.0.1',)
self.cli_args.append("install")
- result = self.runner.invoke(cli.cli, self.cli_args,
- input=cli_input)
+ result = self.runner.invoke(
+ cli.cli, self.cli_args, input=cli_input)
self.assert_result(result, 0)
self._verify_load_facts(load_facts_mock)
@@ -741,12 +755,12 @@ class AttendedCliTests(OOCliFixture):
inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
- self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
- 'openshift_schedulable=False')
- self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.2',
- 'openshift_schedulable=True')
- self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.3',
- 'openshift_schedulable=True')
+ self.assert_inventory_host_var(
+ inventory, 'nodes', '10.0.0.1', 'openshift_schedulable=False')
+ self.assert_inventory_host_var_unset(
+ inventory, 'nodes', '10.0.0.2', 'openshift_schedulable=True')
+ self.assert_inventory_host_var_unset(
+ inventory, 'nodes', '10.0.0.3', 'openshift_schedulable=True')
# interactive with config file and some installed some uninstalled hosts
@patch('ooinstall.openshift_ansible.run_main_playbook')
@@ -762,15 +776,16 @@ class AttendedCliTests(OOCliFixture):
load_facts_mock.return_value = (mock_facts, 0)
run_playbook_mock.return_value = 0
- cli_input = build_input(hosts=[
- ('10.0.0.1', True, False),
- ('10.0.0.2', False, False),
+ cli_input = build_input(
+ hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', False, False),
],
- add_nodes=[('10.0.0.3', False, False)],
- ssh_user='root',
- variant_num=1,
- confirm_facts='y',
- storage='10.0.0.1',)
+ add_nodes=[('10.0.0.3', False, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ storage='10.0.0.1',)
self.cli_args.append("install")
result = self.runner.invoke(cli.cli,
self.cli_args,
@@ -781,7 +796,6 @@ class AttendedCliTests(OOCliFixture):
self.assertTrue('scaleup' in result.output)
self.assert_result(result, 1)
-
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_fresh_install_with_config(self, load_facts_mock, run_playbook_mock):
@@ -830,26 +844,27 @@ class AttendedCliTests(OOCliFixture):
# exp_hosts_to_run_on_len=2,
# force=False)
- #interactive multimaster: one more node than master
+ # interactive multimaster: one more node than master
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_ha_dedicated_node(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
- cli_input = build_input(hosts=[
- ('10.0.0.1', True, False),
- ('10.0.0.2', True, False),
- ('10.0.0.3', True, False),
- ('10.0.0.4', False, False)],
- ssh_user='root',
- variant_num=1,
- confirm_facts='y',
- master_lb=('10.0.0.5', False),
- storage='10.1.0.1',)
+ cli_input = build_input(
+ hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', True, False),
+ ('10.0.0.3', True, False),
+ ('10.0.0.4', False, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ master_lb=('10.0.0.5', False),
+ storage='10.1.0.1',)
self.cli_args.append("install")
- result = self.runner.invoke(cli.cli, self.cli_args,
- input=cli_input)
+ result = self.runner.invoke(
+ cli.cli, self.cli_args, input=cli_input)
self.assert_result(result, 0)
self._verify_load_facts(load_facts_mock)
@@ -872,25 +887,26 @@ class AttendedCliTests(OOCliFixture):
self.assertTrue(inventory.has_section('etcd'))
self.assertEquals(3, len(inventory.items('etcd')))
- #interactive multimaster: identical masters and nodes
+ # interactive multimaster: identical masters and nodes
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_ha_no_dedicated_nodes(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
- cli_input = build_input(hosts=[
- ('10.0.0.1', True, False),
- ('10.0.0.2', True, False),
- ('10.0.0.3', True, False)],
- ssh_user='root',
- variant_num=1,
- confirm_facts='y',
- master_lb=('10.0.0.5', False),
- storage='10.1.0.1',)
+ cli_input = build_input(
+ hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', True, False),
+ ('10.0.0.3', True, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ master_lb=('10.0.0.5', False),
+ storage='10.1.0.1',)
self.cli_args.append("install")
- result = self.runner.invoke(cli.cli, self.cli_args,
- input=cli_input)
+ result = self.runner.invoke(
+ cli.cli, self.cli_args, input=cli_input)
self.assert_result(result, 0)
self._verify_load_facts(load_facts_mock)
@@ -919,7 +935,9 @@ class AttendedCliTests(OOCliFixture):
full_line = "%s=%s" % (a, b)
tokens = full_line.split()
if tokens[0] == host:
- self.assertTrue(variable in tokens[1:], "Unable to find %s in line: %s" % (variable, full_line))
+ self.assertTrue(
+ variable in tokens[1:],
+ "Unable to find %s in line: %s" % (variable, full_line))
return
self.fail("unable to find host %s in inventory" % host)
@@ -938,45 +956,46 @@ class AttendedCliTests(OOCliFixture):
return
self.fail("unable to find host %s in inventory" % host)
-
- #interactive multimaster: attempting to use a master as the load balancer should fail:
+ # interactive multimaster: attempting to use a master as the load balancer should fail:
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_ha_reuse_master_as_lb(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
- cli_input = build_input(hosts=[
- ('10.0.0.1', True, False),
- ('10.0.0.2', True, False),
- ('10.0.0.3', False, False),
- ('10.0.0.4', True, False)],
- ssh_user='root',
- variant_num=1,
- confirm_facts='y',
- master_lb=(['10.0.0.2', '10.0.0.5'], False),
- storage='10.1.0.1')
+ cli_input = build_input(
+ hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', True, False),
+ ('10.0.0.3', False, False),
+ ('10.0.0.4', True, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ master_lb=(['10.0.0.2', '10.0.0.5'], False),
+ storage='10.1.0.1')
self.cli_args.append("install")
- result = self.runner.invoke(cli.cli, self.cli_args,
- input=cli_input)
+ result = self.runner.invoke(
+ cli.cli, self.cli_args, input=cli_input)
self.assert_result(result, 0)
- #interactive all-in-one
+ # interactive all-in-one
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_all_in_one(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
- cli_input = build_input(hosts=[
- ('10.0.0.1', True, False)],
- ssh_user='root',
- variant_num=1,
- confirm_facts='y',
- storage='10.0.0.1')
+ cli_input = build_input(
+ hosts=[
+ ('10.0.0.1', True, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ storage='10.0.0.1')
self.cli_args.append("install")
- result = self.runner.invoke(cli.cli, self.cli_args,
- input=cli_input)
+ result = self.runner.invoke(
+ cli.cli, self.cli_args, input=cli_input)
self.assert_result(result, 0)
self._verify_load_facts(load_facts_mock)
@@ -990,25 +1009,25 @@ class AttendedCliTests(OOCliFixture):
self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
'openshift_schedulable=True')
-
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_gen_inventory(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
- cli_input = build_input(hosts=[
- ('10.0.0.1', True, False),
- ('10.0.0.2', False, False),
- ('10.0.0.3', False, False)],
- ssh_user='root',
- variant_num=1,
- confirm_facts='y',
- storage='10.1.0.1',)
+ cli_input = build_input(
+ hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', False, False),
+ ('10.0.0.3', False, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ storage='10.1.0.1',)
self.cli_args.append("install")
self.cli_args.append("--gen-inventory")
- result = self.runner.invoke(cli.cli, self.cli_args,
- input=cli_input)
+ result = self.runner.invoke(
+ cli.cli, self.cli_args, input=cli_input)
self.assert_result(result, 0)
self._verify_load_facts(load_facts_mock)
@@ -1021,12 +1040,12 @@ class AttendedCliTests(OOCliFixture):
inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
- self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
- 'openshift_schedulable=False')
- self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.2',
- 'openshift_schedulable=True')
- self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.3',
- 'openshift_schedulable=True')
+ self.assert_inventory_host_var(
+ inventory, 'nodes', '10.0.0.1', 'openshift_schedulable=False')
+ self.assert_inventory_host_var_unset(
+ inventory, 'nodes', '10.0.0.2', 'openshift_schedulable=True')
+ self.assert_inventory_host_var_unset(
+ inventory, 'nodes', '10.0.0.3', 'openshift_schedulable=True')
# TODO: test with config file, attended add node
diff --git a/utils/test/fixture.py b/utils/test/fixture.py
index 873ac4a27..5c0d1d2c1 100644
--- a/utils/test/fixture.py
+++ b/utils/test/fixture.py
@@ -43,6 +43,7 @@ deployment:
node:
"""
+
def read_yaml(config_file_path):
cfg_f = open(config_file_path, 'r')
config = yaml.safe_load(cfg_f.read())
@@ -105,7 +106,7 @@ class OOCliFixture(OOInstallFixture):
self.assertTrue('ip' in host)
self.assertTrue('public_ip' in host)
- #pylint: disable=too-many-arguments
+ # pylint: disable=too-many-arguments
def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock,
run_playbook_mock, cli_input,
exp_hosts_len=None, exp_hosts_to_run_on_len=None,
@@ -152,7 +153,7 @@ class OOCliFixture(OOInstallFixture):
self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
-#pylint: disable=too-many-arguments,too-many-branches,too-many-statements
+# pylint: disable=too-many-arguments,too-many-branches,too-many-statements
def build_input(ssh_user=None, hosts=None, variant_num=None,
add_nodes=None, confirm_facts=None, schedulable_masters_ok=None,
master_lb=('', False), storage=None):
@@ -190,7 +191,7 @@ def build_input(ssh_user=None, hosts=None, variant_num=None,
else:
inputs.append('rpm')
- #inputs.append('rpm')
+ # inputs.append('rpm')
# We should not be prompted to add more hosts if we're currently at
# 2 masters, this is an invalid HA configuration, so this question
# will not be asked, and the user must enter the next host:
@@ -224,13 +225,13 @@ def build_input(ssh_user=None, hosts=None, variant_num=None,
inputs.append('y')
inputs.append('1') # Add more nodes
i = 0
- for (host, is_master, is_containerized) in add_nodes:
+ for (host, _, is_containerized) in add_nodes:
inputs.append(host)
if is_containerized:
inputs.append('container')
else:
inputs.append('rpm')
- #inputs.append('rpm')
+ # inputs.append('rpm')
if i < len(add_nodes) - 1:
inputs.append('y') # Add more hosts
else:
diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py
index 5651e6e7a..80cdbe618 100644
--- a/utils/test/oo_config_tests.py
+++ b/utils/test/oo_config_tests.py
@@ -107,6 +107,7 @@ deployment:
node:
"""
+
class OOInstallFixture(unittest.TestCase):
def setUp(self):
@@ -133,13 +134,12 @@ class OOInstallFixture(unittest.TestCase):
return path
-
class OOConfigTests(OOInstallFixture):
def test_load_config(self):
- cfg_path = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), SAMPLE_CONFIG)
+ cfg_path = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'), SAMPLE_CONFIG)
ooconfig = OOConfig(cfg_path)
self.assertEquals(3, len(ooconfig.deployment.hosts))
@@ -155,26 +155,25 @@ class OOConfigTests(OOInstallFixture):
def test_load_bad_config(self):
- cfg_path = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), CONFIG_BAD)
+ cfg_path = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'), CONFIG_BAD)
try:
OOConfig(cfg_path)
assert False
except OOConfigInvalidHostError:
assert True
-
def test_load_complete_facts(self):
- cfg_path = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), SAMPLE_CONFIG)
+ cfg_path = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'), SAMPLE_CONFIG)
ooconfig = OOConfig(cfg_path)
missing_host_facts = ooconfig.calc_missing_facts()
self.assertEquals(0, len(missing_host_facts))
# Test missing optional facts the user must confirm:
def test_load_host_incomplete_facts(self):
- cfg_path = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), CONFIG_INCOMPLETE_FACTS)
+ cfg_path = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'), CONFIG_INCOMPLETE_FACTS)
ooconfig = OOConfig(cfg_path)
missing_host_facts = ooconfig.calc_missing_facts()
self.assertEquals(2, len(missing_host_facts))
@@ -182,8 +181,8 @@ class OOConfigTests(OOInstallFixture):
self.assertEquals(3, len(missing_host_facts['10.0.0.3']))
def test_write_config(self):
- cfg_path = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), SAMPLE_CONFIG)
+ cfg_path = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'), SAMPLE_CONFIG)
ooconfig = OOConfig(cfg_path)
ooconfig.save_to_disk()
@@ -191,8 +190,6 @@ class OOConfigTests(OOInstallFixture):
written_config = yaml.safe_load(f.read())
f.close()
-
-
self.assertEquals(3, len(written_config['deployment']['hosts']))
for h in written_config['deployment']['hosts']:
self.assertTrue('ip' in h)
@@ -259,8 +256,10 @@ class HostTests(OOInstallFixture):
# Given the `yaml_props` above we should see a line like this:
# openshift_node_labels="{'region': 'infra'}"
- node_labels_expected = '''openshift_node_labels="{'region': 'infra'}"''' # Quotes around the hash
- node_labels_bad = '''openshift_node_labels={'region': 'infra'}''' # No quotes around the hash
+ # Quotes around the hash
+ node_labels_expected = '''openshift_node_labels="{'region': 'infra'}"'''
+ # No quotes around the hash
+ node_labels_bad = '''openshift_node_labels={'region': 'infra'}'''
# The good line is present in the written inventory line
self.assertIn(node_labels_expected, legacy_inventory_line)
diff --git a/utils/test/test_utils.py b/utils/test/test_utils.py
index cabeaee34..a72e429d1 100644
--- a/utils/test/test_utils.py
+++ b/utils/test/test_utils.py
@@ -29,7 +29,6 @@ class TestUtils(unittest.TestCase):
mock.call('OO_FOO: bar'),
]
-
######################################################################
# Validate ooinstall.utils.debug_env functionality