-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  ansible.cfg | 2
-rw-r--r--  files/origin-components/console-config.yaml | 41
-rw-r--r--  files/origin-components/console-rbac-template.yaml | 38
-rw-r--r--  images/installer/Dockerfile | 2
-rw-r--r--  openshift-ansible.spec | 15
-rw-r--r--  playbooks/aws/openshift-cluster/provision.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/provision_elb.yml | 9
-rw-r--r--  playbooks/aws/openshift-cluster/provision_s3.yml | 10
-rw-r--r--  playbooks/cluster-operator/aws/infrastructure.yml | 21
l---------  playbooks/cluster-operator/aws/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml | 19
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 62
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml | 2
-rw-r--r--  playbooks/openshift-etcd/upgrade.yml | 5
-rw-r--r--  playbooks/openstack/README.md | 12
-rw-r--r--  playbooks/openstack/advanced-configuration.md | 7
-rwxr-xr-x  playbooks/openstack/inventory.py (renamed from playbooks/openstack/sample-inventory/inventory.py) | 0
-rw-r--r--  roles/ansible_service_broker/tasks/install.yml | 9
-rw-r--r--  roles/calico_master/tasks/main.yml | 2
-rw-r--r--  roles/kuryr/tasks/node.yaml | 2
-rw-r--r--  roles/kuryr/templates/cni-daemonset.yaml.j2 | 19
-rw-r--r--  roles/kuryr/templates/configmap.yaml.j2 | 357
-rw-r--r--  roles/lib_utils/filter_plugins/oo_filters.py | 11
-rw-r--r--  roles/lib_utils/filter_plugins/openshift_master.py | 6
-rw-r--r--  roles/openshift_aws/defaults/main.yml | 8
-rw-r--r--  roles/openshift_aws/tasks/provision.yml | 17
-rw-r--r--  roles/openshift_aws/tasks/provision_elb.yml | 15
-rw-r--r--  roles/openshift_aws/tasks/provision_nodes.yml | 17
-rw-r--r--  roles/openshift_examples/meta/main.yml | 1
-rw-r--r--  roles/openshift_excluder/tasks/verify_excluder.yml | 2
-rw-r--r--  roles/openshift_expand_partition/tasks/main.yml | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py | 2
-rw-r--r--  roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml | 13
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 9
-rw-r--r--  roles/openshift_master/tasks/upgrade/rpm_upgrade.yml | 23
-rw-r--r--  roles/openshift_metrics/tasks/install_metrics.yaml | 9
-rw-r--r--  roles/openshift_metrics/tasks/oc_apply.yaml | 8
-rw-r--r--  roles/openshift_persistent_volumes/tasks/pv.yml | 2
-rw-r--r--  roles/openshift_persistent_volumes/tasks/pvc.yml | 2
-rw-r--r--  roles/openshift_provisioners/tasks/oc_apply.yaml | 12
-rw-r--r--  roles/openshift_version/tasks/check_available_rpms.yml | 2
-rw-r--r--  roles/openshift_version/tasks/first_master_containerized_version.yml | 5
-rw-r--r--  roles/openshift_version/tasks/first_master_rpm_version.yml | 6
-rw-r--r--  roles/openshift_version/tasks/masters_and_nodes.yml | 7
-rw-r--r--  roles/openshift_web_console/tasks/install.yml | 42
-rw-r--r--  roles/openshift_web_console/tasks/update_console_config.yml (renamed from roles/openshift_web_console/tasks/update_asset_config.yml) | 29
-rw-r--r--  roles/openshift_web_console/vars/main.yml | 1
-rw-r--r--  utils/src/ooinstall/ansible_plugins/facts_callback.py | 6
54 files changed, 741 insertions(+), 174 deletions(-)
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index a2b323b95..61e7d68a2 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.9.0-0.19.0 ./
+3.9.0-0.20.0 ./
diff --git a/ansible.cfg b/ansible.cfg
index c1c76a496..67149cb35 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -22,7 +22,7 @@ fact_caching = jsonfile
fact_caching_connection = $HOME/ansible/facts
fact_caching_timeout = 600
callback_whitelist = profile_tasks
-inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt
+inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt, .ini
# work around privilege escalation timeouts in ansible:
timeout = 30
diff --git a/files/origin-components/console-config.yaml b/files/origin-components/console-config.yaml
index e104e8028..901518b28 100644
--- a/files/origin-components/console-config.yaml
+++ b/files/origin-components/console-config.yaml
@@ -1,5 +1,34 @@
-kind: WebConsoleConfiguration
apiVersion: webconsole.config.openshift.io/v1
+kind: WebConsoleConfiguration
+clusterInfo:
+ consolePublicURL: https://127.0.0.1:8443/console/
+ loggingPublicURL: ""
+ logoutPublicURL: ""
+ masterPublicURL: https://127.0.0.1:8443
+ metricsPublicURL: ""
+# TODO: The new extensions properties cannot be set until
+# origin-web-console-server has been updated with the API changes since
+# `extensions` in the old asset config was an array.
+#extensions:
+# scriptURLs: []
+# stylesheetURLs: []
+# properties: null
+features:
+ inactivityTimeoutMinutes: 0
+servingInfo:
+ bindAddress: 0.0.0.0:8443
+ bindNetwork: tcp4
+ certFile: /var/serving-cert/tls.crt
+ clientCA: ""
+ keyFile: /var/serving-cert/tls.key
+ maxRequestsInFlight: 0
+ namedCertificates: null
+ requestTimeoutSeconds: 0
+
+# START deprecated properties
+# These properties have been renamed and will be removed from the install
+# in a future pull. Keep both the old and new properties for now so that
+# the install is not broken while the origin-web-console image is updated.
extensionDevelopment: false
extensionProperties: null
extensionScripts: null
@@ -10,12 +39,4 @@ logoutURL: ""
masterPublicURL: https://127.0.0.1:8443
metricsPublicURL: ""
publicURL: https://127.0.0.1:8443/console/
-servingInfo:
- bindAddress: 0.0.0.0:8443
- bindNetwork: tcp4
- certFile: /var/serving-cert/tls.crt
- clientCA: ""
- keyFile: /var/serving-cert/tls.key
- maxRequestsInFlight: 0
- namedCertificates: null
- requestTimeoutSeconds: 0
+# END deprecated properties
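For orientation, the hunks above leave `console-config.yaml` carrying both layouts at once: the new nested `clusterInfo`/`features`/`servingInfo` groups at the top, and the old flat keys kept temporarily at the bottom. A condensed sketch of the resulting file (values are the defaults shown in the diff):

```yaml
apiVersion: webconsole.config.openshift.io/v1
kind: WebConsoleConfiguration
clusterInfo:
  consolePublicURL: https://127.0.0.1:8443/console/
  masterPublicURL: https://127.0.0.1:8443
features:
  inactivityTimeoutMinutes: 0
servingInfo:
  bindAddress: 0.0.0.0:8443
  certFile: /var/serving-cert/tls.crt
  keyFile: /var/serving-cert/tls.key
# ...the deprecated flat keys (publicURL, masterPublicURL, ...) follow until
# the origin-web-console image is updated.
```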
diff --git a/files/origin-components/console-rbac-template.yaml b/files/origin-components/console-rbac-template.yaml
new file mode 100644
index 000000000..9ee117199
--- /dev/null
+++ b/files/origin-components/console-rbac-template.yaml
@@ -0,0 +1,38 @@
+apiVersion: template.openshift.io/v1
+kind: Template
+metadata:
+ name: web-console-server-rbac
+parameters:
+- name: NAMESPACE
+ # This namespace cannot be changed. Only `openshift-web-console` is supported.
+ value: openshift-web-console
+objects:
+
+
+# grant powers to the webconsole server for cluster inspection
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: ClusterRole
+ metadata:
+ name: system:openshift:web-console-server
+ rules:
+ - apiGroups:
+ - "servicecatalog.k8s.io"
+ resources:
+ - clusterservicebrokers
+ verbs:
+ - get
+ - list
+ - watch
+
+# Bind the above role to the web console's service account
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: ClusterRoleBinding
+ metadata:
+ name: system:openshift:web-console-server
+ roleRef:
+ kind: ClusterRole
+ name: system:openshift:web-console-server
+ subjects:
+ - kind: ServiceAccount
+ namespace: ${NAMESPACE}
+ name: webconsole
diff --git a/images/installer/Dockerfile b/images/installer/Dockerfile
index db362bd65..b1390480a 100644
--- a/images/installer/Dockerfile
+++ b/images/installer/Dockerfile
@@ -10,7 +10,7 @@ COPY images/installer/origin-extra-root /
# install ansible and deps
RUN INSTALL_PKGS="python-lxml pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless python2-passlib httpd-tools openssh-clients origin-clients" \
&& yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
- && EPEL_PKGS="ansible python2-boto google-cloud-sdk-183.0.0 which" \
+ && EPEL_PKGS="ansible python2-boto python2-boto3 google-cloud-sdk-183.0.0 which" \
&& yum install -y epel-release \
&& yum install -y --setopt=tsflags=nodocs $EPEL_PKGS \
&& rpm -V $INSTALL_PKGS $EPEL_PKGS \
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index a301cfe5b..63c36f551 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.9.0
-Release: 0.19.0%{?dist}
+Release: 0.20.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -202,6 +202,19 @@ Atomic OpenShift Utilities includes
%changelog
+* Mon Jan 15 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.20.0
+- Adjust openstack provider dependencies versions (bdobreli@redhat.com)
+- Fix openstack provider playbook name in docs (bdobreli@redhat.com)
+- Install web console on upgrade (spadgett@redhat.com)
+- Add var for controller to enable async bindings (jpeeler@redhat.com)
+- Add cluster-operator playbook directory. (abutcher@redhat.com)
+- Move s3 & elb provisioning into their own playbooks s.t. they are applied
+ outside of the openshift_aws master provisioning tasks. (abutcher@redhat.com)
+- Update to AWS EC2 root vol size so that Health Check tasks pass
+ (mazzystr@gmail.com)
+- Configure Kuryr CNI daemon (mdulko@redhat.com)
+- Clean up host-local IPAM data while nodes are drained (danw@redhat.com)
+
* Fri Jan 12 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.19.0
-
diff --git a/playbooks/aws/openshift-cluster/provision.yml b/playbooks/aws/openshift-cluster/provision.yml
index 7dde60b7d..d538b862d 100644
--- a/playbooks/aws/openshift-cluster/provision.yml
+++ b/playbooks/aws/openshift-cluster/provision.yml
@@ -1,8 +1,7 @@
---
-- name: Setup the elb and the master node group
+- name: Alert user to variables needed
hosts: localhost
tasks:
-
- name: Alert user to variables needed - clusterid
debug:
msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
@@ -11,6 +10,13 @@
debug:
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
+- import_playbook: provision_s3.yml
+
+- import_playbook: provision_elb.yml
+
+- name: Create the master node group
+ hosts: localhost
+ tasks:
- name: provision cluster
import_role:
name: openshift_aws
diff --git a/playbooks/aws/openshift-cluster/provision_elb.yml b/playbooks/aws/openshift-cluster/provision_elb.yml
new file mode 100644
index 000000000..9f27dca3b
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_elb.yml
@@ -0,0 +1,9 @@
+---
+- name: Create elb
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: provision elb
+ include_role:
+ name: openshift_aws
+ tasks_from: provision_elb.yml
diff --git a/playbooks/aws/openshift-cluster/provision_s3.yml b/playbooks/aws/openshift-cluster/provision_s3.yml
new file mode 100644
index 000000000..45b439083
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_s3.yml
@@ -0,0 +1,10 @@
+---
+- name: Create s3 bucket
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: create s3 bucket
+ include_role:
+ name: openshift_aws
+ tasks_from: s3.yml
+ when: openshift_aws_create_s3 | default(true) | bool
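Since the S3 play is gated on `openshift_aws_create_s3` (defaulting to true), a deployment that reuses an existing registry bucket can opt out from the inventory. A minimal sketch, assuming a conventional group_vars file (the path is illustrative; the variable name comes from the play above):

```yaml
# group_vars/all.yml (hypothetical location)
# Skip registry S3 bucket creation; an existing bucket is reused.
openshift_aws_create_s3: false
```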
diff --git a/playbooks/cluster-operator/aws/infrastructure.yml b/playbooks/cluster-operator/aws/infrastructure.yml
new file mode 100644
index 000000000..9669820fb
--- /dev/null
+++ b/playbooks/cluster-operator/aws/infrastructure.yml
@@ -0,0 +1,21 @@
+---
+- name: Alert user to variables needed
+ hosts: localhost
+ tasks:
+ - name: Alert user to variables needed - clusterid
+ debug:
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
+
+ - name: Alert user to variables needed - region
+ debug:
+ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
+
+- import_playbook: ../../aws/openshift-cluster/provision_vpc.yml
+
+- import_playbook: ../../aws/openshift-cluster/provision_ssh_keypair.yml
+
+- import_playbook: ../../aws/openshift-cluster/provision_sec_group.yml
+
+- import_playbook: ../../aws/openshift-cluster/provision_s3.yml
+
+- import_playbook: ../../aws/openshift-cluster/provision_elb.yml
diff --git a/playbooks/cluster-operator/aws/roles b/playbooks/cluster-operator/aws/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/cluster-operator/aws/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 4c1156f4b..45ddf7eea 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -21,7 +21,7 @@
block:
- name: Check latest available OpenShift RPM version
repoquery:
- name: "{{ openshift_service_type }}"
+ name: "{{ openshift_service_type }}{{ '-' ~ openshift_release ~ '*' if openshift_release is defined else '' }}"
ignore_excluders: true
register: repoquery_out
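The new repoquery name pins the lookup to the targeted release. As an illustration (assumed values, not part of the diff): with `openshift_service_type=origin` and `openshift_release=3.9` the expression renders `origin-3.9*`; with `openshift_release` undefined it falls back to plain `origin`. A throwaway task to confirm the rendering:

```yaml
# Illustration only: print the package spec the repoquery above will use.
- debug:
    msg: "{{ openshift_service_type }}{{ '-' ~ openshift_release ~ '*' if openshift_release is defined else '' }}"
  vars:
    openshift_service_type: origin  # assumed value
    openshift_release: '3.9'        # assumed value -> "origin-3.9*"
```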
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index 0f74e0137..a9bf354cc 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -35,8 +35,6 @@
# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_7/master_config_upgrade.yml"
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index a2f316c25..51da45311 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -15,6 +15,7 @@
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ when: not skip_version_info | default(false)
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -47,8 +48,6 @@
# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_7/master_config_upgrade.yml"
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
index 1d4d1919c..ed97d539c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
@@ -1,20 +1 @@
---
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.election.lockName'
- yaml_value: 'openshift-master-controllers'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
- yaml_value: service-signer.crt
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
- yaml_value: service-signer.key
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: servingInfo.clientCA
- yaml_value: ca.crt
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index 552bea5e7..20e0c165e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -10,6 +10,7 @@
- set_fact:
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.7'
+ openshift_release: '3.9'
- import_playbook: ../pre/config.yml
vars:
@@ -31,8 +32,6 @@
# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_7/master_config_upgrade.yml"
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index ef9871008..384eeed4c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -16,12 +16,18 @@
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-- name: Configure the upgrade target for the common upgrade tasks
+## Check to see if they're running 3.7 and, if so, upgrade them to 3.8 on the control plane.
+## If they've specified pkg_version or image_tag, preserve that for later use.
+- name: Configure the upgrade target for the common upgrade tasks 3.8
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
- openshift_upgrade_target: '3.9'
+ openshift_upgrade_target: '3.8'
openshift_upgrade_min: '3.7'
+ openshift_release: '3.8'
+ _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}"
+ _requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}"
+ when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
- import_playbook: ../pre/config.yml
# These vars are meant to exclude oo_nodes from plays that would otherwise include
@@ -35,21 +41,57 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
+ when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
-- import_playbook: validator.yml
-
-- name: Flag pre-upgrade checks complete for hosts without errors
+- name: Flag pre-upgrade checks complete for hosts without errors 3.8
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- set_fact:
pre_upgrade_complete: True
+ when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
# Pre-upgrade completed
+- import_playbook: ../upgrade_control_plane.yml
+ vars:
+ openshift_release: '3.8'
+ when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+
+## 3.8 upgrade complete we should now be able to upgrade to 3.9
+
+- name: Configure the upgrade target for the common upgrade tasks 3.9
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tasks:
+ - meta: clear_facts
+ - set_fact:
+ openshift_upgrade_target: '3.9'
+ openshift_upgrade_min: '3.8'
+ openshift_release: '3.9'
+ openshift_pkg_version: "{{ _requested_pkg_version | default('-3.9*') }}"
+ openshift_image_tag: "{{ _requested_image_tag | default('v3.9') }}"
+
+- import_playbook: ../pre/config.yml
+ # These vars are meant to exclude oo_nodes from plays that would otherwise include
+ # them by default.
+ vars:
+ l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ pre_upgrade_complete: True
- import_playbook: ../upgrade_control_plane.yml
vars:
- master_config_hook: "v3_7/master_config_upgrade.yml"
+ openshift_release: '3.9'
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
@@ -58,13 +100,13 @@
roles:
- role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
- import_playbook: ../post_control_plane.yml
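Net effect of the hunks above: the 3.9 control-plane upgrade becomes a two-hop 3.7 → 3.8 → 3.9 run. Every 3.8-targeted play is gated on the first master still reporting a version below 3.8, any requested `openshift_pkg_version`/`openshift_image_tag` is stashed in `_requested_*` facts across the hop, and the 3.9 facts are set after a `clear_facts`. A condensed sketch of the gate, using the group and fact names from the plays above:

```yaml
# Condensed sketch of the 3.8-hop gate repeated throughout the plays above.
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
  tasks:
    - set_fact:
        openshift_upgrade_target: '3.8'
      when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version
            | version_compare('3.8', '<')
```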
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
index 1d1b255c1..859b1d88b 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -12,6 +12,7 @@
- set_fact:
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.7'
+ openshift_release: '3.9'
- import_playbook: ../pre/config.yml
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
index 4bd2d87b1..d8540abfb 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
@@ -1,5 +1,5 @@
---
-- name: Verify 3.9 specific upgrade checks
+- name: Verify 3.8 specific upgrade checks
hosts: oo_first_master
roles:
- { role: lib_openshift }
diff --git a/playbooks/openshift-etcd/upgrade.yml b/playbooks/openshift-etcd/upgrade.yml
index ccc797527..71606e7e4 100644
--- a/playbooks/openshift-etcd/upgrade.yml
+++ b/playbooks/openshift-etcd/upgrade.yml
@@ -1,4 +1,7 @@
---
-- import_playbook: ../init/evaluate_groups.yml
+- import_playbook: ../init/main.yml
+ vars:
+ skip_version: True
+ l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- import_playbook: private/upgrade_main.yml
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
index d361d6278..fb621f898 100644
--- a/playbooks/openstack/README.md
+++ b/playbooks/openstack/README.md
@@ -183,14 +183,21 @@ Then run the provision + install playbook -- this will create the OpenStack
resources:
```bash
-$ ansible-playbook --user openshift -i inventory \
- openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yaml \
+$ ansible-playbook --user openshift \
+ -i openshift-ansible/playbooks/openstack/inventory.py \
+ -i inventory \
+ openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yml \
-e openshift_repos_enable_testing=true
```
Note: you may want to use the testing repo for development purposes only.
Normally, `openshift_repos_enable_testing` should not be specified.
+In addition to *your* inventory with your OpenShift and OpenStack
+configuration, we also supply the [dynamic inventory][dynamic] at
+`openshift-ansible/playbooks/openstack/inventory.py`. It's a script that
+inspects the Nova servers (and other resources) created during provisioning
+and makes them known to Ansible.
+
If you're using multiple inventories, make sure you pass the path to
the right one to `-i`.
@@ -233,3 +240,4 @@ advanced configuration:
[loadbalancer]: ./advanced-configuration.md#multi-master-configuration
[external-dns]: ./advanced-configuration.md#dns-configuration-variables
[cinder-registry]: ./advanced-configuration.md#creating-and-using-a-cinder-volume-for-the-openshift-registry
+[dynamic]: http://docs.ansible.com/ansible/latest/intro_dynamic_inventory.html
diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md
index afa56d168..e8f4cfc32 100644
--- a/playbooks/openstack/advanced-configuration.md
+++ b/playbooks/openstack/advanced-configuration.md
@@ -1,9 +1,8 @@
## Dependencies for localhost (ansible control/admin node)
-* [Ansible 2.3](https://pypi.python.org/pypi/ansible)
-* [Ansible-galaxy](https://pypi.python.org/pypi/ansible-galaxy-local-deps)
-* [jinja2](http://jinja.pocoo.org/docs/2.9/)
-* [shade](https://pypi.python.org/pypi/shade)
+* [Ansible](https://pypi.python.org/pypi/ansible) version >=2.4.0
+* [jinja2](http://jinja.pocoo.org/docs/2.9/) version >= 2.10
+* [shade](https://pypi.python.org/pypi/shade) version >= 1.26
* python-jmespath / [jmespath](https://pypi.python.org/pypi/jmespath)
* python-dns / [dnspython](https://pypi.python.org/pypi/dnspython)
* Become (sudo) is not required.
diff --git a/playbooks/openstack/sample-inventory/inventory.py b/playbooks/openstack/inventory.py
index 76e658eb7..76e658eb7 100755
--- a/playbooks/openstack/sample-inventory/inventory.py
+++ b/playbooks/openstack/inventory.py
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
index ba2f7293b..1bc1b5e43 100644
--- a/roles/ansible_service_broker/tasks/install.yml
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -72,6 +72,15 @@
- apiGroups: ["image.openshift.io", ""]
resources: ["images"]
verbs: ["get", "list"]
+ - apiGroups: ["network.openshift.io"]
+ resources: ["clusternetworks", "netnamespaces"]
+ verbs: ["get"]
+ - apiGroups: ["network.openshift.io"]
+ resources: ["netnamespaces"]
+ verbs: ["update"]
+ - apiGroups: ["networking.k8s.io"]
+ resources: ["networkpolicies"]
+ verbs: ["create", "delete"]
- name: Create asb-access cluster role
oc_clusterrole:
diff --git a/roles/calico_master/tasks/main.yml b/roles/calico_master/tasks/main.yml
index 05415a4d6..834ebba64 100644
--- a/roles/calico_master/tasks/main.yml
+++ b/roles/calico_master/tasks/main.yml
@@ -23,7 +23,7 @@
-f {{ mktemp.stdout }}/calico-policy-controller.yml
--config={{ openshift.common.config_base }}/master/admin.kubeconfig
register: calico_create_output
- failed_when: ('already exists' not in calico_create_output.stderr) and ('created' not in calico_create_output.stdout)
+ failed_when: "('already exists' not in calico_create_output.stderr) and ('created' not in calico_create_output.stdout) and calico_create_output.rc != 0"
changed_when: ('created' in calico_create_output.stdout)
- name: Calico Master | Delete temp directory
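This failed_when pattern (used here and again in the pv/pvc and metrics tasks later in the diff) makes `oc create`-style commands idempotent: the task fails only when the output shows neither "created" nor "already exists" *and* the command exited non-zero. A minimal self-contained sketch of the pattern (the command and file path are placeholders):

```yaml
# Minimal sketch of the idempotent-create pattern (placeholder command).
- name: create example resource
  command: oc create -f /tmp/example.yml
  register: create_out
  failed_when: "('already exists' not in create_out.stderr) and ('created' not in create_out.stdout) and create_out.rc != 0"
  changed_when: "'created' in create_out.stdout"
```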
diff --git a/roles/kuryr/tasks/node.yaml b/roles/kuryr/tasks/node.yaml
index 08f2d5adc..41d0ead20 100644
--- a/roles/kuryr/tasks/node.yaml
+++ b/roles/kuryr/tasks/node.yaml
@@ -40,7 +40,7 @@
regexp: '^OPTIONS="?(.*?)"?$'
backrefs: yes
backup: yes
- line: 'OPTIONS="\1 --disable dns,proxy,plugins"'
+ line: 'OPTIONS="\1 --disable proxy"'
- name: force node restart to disable the proxy
service:
diff --git a/roles/kuryr/templates/cni-daemonset.yaml.j2 b/roles/kuryr/templates/cni-daemonset.yaml.j2
index 39348ae90..09f4c7dfe 100644
--- a/roles/kuryr/templates/cni-daemonset.yaml.j2
+++ b/roles/kuryr/templates/cni-daemonset.yaml.j2
@@ -26,6 +26,13 @@ spec:
image: kuryr/cni:latest
imagePullPolicy: IfNotPresent
command: [ "cni_ds_init" ]
+ env:
+ - name: CNI_DAEMON
+ value: "True"
+ - name: KUBERNETES_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
securityContext:
privileged: true
volumeMounts:
@@ -38,6 +45,10 @@ spec:
subPath: kuryr-cni.conf
- name: etc
mountPath: /etc
+ - name: proc
+ mountPath: /host_proc
+ - name: openvswitch
+ mountPath: /var/run/openvswitch
volumes:
- name: bin
hostPath:
@@ -50,4 +61,10 @@ spec:
name: kuryr-config
- name: etc
hostPath:
- path: /etc
\ No newline at end of file
+ path: /etc
+ - name: proc
+ hostPath:
+ path: /proc
+ - name: openvswitch
+ hostPath:
+ path: /var/run/openvswitch
diff --git a/roles/kuryr/templates/configmap.yaml.j2 b/roles/kuryr/templates/configmap.yaml.j2
index 96c215f00..4bf1dbddf 100644
--- a/roles/kuryr/templates/configmap.yaml.j2
+++ b/roles/kuryr/templates/configmap.yaml.j2
@@ -16,17 +16,17 @@ data:
# Directory for Kuryr vif binding executables. (string value)
#bindir = /usr/libexec/kuryr
+ # Neutron subnetpool name will be prefixed by this. (string value)
+ #subnetpool_name_prefix = kuryrPool
+
+ # baremetal or nested-containers are the supported values. (string value)
+ #deployment_type = baremetal
+
# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
# Note: This option can be changed without restarting.
#debug = false
- # DEPRECATED: If set to false, the logging level will be set to WARNING instead
- # of the default INFO level. (boolean value)
- # This option is deprecated for removal.
- # Its value may be silently ignored in the future.
- #verbose = true
-
# The name of a logging configuration file. This file is appended to any
# existing logging configuration files. For details about logging configuration
# files, see the Python logging module documentation. Note that when logging
@@ -46,7 +46,7 @@ data:
# logging will go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
- #log_file = /var/log/kuryr/kuryr-controller.log
+ #log_file = <None>
# (Optional) The base directory used for relative log_file paths. This option
# is ignored if log_config_append is set. (string value)
@@ -65,13 +65,19 @@ data:
# is set. (boolean value)
#use_syslog = false
+ # Enable journald for logging. If running in a systemd environment you may wish
+ # to enable journal support. Doing so will use the journal native protocol
+ # which includes structured metadata in addition to log messages. This option is
+ # ignored if log_config_append is set. (boolean value)
+ #use_journal = false
+
# Syslog facility to receive log lines. This option is ignored if
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
# Log output to standard error. This option is ignored if log_config_append is
# set. (boolean value)
- #use_stderr = true
+ #use_stderr = false
# Format string to use for log messages with context. (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
@@ -93,7 +99,7 @@ data:
# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
- #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+ #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
@@ -106,15 +112,86 @@ data:
# value)
#instance_uuid_format = "[instance: %(uuid)s] "
+ # Interval, number of seconds, of log rate limiting. (integer value)
+ #rate_limit_interval = 0
+
+ # Maximum number of logged messages per rate_limit_interval. (integer value)
+ #rate_limit_burst = 0
+
+ # Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG
+ # or empty string. Logs with level greater or equal to rate_limit_except_level
+ # are not filtered. An empty string means that all levels are filtered. (string
+ # value)
+ #rate_limit_except_level = CRITICAL
+
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
[binding]
+ # Configuration options for container interface binding.
- driver = kuryr.lib.binding.drivers.vlan
+ #
+ # From kuryr_kubernetes
+ #
+
+ # The name prefix of the veth endpoint put inside the container. (string value)
+ #veth_dst_prefix = eth
+
+ # Driver to use for binding and unbinding ports. (string value)
+ # Deprecated group/name - [binding]/driver
+ #default_driver = kuryr.lib.binding.drivers.veth
+
+ # Drivers to use for binding and unbinding ports. (list value)
+ #enabled_drivers = kuryr.lib.binding.drivers.veth
+
+ # Specifies the name of the Nova instance interface to link the virtual devices
+ # to (only applicable to some binding drivers). (string value)
link_iface = eth0
+ driver = kuryr.lib.binding.drivers.vlan
+
+
+ [cni_daemon]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Enable CNI Daemon configuration. (boolean value)
+ daemon_enabled = true
+
+ # Bind address for CNI daemon HTTP server. It is recommended to allow only local
+ # connections. (string value)
+ bind_address = 127.0.0.1:50036
+
+ # Maximum number of processes that will be spawned to process requests from CNI
+ # driver. (integer value)
+ #worker_num = 30
+
+ # Time (in seconds) the CNI daemon will wait for VIF annotation to appear in
+ # pod metadata before failing the CNI request. (integer value)
+ #vif_annotation_timeout = 120
+
+ # Kuryr uses pyroute2 library to manipulate networking interfaces. When
+ # processing a high number of Kuryr requests in parallel, it may take kernel
+ # more time to process all networking stack changes. This option allows to tune
+ # internal pyroute2 timeout. (integer value)
+ #pyroute2_timeout = 30
+
+ # Set to True when you are running kuryr-daemon inside a Docker container on
+ # Kubernetes host. E.g. as DaemonSet on Kubernetes cluster Kuryr is supposed to
+ # provide networking for. This mainly means that kuryr-daemon will look for
+ # network namespaces in $netns_proc_dir instead of /proc. (boolean value)
+ docker_mode = true
+
+ # When docker_mode is set to True, this config option should be set to where
+ # host's /proc directory is mounted. Please note that mounting it is necessary
+ # to allow Kuryr-Kubernetes to move host interfaces between host network
+ # namespaces, which is essential for Kuryr to work. (string value)
+ netns_proc_dir = /host_proc
+
+
[kubernetes]
#
@@ -164,11 +241,6 @@ data:
# The driver that manages VIFs pools for Kubernetes Pods (string value)
vif_pool_driver = {{ kuryr_openstack_enable_pools | default(False) | ternary('nested', 'noop') }}
- [vif_pool]
- ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }}
- ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }}
- ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }}
- ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }}
[neutron]
# Configuration options for OpenStack Neutron
@@ -232,13 +304,55 @@ data:
external_svc_subnet = {{ kuryr_openstack_external_svc_subnet_id }}
[pod_vif_nested]
+
worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
+
+ [pool_manager]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Absolute path to socket file that will be used for communication with the
+ # Pool Manager daemon (string value)
+ #sock_file = /run/kuryr/kuryr_manage.sock
+
+
+ [vif_pool]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Set a maximum number of ports per pool; 0 to disable (integer value)
+ ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }}
+
+ # Set a target minimum size of the pool of ports (integer value)
+ ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }}
+
+ # Number of ports to be created in a bulk request (integer value)
+ ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }}
+
+ # Minimum interval (in seconds) between pool updates (integer value)
+ ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }}
+
kuryr-cni.conf: |+
[DEFAULT]
#
# From kuryr_kubernetes
#
+
+ # Directory for Kuryr vif binding executables. (string value)
+ #bindir = /usr/libexec/kuryr
+
+ # Neutron subnetpool name will be prefixed by this. (string value)
+ #subnetpool_name_prefix = kuryrPool
+
+ # baremetal or nested-containers are the supported values. (string value)
+ #deployment_type = baremetal
+
# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
# Note: This option can be changed without restarting.
@@ -263,7 +377,7 @@ data:
# logging will go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
- #log_file = /var/log/kuryr/cni.log
+ #log_file = <None>
# (Optional) The base directory used for relative log_file paths. This option
# is ignored if log_config_append is set. (string value)
@@ -282,6 +396,12 @@ data:
# is set. (boolean value)
#use_syslog = false
+ # Enable journald for logging. If running in a systemd environment you may wish
+ # to enable journal support. Doing so will use the journal native protocol
+ # which includes structured metadata in addition to log messages. This option is
+ # ignored if log_config_append is set. (boolean value)
+ #use_journal = false
+
# Syslog facility to receive log lines. This option is ignored if
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
@@ -310,7 +430,7 @@ data:
# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
- #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+ #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
@@ -323,14 +443,85 @@ data:
# value)
#instance_uuid_format = "[instance: %(uuid)s] "
+ # Interval, number of seconds, of log rate limiting. (integer value)
+ #rate_limit_interval = 0
+
+ # Maximum number of logged messages per rate_limit_interval. (integer value)
+ #rate_limit_burst = 0
+
+ # Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG
+ # or empty string. Logs with level greater or equal to rate_limit_except_level
+ # are not filtered. An empty string means that all levels are filtered. (string
+ # value)
+ #rate_limit_except_level = CRITICAL
+
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
[binding]
+ # Configuration options for container interface binding.
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # The name prefix of the veth endpoint put inside the container. (string value)
+ #veth_dst_prefix = eth
+
+ # Driver to use for binding and unbinding ports. (string value)
+ # Deprecated group/name - [binding]/driver
+ #default_driver = kuryr.lib.binding.drivers.veth
+
+ # Drivers to use for binding and unbinding ports. (list value)
+ #enabled_drivers = kuryr.lib.binding.drivers.veth
+
+ # Specifies the name of the Nova instance interface to link the virtual devices
+ # to (only applicable to some binding drivers). (string value)
+ link_iface = eth0
driver = kuryr.lib.binding.drivers.vlan
- link_iface = {{ kuryr_cni_link_interface }}
+
+
+ [cni_daemon]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Enable CNI Daemon configuration. (boolean value)
+ daemon_enabled = true
+
+ # Bind address for CNI daemon HTTP server. It is recommended to allow only local
+ # connections. (string value)
+ bind_address = 127.0.0.1:50036
+
+ # Maximum number of processes that will be spawned to process requests from CNI
+ # driver. (integer value)
+ #worker_num = 30
+
+ # Time (in seconds) the CNI daemon will wait for VIF annotation to appear in
+ # pod metadata before failing the CNI request. (integer value)
+ #vif_annotation_timeout = 120
+
+ # Kuryr uses pyroute2 library to manipulate networking interfaces. When
+ # processing a high number of Kuryr requests in parallel, it may take kernel
+ # more time to process all networking stack changes. This option allows to tune
+ # internal pyroute2 timeout. (integer value)
+ #pyroute2_timeout = 30
+
+ # Set to True when you are running kuryr-daemon inside a Docker container on
+ # Kubernetes host. E.g. as DaemonSet on Kubernetes cluster Kuryr is supposed to
+ # provide networking for. This mainly means that kuryr-daemon will look for
+ # network namespaces in $netns_proc_dir instead of /proc. (boolean value)
+ docker_mode = true
+
+ # When docker_mode is set to True, this config option should be set to where
+ # host's /proc directory is mounted. Please note that mounting it is necessary
+ # to allow Kuryr-Kubernetes to move host interfaces between host network
+ # namespaces, which is essential for Kuryr to work. (string value)
+ netns_proc_dir = /host_proc
+
[kubernetes]
@@ -341,12 +532,136 @@ data:
# The root URL of the Kubernetes API (string value)
api_root = {{ openshift.master.api_url }}
- # The token to talk to the k8s API
- token_file = /etc/kuryr/token
+ # Absolute path to client cert to connect to HTTPS K8S_API (string value)
+ # ssl_client_crt_file = /etc/kuryr/controller.crt
+
+ # Absolute path client key file to connect to HTTPS K8S_API (string value)
+ # ssl_client_key_file = /etc/kuryr/controller.key
# Absolute path to ca cert file to connect to HTTPS K8S_API (string value)
- ssl_ca_crt_file = /etc/kuryr/ca.crt
+ ssl_ca_crt_file = /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+
+ # The token to talk to the k8s API
+ token_file = /var/run/secrets/kubernetes.io/serviceaccount/token
# HTTPS K8S_API server identity verification (boolean value)
# TODO (apuimedo): Make configurable
ssl_verify_server_crt = True
+
+ # The driver to determine OpenStack project for pod ports (string value)
+ pod_project_driver = default
+
+ # The driver to determine OpenStack project for services (string value)
+ service_project_driver = default
+
+ # The driver to determine Neutron subnets for pod ports (string value)
+ pod_subnets_driver = default
+
+ # The driver to determine Neutron subnets for services (string value)
+ service_subnets_driver = default
+
+ # The driver to determine Neutron security groups for pods (string value)
+ pod_security_groups_driver = default
+
+ # The driver to determine Neutron security groups for services (string value)
+ service_security_groups_driver = default
+
+ # The driver that provides VIFs for Kubernetes Pods. (string value)
+ pod_vif_driver = nested-vlan
+
+ # The driver that manages VIFs pools for Kubernetes Pods (string value)
+ vif_pool_driver = {{ kuryr_openstack_enable_pools | default(False) | ternary('nested', 'noop') }}
+
+ [neutron]
+ # Configuration options for OpenStack Neutron
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Authentication URL (string value)
+ auth_url = {{ kuryr_openstack_auth_url }}
+
+ # Authentication type to load (string value)
+ # Deprecated group/name - [neutron]/auth_plugin
+ auth_type = password
+
+ # Domain ID to scope to (string value)
+ user_domain_name = {{ kuryr_openstack_user_domain_name }}
+
+ # User's password (string value)
+ password = {{ kuryr_openstack_password }}
+
+ # Domain name containing project (string value)
+ project_domain_name = {{ kuryr_openstack_project_domain_name }}
+
+ # Project ID to scope to (string value)
+ # Deprecated group/name - [neutron]/tenant-id
+ project_id = {{ kuryr_openstack_project_id }}
+
+ # Token (string value)
+ #token = <None>
+
+ # Trust ID (string value)
+ #trust_id = <None>
+
+ # User's domain id (string value)
+ #user_domain_id = <None>
+
+ # User id (string value)
+ #user_id = <None>
+
+ # Username (string value)
+ # Deprecated group/name - [neutron]/user-name
+ username = {{ kuryr_openstack_username }}
+
+ # Whether a plugging operation is failed if the port to plug does not become
+ # active (boolean value)
+ #vif_plugging_is_fatal = false
+
+ # Seconds to wait for port to become active (integer value)
+ #vif_plugging_timeout = 0
+
+ [neutron_defaults]
+
+ pod_security_groups = {{ kuryr_openstack_pod_sg_id }}
+ pod_subnet = {{ kuryr_openstack_pod_subnet_id }}
+ service_subnet = {{ kuryr_openstack_service_subnet_id }}
+ project = {{ kuryr_openstack_pod_project_id }}
+ # TODO (apuimedo): Remove the duplicated line just after this one once the
+ # RDO packaging contains the upstream patch
+ worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
+ [pod_vif_nested]
+
+ worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
+
+ [pool_manager]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Absolute path to socket file that will be used for communication with the
+ # Pool Manager daemon (string value)
+ #sock_file = /run/kuryr/kuryr_manage.sock
+
+
+ [vif_pool]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Set a maximum number of ports per pool; 0 to disable (integer value)
+ ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }}
+
+ # Set a target minimum size of the pool of ports (integer value)
+ ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }}
+
+ # Number of ports to be created in a bulk request (integer value)
+ ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }}
+
+ # Minimum interval (in seconds) between pool updates (integer value)
+ ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }}
diff --git a/roles/lib_utils/filter_plugins/oo_filters.py b/roles/lib_utils/filter_plugins/oo_filters.py
index fc14b5633..9f73510c4 100644
--- a/roles/lib_utils/filter_plugins/oo_filters.py
+++ b/roles/lib_utils/filter_plugins/oo_filters.py
@@ -21,13 +21,10 @@ import yaml
from ansible import errors
from ansible.parsing.yaml.dumper import AnsibleDumper
-# ansible.compat.six goes away with Ansible 2.4
-try:
- from ansible.compat.six import string_types, u
- from ansible.compat.six.moves.urllib.parse import urlparse
-except ImportError:
- from ansible.module_utils.six import string_types, u
- from ansible.module_utils.six.moves.urllib.parse import urlparse
+# pylint: disable=import-error,no-name-in-module
+from ansible.module_utils.six import string_types, u
+# pylint: disable=import-error,no-name-in-module
+from ansible.module_utils.six.moves.urllib.parse import urlparse
HAS_OPENSSL = False
try:
diff --git a/roles/lib_utils/filter_plugins/openshift_master.py b/roles/lib_utils/filter_plugins/openshift_master.py
index ff15f693b..e67b19c28 100644
--- a/roles/lib_utils/filter_plugins/openshift_master.py
+++ b/roles/lib_utils/filter_plugins/openshift_master.py
@@ -10,11 +10,7 @@ from ansible import errors
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.filter.core import to_bool as ansible_bool
-# ansible.compat.six goes away with Ansible 2.4
-try:
- from ansible.compat.six import string_types, u
-except ImportError:
- from ansible.module_utils.six import string_types, u
+from ansible.module_utils.six import string_types, u
import yaml
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 8c8227b5e..efd2468b2 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -98,12 +98,20 @@ openshift_aws_elb_dict:
proxy_protocol: True
openshift_aws_node_group_config_master_volumes:
+- device_name: /dev/sda1
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: False
- device_name: /dev/sdb
volume_size: 100
device_type: gp2
delete_on_termination: False
openshift_aws_node_group_config_node_volumes:
+- device_name: /dev/sda1
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: True
- device_name: /dev/sdb
volume_size: 100
device_type: gp2
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
index 786a2e4cf..2b5f317d8 100644
--- a/roles/openshift_aws/tasks/provision.yml
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -1,23 +1,6 @@
---
-- when: openshift_aws_create_iam_cert | bool
- name: create the iam_cert for elb certificate
- include_tasks: iam_cert.yml
-
-- when: openshift_aws_create_s3 | bool
- name: create s3 bucket for registry
- include_tasks: s3.yml
-
- include_tasks: vpc_and_subnet_id.yml
-- name: create elbs
- include_tasks: elb.yml
- with_dict: "{{ openshift_aws_elb_dict }}"
- vars:
- l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
- l_openshift_aws_elb_name_dict: "{{ openshift_aws_elb_name_dict }}"
- loop_control:
- loop_var: l_elb_dict_item
-
- name: include scale group creation for master
include_tasks: build_node_group.yml
with_items: "{{ openshift_aws_master_group }}"
diff --git a/roles/openshift_aws/tasks/provision_elb.yml b/roles/openshift_aws/tasks/provision_elb.yml
new file mode 100644
index 000000000..a52f63bd5
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision_elb.yml
@@ -0,0 +1,15 @@
+---
+- when: openshift_aws_create_iam_cert | bool
+ name: create the iam_cert for elb certificate
+ include_tasks: iam_cert.yml
+
+- include_tasks: vpc_and_subnet_id.yml
+
+- name: create elbs
+ include_tasks: elb.yml
+ with_dict: "{{ openshift_aws_elb_dict }}"
+ vars:
+ l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
+ l_openshift_aws_elb_name_dict: "{{ openshift_aws_elb_name_dict }}"
+ loop_control:
+ loop_var: l_elb_dict_item
diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml
index d82f18574..9105b5b4c 100644
--- a/roles/openshift_aws/tasks/provision_nodes.yml
+++ b/roles/openshift_aws/tasks/provision_nodes.yml
@@ -2,25 +2,12 @@
# Get bootstrap config token
# bootstrap should be created on first master
# need to fetch it and shove it into cloud data
-- name: fetch master instances
- ec2_instance_facts:
- region: "{{ openshift_aws_region }}"
- filters:
- "tag:clusterid": "{{ openshift_aws_clusterid }}"
- "tag:host-type": master
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until:
- - "'instances' in instancesout"
- - instancesout.instances|length > 0
+- include_tasks: setup_master_group.yml
- name: slurp down the bootstrap.kubeconfig
slurp:
src: /etc/origin/master/bootstrap.kubeconfig
- delegate_to: "{{ instancesout.instances[0].public_ip_address }}"
- remote_user: root
+ delegate_to: "{{ groups.masters.0 }}"
register: bootstrap
- name: set_fact for kubeconfig token
diff --git a/roles/openshift_examples/meta/main.yml b/roles/openshift_examples/meta/main.yml
index 1a34c85fc..9f46a4683 100644
--- a/roles/openshift_examples/meta/main.yml
+++ b/roles/openshift_examples/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_utils
+- role: openshift_facts
diff --git a/roles/openshift_excluder/tasks/verify_excluder.yml b/roles/openshift_excluder/tasks/verify_excluder.yml
index 4f5277fa2..22a3fcd3b 100644
--- a/roles/openshift_excluder/tasks/verify_excluder.yml
+++ b/roles/openshift_excluder/tasks/verify_excluder.yml
@@ -3,7 +3,7 @@
# - excluder
- name: Get available excluder version
repoquery:
- name: "{{ excluder }}"
+ name: "{{ excluder }}{{ '-' ~ r_openshift_excluder_upgrade_target.split('.')[0:2] | join('.') ~ '*' if r_openshift_excluder_upgrade_target is defined else '' }}"
ignore_excluders: true
register: repoquery_out
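Same idea as the upgrade-target query earlier: the excluder lookup is pinned to the target's major.minor. Illustration with assumed values: for `r_openshift_excluder_upgrade_target=3.9.0`, `split('.')[0:2] | join('.')` yields `3.9`, so with `excluder=origin-excluder` the name renders as `origin-excluder-3.9*`:

```yaml
# Illustration only: verify the rendered excluder package spec.
- debug:
    msg: "{{ excluder }}{{ '-' ~ r_openshift_excluder_upgrade_target.split('.')[0:2] | join('.') ~ '*' if r_openshift_excluder_upgrade_target is defined else '' }}"
  vars:
    excluder: origin-excluder                     # assumed value
    r_openshift_excluder_upgrade_target: '3.9.0'  # -> "origin-excluder-3.9*"
```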
diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml
index 5ae863871..b38ebdfb4 100644
--- a/roles/openshift_expand_partition/tasks/main.yml
+++ b/roles/openshift_expand_partition/tasks/main.yml
@@ -8,7 +8,7 @@
- name: Determine if growpart is installed
command: "rpm -q cloud-utils-growpart"
register: has_growpart
- failed_when: has_growpart.cr != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout
+ failed_when: has_growpart.rc != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout
changed_when: false
when: openshift_is_containerized | bool
diff --git a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
index 986a01f38..7f8c6ebdc 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
@@ -170,7 +170,7 @@ class Elasticsearch(LoggingCheck):
"""
errors = []
for pod_name in pods_by_name.keys():
- df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
+ df_cmd = '-c elasticsearch exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
disk_output = self.exec_oc(df_cmd, [], save_as_name='get_pv_diskspace.json')
lines = disk_output.splitlines()
# expecting one header looking like 'IUse% Use%' and one body line
diff --git a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
index 77f020357..fef945d51 100644
--- a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
@@ -1,4 +1,10 @@
---
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-hosted-ansible-XXXXXX
+ register: mktempHosted
+ changed_when: False
+ check_mode: no
+
- name: Generate GlusterFS registry endpoints
template:
src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-endpoints.yml.j2"
@@ -14,3 +20,10 @@
with_items:
- "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
- "{{ mktempHosted.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktempHosted.stdout }}"
+ state: absent
+ changed_when: False
+ check_mode: no
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index ebd2d747b..ff62b6136 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -321,9 +321,14 @@
- name: Add Kibana route information to web console asset config
include_role:
name: openshift_web_console
- tasks_from: update_asset_config.yml
+ tasks_from: update_console_config.yml
vars:
- asset_config_edits:
+ console_config_edits:
+ - key: clusterInfo#loggingPublicURL
+ value: "https://{{ openshift_logging_kibana_hostname }}"
+ # Continue to set the old deprecated property until the
+ # origin-web-console image is updated for the new name.
+ # This will be removed in a future pull.
- key: loggingPublicURL
value: "https://{{ openshift_logging_kibana_hostname }}"
when: openshift_web_console_install | default(true) | bool
diff --git a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
index 7870f43e2..4564f33dd 100644
--- a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
@@ -8,8 +8,10 @@
# TODO: If the sdn package isn't already installed this will install it, we
# should fix that
-- name: Upgrade master packages
- package: name={{ master_pkgs | join(',') }} state=present
+- name: Upgrade master packages - yum
+ command:
+ yum install -y {{ master_pkgs | join(' ') }} \
+ {{ ' --exclude *' ~ openshift_service_type ~ '*3.9*' if openshift_release | version_compare('3.9','<') else '' }}
vars:
master_pkgs:
- "{{ openshift_service_type }}{{ openshift_pkg_version | default('') }}"
@@ -17,6 +19,21 @@
- "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}"
- "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version | default('') }}"
- "{{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }}"
- - "tuned-profiles-{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}"
register: result
until: result is succeeded
+ when: ansible_pkg_mgr == 'yum'
+
+- name: Upgrade master packages - dnf
+ dnf:
+ name: "{{ master_pkgs | join(',') }}"
+ state: present
+ vars:
+ master_pkgs:
+ - "{{ openshift_service_type }}{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-master{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-clients{{ openshift_pkg_version }}"
+ register: result
+ until: result is succeeded
+ when: ansible_pkg_mgr == 'dnf'
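The yum branch folds the exclude expression into the command only for pre-3.9 releases, so a 3.8 control-plane upgrade cannot pull 3.9 packages in early. A sketch of how the task might render, assuming openshift_service_type=origin, openshift_pkg_version='-3.8.0' and openshift_release='3.8':

    - name: Upgrade master packages - yum
      command: >
        yum install -y origin-3.8.0 origin-master-3.8.0 origin-node-3.8.0
        origin-sdn-ovs-3.8.0 origin-clients-3.8.0 --exclude *origin*3.9*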
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index 0866fe0d2..4a63d081e 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -74,9 +74,14 @@
- name: Add metrics route information to web console asset config
include_role:
name: openshift_web_console
- tasks_from: update_asset_config.yml
+ tasks_from: update_console_config.yml
vars:
- asset_config_edits:
+ console_config_edits:
+ - key: clusterInfo#metricsPublicURL
+ value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics"
+ # Continue to set the old deprecated property until the
+ # origin-web-console image is updated for the new name.
+ # This will be removed in a future pull.
- key: metricsPublicURL
value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics"
when: openshift_web_console_install | default(true) | bool
diff --git a/roles/openshift_metrics/tasks/oc_apply.yaml b/roles/openshift_metrics/tasks/oc_apply.yaml
index 8ccfb7192..057963c1a 100644
--- a/roles/openshift_metrics/tasks/oc_apply.yaml
+++ b/roles/openshift_metrics/tasks/oc_apply.yaml
@@ -16,7 +16,9 @@
apply -f {{ file_name }}
-n {{namespace}}
register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
+ failed_when:
+ - "'error' in generation_apply.stderr"
+ - "generation_apply.rc != 0"
changed_when: no
- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
@@ -28,5 +30,7 @@
register: version_changed
vars:
init_version: "{{ (generation_init is defined) | ternary(generation_init.stdout, '0') }}"
- failed_when: "'error' in version_changed.stderr"
+ failed_when:
+ - "'error' in version_changed.stderr"
+ - "version_changed.rc != 0"
changed_when: version_changed.stdout | int > init_version | int
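Note that a YAML list under failed_when is ANDed: after this change the task fails only when `oc apply` both printed an error and exited non-zero, which tolerates runs that warn on stderr yet still succeed. The equivalent single expression, for comparison:

    failed_when: "'error' in generation_apply.stderr and generation_apply.rc != 0"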
diff --git a/roles/openshift_persistent_volumes/tasks/pv.yml b/roles/openshift_persistent_volumes/tasks/pv.yml
index ef9ab7f5f..865269b7a 100644
--- a/roles/openshift_persistent_volumes/tasks/pv.yml
+++ b/roles/openshift_persistent_volumes/tasks/pv.yml
@@ -13,5 +13,5 @@
--config={{ mktemp.stdout }}/admin.kubeconfig
register: pv_create_output
when: persistent_volumes | length > 0
- failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout)
+ failed_when: "('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout) and pv_create_output.rc != 0"
changed_when: ('created' in pv_create_output.stdout)
diff --git a/roles/openshift_persistent_volumes/tasks/pvc.yml b/roles/openshift_persistent_volumes/tasks/pvc.yml
index 2c5519192..6c12d128c 100644
--- a/roles/openshift_persistent_volumes/tasks/pvc.yml
+++ b/roles/openshift_persistent_volumes/tasks/pvc.yml
@@ -13,5 +13,5 @@
--config={{ mktemp.stdout }}/admin.kubeconfig
register: pvc_create_output
when: persistent_volume_claims | length > 0
- failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout)
+ failed_when: "('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout) and pvc_create_output.rc != 0"
changed_when: ('created' in pvc_create_output.stdout)
diff --git a/roles/openshift_provisioners/tasks/oc_apply.yaml b/roles/openshift_provisioners/tasks/oc_apply.yaml
index a4ce53eae..239e1f1cc 100644
--- a/roles/openshift_provisioners/tasks/oc_apply.yaml
+++ b/roles/openshift_provisioners/tasks/oc_apply.yaml
@@ -15,7 +15,9 @@
apply -f {{ file_name }}
-n {{ namespace }}
register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
+ failed_when:
+ - "'error' in generation_apply.stderr"
+ - "generation_apply.rc != 0"
changed_when: no
- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
@@ -36,7 +38,9 @@
delete -f {{ file_name }}
-n {{ namespace }}
register: generation_delete
- failed_when: "'error' in generation_delete.stderr"
+ failed_when:
+ - "'error' in generation_delete.stderr"
+ - "generation_delete.rc != 0"
changed_when: generation_delete.rc == 0
when: generation_apply.rc != 0
@@ -46,6 +50,8 @@
apply -f {{ file_name }}
-n {{ namespace }}
register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
+ failed_when:
+ - "'error' in generation_apply.stderr"
+ - "generation_apply.rc != 0"
changed_when: generation_apply.rc == 0
when: generation_apply.rc != 0
diff --git a/roles/openshift_version/tasks/check_available_rpms.yml b/roles/openshift_version/tasks/check_available_rpms.yml
index bdbc63d27..fea0daf77 100644
--- a/roles/openshift_version/tasks/check_available_rpms.yml
+++ b/roles/openshift_version/tasks/check_available_rpms.yml
@@ -1,7 +1,7 @@
---
- name: Get available {{ openshift_service_type}} version
repoquery:
- name: "{{ openshift_service_type}}"
+ name: "{{ openshift_service_type}}{{ '-' ~ openshift_release ~ '*' if openshift_release is defined else '' }}"
ignore_excluders: true
register: rpm_results
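Appending the glob pins the repoquery lookup to the requested release instead of whatever is newest in the enabled repos. Assuming openshift_service_type=origin and openshift_release='3.9', the task would render roughly as:

    - name: Get available origin version
      repoquery:
        name: "origin-3.9*"
        ignore_excluders: true
      register: rpm_results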
diff --git a/roles/openshift_version/tasks/first_master_containerized_version.yml b/roles/openshift_version/tasks/first_master_containerized_version.yml
index e02a75eab..3ed1d2cfe 100644
--- a/roles/openshift_version/tasks/first_master_containerized_version.yml
+++ b/roles/openshift_version/tasks/first_master_containerized_version.yml
@@ -7,6 +7,7 @@
when:
- openshift_image_tag is defined
- openshift_version is not defined
+ - not (openshift_version_reinit | default(false))
- name: Set containerized version to configure if openshift_release specified
set_fact:
@@ -20,7 +21,7 @@
docker run --rm {{ openshift_cli_image }}:latest version
register: cli_image_version
when:
- - openshift_version is not defined
+ - openshift_version is not defined or openshift_version_reinit | default(false)
- not openshift_use_crio_only
# Origin latest = pre-release version (i.e. v1.3.0-alpha.1-321-gb095e3a)
@@ -34,7 +35,7 @@
- set_fact:
openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
- when: openshift_version is not defined
+ when: openshift_version is not defined or openshift_version_reinit | default(false)
# If we got an openshift_version like "3.2", lookup the latest 3.2 container version
# and use that value instead.
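The set_fact above recovers the bare version from the first line of the CLI image's `version` output. A worked example of the filter chain, assuming the image prints "oc v1.3.0-alpha.1-321-gb095e3a" on its first line (sample value):

    # stdout_lines[0].split(' ')[1]  -> "v1.3.0-alpha.1-321-gb095e3a"
    # .split('-')[0]                 -> "v1.3.0"
    # [1:]                           -> "1.3.0"   (leading "v" stripped)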
diff --git a/roles/openshift_version/tasks/first_master_rpm_version.yml b/roles/openshift_version/tasks/first_master_rpm_version.yml
index 264baca65..5d92f90c6 100644
--- a/roles/openshift_version/tasks/first_master_rpm_version.yml
+++ b/roles/openshift_version/tasks/first_master_rpm_version.yml
@@ -6,6 +6,7 @@
when:
- openshift_pkg_version is defined
- openshift_version is not defined
+ - not (openshift_version_reinit | default(false))
# These tasks should only be run against masters and nodes
- name: Set openshift_version for rpm installation
@@ -13,4 +14,7 @@
- set_fact:
openshift_version: "{{ rpm_results.results.versions.available_versions.0 }}"
- when: openshift_version is not defined
+ when: openshift_version is not defined or ( openshift_version_reinit | default(false) )
+- set_fact:
+ openshift_pkg_version: "-{{ rpm_results.results.versions.available_versions.0 }}"
+ when: openshift_version_reinit | default(false)
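The leading dash matters here: openshift_pkg_version is concatenated directly onto package names elsewhere, so the fact must carry its own separator. A sketch, assuming the first available version is 3.9.0:

    # rpm_results.results.versions.available_versions.0 == "3.9.0"
    # => openshift_pkg_version == "-3.9.0"
    # => "{{ openshift_service_type }}{{ openshift_pkg_version }}" == "origin-3.9.0"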
diff --git a/roles/openshift_version/tasks/masters_and_nodes.yml b/roles/openshift_version/tasks/masters_and_nodes.yml
index fbeb22d8b..eddd5ff42 100644
--- a/roles/openshift_version/tasks/masters_and_nodes.yml
+++ b/roles/openshift_version/tasks/masters_and_nodes.yml
@@ -6,9 +6,12 @@
include_tasks: check_available_rpms.yml
- name: Fail if rpm version and docker image version are different
fail:
- msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}"
+ msg: "OCP rpm version {{ rpm_results.results.versions.available_versions.0 }} is different from OCP image version {{ openshift_version }}"
# Both versions have the same string representation
- when: rpm_results.results.versions.available_versions.0 != openshift_version
+ when:
+ - openshift_version not in rpm_results.results.versions.available_versions.0
+ - openshift_version_reinit | default(false)
+
# block when
when: not openshift_is_atomic | bool
diff --git a/roles/openshift_web_console/tasks/install.yml b/roles/openshift_web_console/tasks/install.yml
index 12916961b..50e72657f 100644
--- a/roles/openshift_web_console/tasks/install.yml
+++ b/roles/openshift_web_console/tasks/install.yml
@@ -21,36 +21,68 @@
node_selector:
- ""
-- name: Make temp directory for asset config files
+- name: Make temp directory for the web console config files
command: mktemp -d /tmp/console-ansible-XXXXXX
register: mktemp
changed_when: False
-- name: Copy asset config template to temp directory
+- name: Copy the web console config template to temp directory
copy:
src: "{{ __console_files_location }}/{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
with_items:
- "{{ __console_template_file }}"
+ - "{{ __console_rbac_file }}"
- "{{ __console_config_file }}"
-- name: Update asset config properties
+- name: Update the web console config properties
yedit:
src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
edits:
- - key: logoutURL
+ - key: clusterInfo#consolePublicURL
+ # Must have a trailing slash
+ value: "{{ openshift.master.public_console_url }}/"
+ - key: clusterInfo#masterPublicURL
+ value: "{{ openshift.master.public_api_url }}"
+ - key: clusterInfo#logoutPublicURL
value: "{{ openshift.master.logout_url | default('') }}"
+ - key: features#inactivityTimeoutMinutes
+ value: "{{ openshift_web_console_inactivity_timeout_minutes | default(0) }}"
+
+ # TODO: The new extensions properties cannot be set until
+ # origin-web-console-server has been updated with the API changes since
+ # `extensions` in the old asset config was an array.
+
+ # - key: extensions#scriptURLs
+ # value: "{{ openshift_web_console_extension_script_urls | default([]) }}"
+ # - key: extensions#stylesheetURLs
+ # value: "{{ openshift_web_console_extension_stylesheet_urls | default([]) }}"
+ # - key: extensions#properties
+ # value: "{{ openshift_web_console_extension_properties | default({}) }}"
+
+ # DEPRECATED PROPERTIES
+ # These properties have been renamed and will be removed from the install
+ # in a future pull. Keep both the old and new properties for now so that
+ # the install is not broken while the origin-web-console image is updated.
- key: publicURL
# Must have a trailing slash
value: "{{ openshift.master.public_console_url }}/"
+ - key: logoutURL
+ value: "{{ openshift.master.logout_url | default('') }}"
- key: masterPublicURL
value: "{{ openshift.master.public_api_url }}"
+ separator: '#'
+ state: present
- slurp:
src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
register: config
-- name: Apply template file
+- name: Reconcile with the web console RBAC file
+ shell: >
+ {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __console_rbac_file }}" | {{ openshift_client_binary }} auth reconcile -f -
+
+- name: Apply the web console template file
shell: >
{{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __console_template_file }}"
--param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"
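The new reconcile step pipes the processed RBAC template through `oc auth reconcile`, which converges roles and bindings toward the template without clobbering rules or subjects added out of band, a safer choice for RBAC objects than a plain apply. Rendered, assuming openshift_client_binary resolves to `oc`:

    # oc process -f "{{ mktemp.stdout }}/console-rbac-template.yaml" | oc auth reconcile -f -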
diff --git a/roles/openshift_web_console/tasks/update_asset_config.yml b/roles/openshift_web_console/tasks/update_console_config.yml
index 0992b32e1..e347c0193 100644
--- a/roles/openshift_web_console/tasks/update_asset_config.yml
+++ b/roles/openshift_web_console/tasks/update_console_config.yml
@@ -1,9 +1,9 @@
---
# This task updates asset config values in the webconsole-config config map in
# the openshift-web-console namespace. The values to set are passed in the
-# variable `asset_config_edits`, which is an array of objects with `key` and
+# variable `console_config_edits`, which is an array of objects with `key` and
# `value` properties in the same format as `yedit` module `edits`. Only
-# properties passed are updated.
+# properties passed are updated. The separator for nested properties is `#`.
#
# Note that this triggers a redeployment on the console and a brief downtime
# since it uses a `Recreate` strategy.
@@ -12,10 +12,10 @@
#
# - include_role:
# name: openshift_web_console
-# tasks_from: update_asset_config.yml
+# tasks_from: update_console_config.yml
# vars:
-# asset_config_edits:
-# - key: loggingPublicURL
+# console_config_edits:
+# - key: clusterInfo#loggingPublicURL
# value: "https://{{ openshift_logging_kibana_hostname }}"
# when: openshift_web_console_install | default(true) | bool
@@ -28,18 +28,20 @@
- name: Make temp directory
command: mktemp -d /tmp/console-ansible-XXXXXX
- register: mktemp
+ register: mktemp_console
changed_when: False
-- name: Copy asset config to temp file
+- name: Copy web console config to temp file
copy:
content: "{{webconsole_config.results.results[0].data['webconsole-config.yaml']}}"
- dest: "{{ mktemp.stdout }}/webconsole-config.yaml"
+ dest: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
-- name: Change asset config properties
+- name: Change web console config properties
yedit:
- src: "{{ mktemp.stdout }}/webconsole-config.yaml"
- edits: "{{asset_config_edits}}"
+ src: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
+ edits: "{{console_config_edits}}"
+ separator: '#'
+ state: present
- name: Update web console config map
oc_configmap:
@@ -47,14 +49,15 @@
name: webconsole-config
state: present
from_file:
- webconsole-config.yaml: "{{ mktemp.stdout }}/webconsole-config.yaml"
+ webconsole-config.yaml: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
- name: Remove temp directory
file:
state: absent
- name: "{{ mktemp.stdout }}"
+ name: "{{ mktemp_console.stdout }}"
changed_when: False
+# TODO: Only roll out if the config has changed.
# There's currently no command to trigger a rollout for a k8s deployment
# without changing the pod spec. Add an annotation to force a rollout after
# the config map has been edited.
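A hedged sketch of what such a forced rollout can look like: patch a throwaway annotation into the deployment's pod template so the Deployment controller creates a new ReplicaSet. The deployment name and annotation key below are illustrative assumptions, not taken from this patch:

    - name: Force a new rollout of the web console
      command: >
        oc patch deployment webconsole -n openshift-web-console
        -p '{"spec":{"template":{"metadata":{"annotations":
        {"console.config/redeploy": "{{ ansible_date_time.iso8601 }}"}}}}}'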
diff --git a/roles/openshift_web_console/vars/main.yml b/roles/openshift_web_console/vars/main.yml
index 80bc56a17..e91048e38 100644
--- a/roles/openshift_web_console/vars/main.yml
+++ b/roles/openshift_web_console/vars/main.yml
@@ -2,4 +2,5 @@
__console_files_location: "../../../files/origin-components/"
__console_template_file: "console-template.yaml"
+__console_rbac_file: "console-rbac-template.yaml"
__console_config_file: "console-config.yaml"
diff --git a/utils/src/ooinstall/ansible_plugins/facts_callback.py b/utils/src/ooinstall/ansible_plugins/facts_callback.py
index 433e29dde..6251cd22b 100644
--- a/utils/src/ooinstall/ansible_plugins/facts_callback.py
+++ b/utils/src/ooinstall/ansible_plugins/facts_callback.py
@@ -7,11 +7,7 @@ import yaml
from ansible.plugins.callback import CallbackBase
from ansible.parsing.yaml.dumper import AnsibleDumper
-# ansible.compat.six goes away with Ansible 2.4
-try:
- from ansible.compat.six import u
-except ImportError:
- from ansible.module_utils.six import u
+from ansible.module_utils.six import u
# pylint: disable=super-init-not-called