Diffstat (limited to 'roles')
 roles/ansible_service_broker/defaults/main.yml | 6
 roles/ansible_service_broker/meta/main.yml | 15
 roles/ansible_service_broker/tasks/install.yml | 269
 roles/ansible_service_broker/tasks/main.yml | 8
 roles/ansible_service_broker/tasks/remove.yml | 65
 roles/ansible_service_broker/tasks/validate_facts.yml | 15
 roles/ansible_service_broker/vars/default_images.yml | 14
 roles/ansible_service_broker/vars/openshift-enterprise.yml | 14
 roles/docker/handlers/main.yml | 5
 roles/etcd/templates/etcd.docker.service | 2
 roles/etcd_common/tasks/backup.yml | 29
 roles/etcd_migrate/tasks/check.yml | 2
 roles/etcd_migrate/tasks/check_cluster_health.yml | 2
 roles/etcd_migrate/tasks/check_cluster_status.yml | 8
 roles/etcd_migrate/tasks/migrate.yml | 20
 roles/lib_openshift/library/oc_atomic_container.py | 2
 roles/lib_openshift/src/ansible/oc_atomic_container.py | 2
 roles/openshift_ca/tasks/main.yml | 59
 roles/openshift_certificate_expiry/library/openshift_cert_expiry.py | 21
 roles/openshift_certificate_expiry/test/conftest.py | 5
 roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-app-example.yaml | 13
 roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-db-example.yaml | 13
 roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-example.yaml | 13
 roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-region-example.yaml | 13
 roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-server-example.yaml | 13
 roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml | 210
 roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-pv-example.yaml | 58
 roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-template.yaml | 254
 roles/openshift_facts/library/openshift_facts.py | 36
 roles/openshift_health_checker/openshift_checks/disk_availability.py | 120
 roles/openshift_health_checker/test/disk_availability_test.py | 27
 roles/openshift_hosted/tasks/registry/registry.yml | 4
 roles/openshift_hosted/tasks/router/router.yml | 6
 roles/openshift_logging/defaults/main.yml | 4
 roles/openshift_logging/tasks/install_logging.yaml | 22
 roles/openshift_logging_elasticsearch/tasks/main.yaml | 20
 roles/openshift_logging_elasticsearch/templates/es.j2 | 3
 roles/openshift_logging_elasticsearch/templates/pvc.j2 | 3
 roles/openshift_logging_fluentd/tasks/main.yaml | 2
 roles/openshift_logging_fluentd/templates/fluentd.j2 | 10
 roles/openshift_logging_kibana/tasks/main.yaml | 5
 roles/openshift_logging_mux/templates/mux.j2 | 12
 roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 | 2
 roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 | 2
 roles/openshift_master/templates/master_docker/master.docker.service.j2 | 2
 roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 | 3
 roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 | 3
 roles/openshift_master_certificates/tasks/main.yml | 2
 roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml | 3
 roles/openshift_metrics/tasks/install_cassandra.yaml | 3
 roles/openshift_metrics/tasks/install_hosa.yaml | 2
 roles/openshift_metrics/tasks/install_support.yaml | 2
 roles/openshift_metrics/tasks/main.yaml | 2
 roles/openshift_metrics/templates/pvc.j2 | 3
 roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py | 21
 roles/openshift_node/templates/openshift.docker.node.service | 2
 roles/openshift_node_upgrade/tasks/restart.yml | 6
 roles/openshift_service_catalog/defaults/main.yml | 3
 roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml | 161
 roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml | 38
 roles/openshift_service_catalog/meta/main.yml | 17
 roles/openshift_service_catalog/tasks/generate_certs.yml | 70
 roles/openshift_service_catalog/tasks/install.yml | 181
 roles/openshift_service_catalog/tasks/main.yml | 8
 roles/openshift_service_catalog/tasks/remove.yml | 56
 roles/openshift_service_catalog/tasks/start_api_server.yml | 22
 roles/openshift_service_catalog/tasks/wire_aggregator.yml | 86
 roles/openshift_service_catalog/templates/api_server.j2 | 80
 roles/openshift_service_catalog/templates/api_server_route.j2 | 14
 roles/openshift_service_catalog/templates/api_server_service.j2 | 13
 roles/openshift_service_catalog/templates/controller_manager.j2 | 46
 roles/openshift_service_catalog/templates/controller_manager_service.j2 | 13
 roles/openshift_service_catalog/vars/default_images.yml | 3
 roles/openshift_service_catalog/vars/openshift-enterprise.yml | 3
 roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 8
 roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml | 4
 roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 | 2
 roles/openshift_version/tasks/main.yml | 196
 roles/rhel_subscribe/meta/main.yml | 3
 roles/rhel_subscribe/tasks/main.yml | 10
 80 files changed, 2166 insertions(+), 353 deletions(-)
diff --git a/roles/ansible_service_broker/defaults/main.yml b/roles/ansible_service_broker/defaults/main.yml
new file mode 100644
index 000000000..4a7252679
--- /dev/null
+++ b/roles/ansible_service_broker/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+
+ansible_service_broker_remove: false
+ansible_service_broker_log_level: info
+# It is recommended that you do not enable this for now
+ansible_service_broker_launch_apb_on_bind: false
diff --git a/roles/ansible_service_broker/meta/main.yml b/roles/ansible_service_broker/meta/main.yml
new file mode 100644
index 000000000..ec4aafb79
--- /dev/null
+++ b/roles/ansible_service_broker/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Fabian von Feilitzsch
+ description: OpenShift Ansible Service Broker
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.1
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
new file mode 100644
index 000000000..81c3f8e5b
--- /dev/null
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -0,0 +1,269 @@
+---
+
+# Fact setting and validations
+- name: Set default image variables based on deployment type
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "default_images.yml"
+
+- name: set ansible_service_broker facts
+ set_fact:
+ ansible_service_broker_image_prefix: "{{ ansible_service_broker_image_prefix | default(__ansible_service_broker_image_prefix) }}"
+ ansible_service_broker_image_tag: "{{ ansible_service_broker_image_tag | default(__ansible_service_broker_image_tag) }}"
+
+ ansible_service_broker_etcd_image_prefix: "{{ ansible_service_broker_etcd_image_prefix | default(__ansible_service_broker_etcd_image_prefix) }}"
+ ansible_service_broker_etcd_image_tag: "{{ ansible_service_broker_etcd_image_tag | default(__ansible_service_broker_etcd_image_tag) }}"
+ ansible_service_broker_etcd_image_etcd_path: "{{ ansible_service_broker_etcd_image_etcd_path | default(__ansible_service_broker_etcd_image_etcd_path) }}"
+
+ ansible_service_broker_registry_type: "{{ ansible_service_broker_registry_type | default(__ansible_service_broker_registry_type) }}"
+ ansible_service_broker_registry_url: "{{ ansible_service_broker_registry_url | default(__ansible_service_broker_registry_url) }}"
+ ansible_service_broker_registry_user: "{{ ansible_service_broker_registry_user | default(__ansible_service_broker_registry_user) }}"
+ ansible_service_broker_registry_password: "{{ ansible_service_broker_registry_password | default(__ansible_service_broker_registry_password) }}"
+ ansible_service_broker_registry_organization: "{{ ansible_service_broker_registry_organization | default(__ansible_service_broker_registry_organization) }}"
+
+- name: set ansible-service-broker image facts using the configured prefix and tag
+ set_fact:
+ ansible_service_broker_image: "{{ ansible_service_broker_image_prefix }}ansible-service-broker:{{ ansible_service_broker_image_tag }}"
+ ansible_service_broker_etcd_image: "{{ ansible_service_broker_etcd_image_prefix }}etcd:{{ ansible_service_broker_etcd_image_tag }}"
+
+- include: validate_facts.yml
+
+
+# Deployment of ansible-service-broker starts here
+- name: create openshift-ansible-service-broker project
+ oc_project:
+ name: openshift-ansible-service-broker
+ state: present
+
+- name: create ansible-service-broker serviceaccount
+ oc_serviceaccount:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: present
+
+- name: create ansible-service-broker service
+ oc_service:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: present
+ labels:
+ app: ansible-service-broker
+ service: asb
+ ports:
+ - name: port-1338
+ port: 1338
+ selector:
+ app: ansible-service-broker
+ service: asb
+
+- name: create etcd service
+ oc_service:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: present
+ ports:
+ - name: etcd-advertise
+ port: 2379
+ selector:
+ app: ansible-service-broker
+ service: etcd
+
+- name: create route for ansible-service-broker service
+ oc_route:
+ name: asb-1338
+ namespace: openshift-ansible-service-broker
+ state: present
+ service_name: asb
+ port: 1338
+ register: asb_route_out
+
+- name: get ansible-service-broker route name
+ set_fact:
+ ansible_service_broker_route: "{{ asb_route_out.results.results[0].spec.host }}"
+
+- name: create persistent volume claim for etcd
+ oc_obj:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: present
+ kind: PersistentVolumeClaim
+ content:
+ path: /tmp/dcout
+ data:
+ apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+
+- name: create etcd deployment
+ oc_obj:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: present
+ kind: Deployment
+ content:
+ path: /tmp/dcout
+ data:
+ apiVersion: extensions/v1beta1
+ kind: Deployment
+ metadata:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ labels:
+ app: ansible-service-broker
+ service: etcd
+ spec:
+ selector:
+ matchLabels:
+ app: ansible-service-broker
+ service: etcd
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxSurge: 1
+ maxUnavailable: 1
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: ansible-service-broker
+ service: etcd
+ spec:
+ restartPolicy: Always
+ containers:
+ - image: "{{ ansible_service_broker_etcd_image }}"
+ name: etcd
+ imagePullPolicy: IfNotPresent
+ terminationMessagePath: /tmp/termination-log
+ workingDir: /etcd
+ args:
+ - '{{ ansible_service_broker_etcd_image_etcd_path }}'
+ - --data-dir=/data
+ - "--listen-client-urls=http://0.0.0.0:2379"
+ - "--advertise-client-urls=http://0.0.0.0:2379"
+ ports:
+ - containerPort: 2379
+ protocol: TCP
+ env:
+ - name: ETCDCTL_API
+ value: "3"
+ volumeMounts:
+ - mountPath: /data
+ name: etcd
+ volumes:
+ - name: etcd
+ persistentVolumeClaim:
+ claimName: etcd
+
+- name: create ansible-service-broker deployment
+ oc_obj:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: present
+ kind: Deployment
+ content:
+ path: /tmp/dcout
+ data:
+ apiVersion: extensions/v1beta1
+ kind: Deployment
+ metadata:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ labels:
+ app: openshift-ansible-service-broker
+ service: asb
+ spec:
+ strategy:
+ type: Recreate
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: openshift-ansible-service-broker
+ service: asb
+ spec:
+ serviceAccount: asb
+ restartPolicy: Always
+ containers:
+ - image: "{{ ansible_service_broker_image }}"
+ name: asb
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: config-volume
+ mountPath: /etc/ansible-service-broker
+ ports:
+ - containerPort: 1338
+ protocol: TCP
+ env:
+ - name: BROKER_CONFIG
+ value: /etc/ansible-service-broker/config.yaml
+ terminationMessagePath: /tmp/termination-log
+ volumes:
+ - name: config-volume
+ configMap:
+ name: broker-config
+ items:
+ - key: broker-config
+ path: config.yaml
+
+
+# TODO: saw an oc_configmap module in the library, but didn't understand how to get it to do the following:
+- name: Create config map for ansible-service-broker
+ oc_obj:
+ name: broker-config
+ namespace: openshift-ansible-service-broker
+ state: present
+ kind: ConfigMap
+ content:
+ path: /tmp/cmout
+ data:
+ apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: broker-config
+ namespace: openshift-ansible-service-broker
+ labels:
+ app: ansible-service-broker
+ data:
+ broker-config: |
+ registry:
+ name: "{{ ansible_service_broker_registry_type }}"
+ url: "{{ ansible_service_broker_registry_url }}"
+ user: "{{ ansible_service_broker_registry_user }}"
+ pass: "{{ ansible_service_broker_registry_password }}"
+ org: "{{ ansible_service_broker_registry_organization }}"
+ dao:
+ etcd_host: etcd
+ etcd_port: 2379
+ log:
+ logfile: /var/log/ansible-service-broker/asb.log
+ stdout: true
+ level: "{{ ansible_service_broker_log_level }}"
+ color: true
+ openshift: {}
+ broker:
+ devbroker: false
+ launchapbonbind: "{{ ansible_service_broker_launch_apb_on_bind }}"
+
+- name: Create the Broker resource in the catalog
+ oc_obj:
+ name: ansible-service-broker
+ state: present
+ kind: Broker
+ content:
+ path: /tmp/brokerout
+ data:
+ apiVersion: servicecatalog.k8s.io/v1alpha1
+ kind: Broker
+ metadata:
+ name: ansible-service-broker
+ spec:
+ url: http://{{ ansible_service_broker_route }}
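As a worked illustration of how the image facts above compose: with the upstream values from vars/default_images.yml later in this diff, the two set_fact tasks resolve to ansibleplaybookbundle/ansible-service-broker:latest and quay.io/coreos/etcd:latest. A minimal sketch that prints the composed names (the debug task is illustrative only, not part of the role):

- name: show composed broker image names (illustrative sketch)
  debug:
    msg:
    - "{{ ansible_service_broker_image_prefix }}ansible-service-broker:{{ ansible_service_broker_image_tag }}"
    - "{{ ansible_service_broker_etcd_image_prefix }}etcd:{{ ansible_service_broker_etcd_image_tag }}"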
diff --git a/roles/ansible_service_broker/tasks/main.yml b/roles/ansible_service_broker/tasks/main.yml
new file mode 100644
index 000000000..b46ce8233
--- /dev/null
+++ b/roles/ansible_service_broker/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+# do any asserts here
+
+- include: install.yml
+ when: not ansible_service_broker_remove|default(false) | bool
+
+- include: remove.yml
+ when: ansible_service_broker_remove|default(false) | bool
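The two conditional includes above let a single boolean drive install versus removal; a minimal inventory sketch for triggering the uninstall path (the variable is defined in defaults/main.yml earlier in this diff):

# inventory excerpt (sketch) -- defaults to false, i.e. install
ansible_service_broker_remove: true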
diff --git a/roles/ansible_service_broker/tasks/remove.yml b/roles/ansible_service_broker/tasks/remove.yml
new file mode 100644
index 000000000..2519f9f4c
--- /dev/null
+++ b/roles/ansible_service_broker/tasks/remove.yml
@@ -0,0 +1,65 @@
+---
+
+- name: remove openshift-ansible-service-broker project
+ oc_project:
+ name: openshift-ansible-service-broker
+ state: absent
+
+- name: remove ansible-service-broker serviceaccount
+ oc_serviceaccount:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: absent
+
+- name: remove ansible-service-broker service
+ oc_service:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: absent
+
+- name: remove etcd service
+ oc_service:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: absent
+
+- name: remove route for ansible-service-broker service
+ oc_route:
+ name: asb-1338
+ namespace: openshift-ansible-service-broker
+ state: absent
+
+- name: remove persistent volume claim for etcd
+ oc_pvc:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: absent
+
+- name: remove etcd deployment
+ oc_obj:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: absent
+ kind: Deployment
+
+- name: remove ansible-service-broker deployment
+ oc_obj:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: absent
+ kind: Deployment
+
+# TODO: saw an oc_configmap module in the library, but didn't understand how to get it to do the following:
+- name: remove config map for ansible-service-broker
+ oc_obj:
+ name: broker-config
+ namespace: openshift-ansible-service-broker
+ state: absent
+ kind: ConfigMap
+
+# TODO: Is this going to work?
+- name: remove broker object from the catalog
+ oc_obj:
+ name: ansible-service-broker
+ state: absent
+ kind: Broker
diff --git a/roles/ansible_service_broker/tasks/validate_facts.yml b/roles/ansible_service_broker/tasks/validate_facts.yml
new file mode 100644
index 000000000..604d24e1d
--- /dev/null
+++ b/roles/ansible_service_broker/tasks/validate_facts.yml
@@ -0,0 +1,15 @@
+---
+- name: validate Dockerhub registry settings
+ fail: msg="To use the dockerhub registry, you must provide the ansible_service_broker_registry_user. ansible_service_broker_registry_password, and ansible_service_broker_registry_organization parameters"
+ when:
+ - ansible_service_broker_registry_type == 'dockerhub'
+ - not (ansible_service_broker_registry_user and
+ ansible_service_broker_registry_password and
+ ansible_service_broker_registry_organization)
+
+
+- name: validate RHCC registry settings
+ fail: msg="To use the Red Hat Container Catalog registry, you must provide the ansible_service_broker_registry_url"
+ when:
+ - ansible_service_broker_registry_type == 'rhcc'
+ - not ansible_service_broker_registry_url
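For the dockerhub registry type, the first check passes only when all three credential variables are set; a minimal inventory sketch with placeholder values (these are not real credentials or a confirmed organization):

ansible_service_broker_registry_type: dockerhub
ansible_service_broker_registry_user: example-user            # placeholder
ansible_service_broker_registry_password: example-password    # placeholder
ansible_service_broker_registry_organization: example-org     # placeholder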
diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml
new file mode 100644
index 000000000..15e448515
--- /dev/null
+++ b/roles/ansible_service_broker/vars/default_images.yml
@@ -0,0 +1,14 @@
+---
+
+__ansible_service_broker_image_prefix: ansibleplaybookbundle/
+__ansible_service_broker_image_tag: latest
+
+__ansible_service_broker_etcd_image_prefix: quay.io/coreos/
+__ansible_service_broker_etcd_image_tag: latest
+__ansible_service_broker_etcd_image_etcd_path: /usr/local/bin/etcd
+
+__ansible_service_broker_registry_type: dockerhub
+__ansible_service_broker_registry_url: null
+__ansible_service_broker_registry_user: null
+__ansible_service_broker_registry_password: null
+__ansible_service_broker_registry_organization: null
diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml
new file mode 100644
index 000000000..19b4a5147
--- /dev/null
+++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml
@@ -0,0 +1,14 @@
+---
+
+__ansible_service_broker_image_prefix: openshift3/
+__ansible_service_broker_image_tag: latest
+
+__ansible_service_broker_etcd_image_prefix: rhel7/
+__ansible_service_broker_etcd_image_tag: latest
+__ansible_service_broker_etcd_image_etcd_path: /bin/etcd
+
+__ansible_service_broker_registry_type: rhcc
+__ansible_service_broker_registry_url: "https://registry.access.redhat.com"
+__ansible_service_broker_registry_user: null
+__ansible_service_broker_registry_password: null
+__ansible_service_broker_registry_organization: null
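Because install.yml wraps each of these double-underscore defaults in `| default(...)`, any value can be overridden from the inventory without touching the vars files; a hypothetical excerpt (prefix and tag values are made up):

ansible_service_broker_image_prefix: registry.example.com/asb/   # hypothetical
ansible_service_broker_etcd_image_tag: v3.1                      # hypothetical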
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml
index 7f91afb37..3a4f4ba92 100644
--- a/roles/docker/handlers/main.yml
+++ b/roles/docker/handlers/main.yml
@@ -4,6 +4,11 @@
systemd:
name: "{{ openshift.docker.service_name }}"
state: restarted
+ register: r_docker_restart_docker_result
+ until: not r_docker_restart_docker_result | failed
+ retries: 1
+ delay: 30
+
when: not docker_service_status_changed | default(false) | bool
- name: restart udev
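The register/until/retries/delay combination added above is the stock Ansible retry idiom; the same pattern applied to an arbitrary unit, as a self-contained sketch (the service name is a placeholder):

- name: restart a service with one delayed retry (sketch)
  systemd:
    name: example.service   # placeholder unit name
    state: restarted
  register: r_restart_result
  until: not r_restart_result | failed
  retries: 1
  delay: 30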
diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service
index adeca7a91..d9327f433 100644
--- a/roles/etcd/templates/etcd.docker.service
+++ b/roles/etcd/templates/etcd.docker.service
@@ -7,7 +7,7 @@ PartOf={{ openshift.docker.service_name }}.service
[Service]
EnvironmentFile={{ etcd_conf_file }}
ExecStartPre=-/usr/bin/docker rm -f {{ etcd_service }}
-ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:z -v {{ etcd_conf_dir }}:{{ etcd_conf_dir }}:ro --env-file={{ etcd_conf_file }} --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
+ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:z -v {{ etcd_conf_dir }}:{{ etcd_conf_dir }}:ro --env-file={{ etcd_conf_file }} --net=host --security-opt label=type:spc_t --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
ExecStop=/usr/bin/docker stop {{ etcd_service }}
SyslogIdentifier=etcd_container
Restart=always
diff --git a/roles/etcd_common/tasks/backup.yml b/roles/etcd_common/tasks/backup.yml
index 4a4832275..1a0b857f1 100644
--- a/roles/etcd_common/tasks/backup.yml
+++ b/roles/etcd_common/tasks/backup.yml
@@ -1,10 +1,25 @@
---
+# set the etcd backup directory name here, in case the tag or suffix contains a dynamic value that changes over time
+# e.g. an openshift-backup-{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }} value changes every second, so if the date rolls over
+# right after the l_etcd_incontainer_backup_dir fact is set and before l_etcd_backup_dir is, the two backup directory names would differ
- set_fact:
- l_etcd_backup_dir: "{{ etcd_data_dir }}/openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}"
+ l_backup_dir_name: "openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}"
+
+- set_fact:
+ l_etcd_data_dir: "{{ etcd_data_dir }}{{ '/etcd.etcd' if r_etcd_common_etcd_runtime == 'runc' else '' }}"
+
+- set_fact:
+ l_etcd_incontainer_data_dir: "{{ etcd_data_dir }}"
+
+- set_fact:
+ l_etcd_incontainer_backup_dir: "{{ l_etcd_incontainer_data_dir }}/{{ l_backup_dir_name }}"
+
+- set_fact:
+ l_etcd_backup_dir: "{{ l_etcd_data_dir }}/{{ l_backup_dir_name }}"
# TODO: replace shell module with command and update later checks
- name: Check available disk space for etcd backup
- shell: df --output=avail -k {{ etcd_data_dir }} | tail -n 1
+ shell: df --output=avail -k {{ l_etcd_data_dir }} | tail -n 1
register: l_avail_disk
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
@@ -12,7 +27,7 @@
# TODO: replace shell module with command and update later checks
- name: Check current etcd disk usage
- shell: du --exclude='*openshift-backup*' -k {{ etcd_data_dir }} | tail -n 1 | cut -f1
+ shell: du --exclude='*openshift-backup*' -k {{ l_etcd_data_dir }} | tail -n 1 | cut -f1
register: l_etcd_disk_usage
when: r_etcd_common_embedded_etcd | bool
# AUDIT:changed_when: `false` because we are only inspecting
@@ -48,19 +63,19 @@
- name: Generate etcd backup
command: >
- {{ r_etcd_common_etcdctl_command }} backup --data-dir={{ etcd_data_dir }}
- --backup-dir={{ l_etcd_backup_dir }}
+ {{ r_etcd_common_etcdctl_command }} backup --data-dir={{ l_etcd_incontainer_data_dir }}
+ --backup-dir={{ l_etcd_incontainer_backup_dir }}
# According to the docs change, you can simply copy snap/db
# https://github.com/openshift/openshift-docs/commit/b38042de02d9780842dce95cfa0ef45d53b58bc6
- name: Check for v3 data store
stat:
- path: "{{ etcd_data_dir }}/member/snap/db"
+ path: "{{ l_etcd_data_dir }}/member/snap/db"
register: l_v3_db
- name: Copy etcd v3 data store
command: >
- cp -a {{ etcd_data_dir }}/member/snap/db
+ cp -a {{ l_etcd_data_dir }}/member/snap/db
{{ l_etcd_backup_dir }}/member/snap/
when: l_v3_db.stat.exists
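The comment at the top of this hunk describes a real race: a `date`-based lookup re-evaluates on every expansion, so deriving two paths from two separate lookups can straddle a second boundary. A contrast sketch (the paths are placeholders):

# Racy: the timestamp is evaluated twice and the two names may differ.
- set_fact:
    l_dir_a: "/backups/openshift-backup-{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- set_fact:
    l_dir_b: "/container/openshift-backup-{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"

# Safe, as done above: evaluate once, then reuse the stored name.
- set_fact:
    l_backup_dir_name: "openshift-backup-{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- set_fact:
    l_dir_a: "/backups/{{ l_backup_dir_name }}"
    l_dir_b: "/container/{{ l_backup_dir_name }}"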
diff --git a/roles/etcd_migrate/tasks/check.yml b/roles/etcd_migrate/tasks/check.yml
index 2f07713bc..800073873 100644
--- a/roles/etcd_migrate/tasks/check.yml
+++ b/roles/etcd_migrate/tasks/check.yml
@@ -6,7 +6,7 @@
# Run the migration only if the data are v2
- name: Check if there are any v3 data
command: >
- etcdctl --cert {{ etcd_peer_cert_file }} --key {{ etcd_peer_key_file }} --cacert {{ etcd_peer_ca_file }} --endpoints 'https://{{ etcd_peer }}:2379' get "" --from-key --keys-only -w json --limit 1
+ etcdctl --cert {{ etcd_peer_cert_file }} --key {{ etcd_peer_key_file }} --cacert {{ etcd_peer_ca_file }} --endpoints 'https://{{ etcd_peer }}:{{ etcd_client_port }}' get "" --from-key --keys-only -w json --limit 1
environment:
ETCDCTL_API: 3
register: l_etcdctl_output
diff --git a/roles/etcd_migrate/tasks/check_cluster_health.yml b/roles/etcd_migrate/tasks/check_cluster_health.yml
index 1abd6a32f..201d83f99 100644
--- a/roles/etcd_migrate/tasks/check_cluster_health.yml
+++ b/roles/etcd_migrate/tasks/check_cluster_health.yml
@@ -1,7 +1,7 @@
---
- name: Check cluster health
command: >
- etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt --endpoint https://{{ etcd_peer }}:2379 cluster-health
+ etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} --endpoint https://{{ etcd_peer }}:{{ etcd_client_port }} cluster-health
register: etcd_cluster_health
changed_when: false
failed_when: false
diff --git a/roles/etcd_migrate/tasks/check_cluster_status.yml b/roles/etcd_migrate/tasks/check_cluster_status.yml
index 90fe385c1..b69fb5a52 100644
--- a/roles/etcd_migrate/tasks/check_cluster_status.yml
+++ b/roles/etcd_migrate/tasks/check_cluster_status.yml
@@ -2,7 +2,7 @@
# etcd_ip originates from etcd_common role
- name: Check cluster status
command: >
- etcdctl --cert /etc/etcd/peer.crt --key /etc/etcd/peer.key --cacert /etc/etcd/ca.crt --endpoints 'https://{{ etcd_peer }}:2379' -w json endpoint status
+ etcdctl --cert {{ etcd_peer_cert_file }} --key {{ etcd_peer_key_file }} --cacert {{ etcd_peer_ca_file }} --endpoints 'https://{{ etcd_peer }}:{{ etcd_client_port }}' -w json endpoint status
environment:
ETCDCTL_API: 3
register: l_etcd_cluster_status
@@ -15,7 +15,7 @@
# http://docs.ansible.com/ansible/playbooks_filters.html#extracting-values-from-containers
- name: Group all raftIndices into a list
set_fact:
- etcd_members_raft_indices: "{{ groups['oo_etcd_to_config'] | map('extract', hostvars, 'etcd_member_raft_index') | list | unique }}"
+ etcd_members_raft_indices: "{{ groups['oo_etcd_to_migrate'] | map('extract', hostvars, 'etcd_member_raft_index') | list | unique }}"
- name: Check the minimum and the maximum of raftIndices is at most 1
set_fact:
@@ -24,9 +24,9 @@
- debug:
msg: "Raft indices difference: {{ etcd_members_raft_indices_diff }}"
- when: inventory_hostname in groups.oo_etcd_to_config[0]
+ when: inventory_hostname in groups.oo_etcd_to_migrate[0]
# The cluster raft status is ok if the difference of the max and min raft index is at most 1
- name: capture the status
set_fact:
- l_etcd_cluster_status_ok: "{{ hostvars[groups.oo_etcd_to_config[0]]['etcd_members_raft_indices_diff'] | int < 2 }}"
+ l_etcd_cluster_status_ok: "{{ hostvars[groups.oo_etcd_to_migrate[0]]['etcd_members_raft_indices_diff'] | int < 2 }}"
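The `map('extract', hostvars, ...)` chain above is the documented Jinja2 idiom for gathering one fact from every host in a group; reduced to a self-contained sketch (group and fact names are placeholders):

- name: collect a per-host fact across a group (sketch)
  set_fact:
    all_indices: "{{ groups['example_group'] | map('extract', hostvars, 'example_fact') | list | unique }}"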
diff --git a/roles/etcd_migrate/tasks/migrate.yml b/roles/etcd_migrate/tasks/migrate.yml
index cb479b0cc..7f441568a 100644
--- a/roles/etcd_migrate/tasks/migrate.yml
+++ b/roles/etcd_migrate/tasks/migrate.yml
@@ -20,34 +20,36 @@
- name: Check the etcd v2 data are correctly migrated
fail:
msg: "Failed to migrate a member"
- when: "'finished transforming keys' not in l_etcdctl_migrate.stdout"
+ when: "'finished transforming keys' not in l_etcdctl_migrate.stdout and 'no v2 keys to migrate' not in l_etcdctl_migrate.stdout"
+
+- name: Migration message
+ debug:
+ msg: "Etcd migration finished with: {{ l_etcdctl_migrate.stdout }}"
-# TODO(jchaloup): start the etcd on a different port so noone can access it
-# Once the validation is done
- name: Enable etcd member
service:
name: "{{ l_etcd_service }}"
state: started
+# NOTE: /usr/local/bin may be removed from the PATH by Ansible, which is
+# why it's added back to the environment in this task.
- name: Re-introduce leases (as a replacement for key TTLs)
command: >
oadm migrate etcd-ttl \
--cert {{ etcd_peer_cert_file }} \
--key {{ etcd_peer_key_file }} \
--cacert {{ etcd_peer_ca_file }} \
- --etcd-address 'https://{{ etcd_peer }}:2379' \
+ --etcd-address 'https://{{ etcd_peer }}:{{ etcd_client_port }}' \
--ttl-keys-prefix {{ item }} \
--lease-duration 1h
environment:
ETCDCTL_API: 3
+ PATH: "/usr/local/bin:/var/usrlocal/bin:{{ ansible_env.PATH }}"
with_items:
- "/kubernetes.io/events"
- "/kubernetes.io/masterleases"
+ delegate_to: "{{ groups.oo_first_master[0] }}"
+ run_once: true
- set_fact:
r_etcd_migrate_success: true
-
-- name: Enable etcd member
- service:
- name: "{{ l_etcd_service }}"
- state: started
diff --git a/roles/lib_openshift/library/oc_atomic_container.py b/roles/lib_openshift/library/oc_atomic_container.py
index 91c0d752f..955c6313e 100644
--- a/roles/lib_openshift/library/oc_atomic_container.py
+++ b/roles/lib_openshift/library/oc_atomic_container.py
@@ -194,7 +194,7 @@ def main():
)
# Verify that the platform supports atomic command
- rc, version_out, err = module.run_command('atomic -v', check_rc=False)
+ rc, version_out, err = module.run_command('rpm -q --queryformat "%{VERSION}\n" atomic', check_rc=False)
if rc != 0:
module.fail_json(msg="Error in running atomic command", err=err)
# This module requires atomic version 1.17.2 or later
diff --git a/roles/lib_openshift/src/ansible/oc_atomic_container.py b/roles/lib_openshift/src/ansible/oc_atomic_container.py
index 16848e9c6..7b81760df 100644
--- a/roles/lib_openshift/src/ansible/oc_atomic_container.py
+++ b/roles/lib_openshift/src/ansible/oc_atomic_container.py
@@ -130,7 +130,7 @@ def main():
)
# Verify that the platform supports atomic command
- rc, version_out, err = module.run_command('atomic -v', check_rc=False)
+ rc, version_out, err = module.run_command('rpm -q --queryformat "%{VERSION}\n" atomic', check_rc=False)
if rc != 0:
module.fail_json(msg="Error in running atomic command", err=err)
# This module requires atomic version 1.17.2 or later
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index b9a7ec32f..419679bc2 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -117,25 +117,46 @@
delegate_to: "{{ openshift_ca_host }}"
run_once: true
-- name: Generate the loopback master client config
- command: >
- {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
- {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
- --certificate-authority {{ named_ca_certificate }}
- {% endfor %}
- --certificate-authority={{ openshift_ca_cert }}
- --client-dir={{ openshift_ca_config_dir }}
- --groups=system:masters,system:openshift-master
- --master={{ hostvars[openshift_ca_host].openshift.master.loopback_api_url }}
- --public-master={{ hostvars[openshift_ca_host].openshift.master.loopback_api_url }}
- --signer-cert={{ openshift_ca_cert }}
- --signer-key={{ openshift_ca_key }}
- --signer-serial={{ openshift_ca_serial }}
- --user=system:openshift-master
- --basename=openshift-master
- {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
- --expire-days={{ openshift_master_cert_expire_days }}
- {% endif %}
+# create-api-client-config generates a ca.crt file which will
+# overwrite the OpenShift CA certificate. Generate the loopback
+# kubeconfig in a temporary directory and then copy files into the
+# master config dir to avoid overwriting ca.crt.
+- block:
+ - name: Create temp directory for loopback master client config
+ command: mktemp -d /tmp/openshift-ansible-XXXXXX
+ register: openshift_ca_loopback_tmpdir
+ - name: Generate the loopback master client config
+ command: >
+ {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+ --certificate-authority={{ openshift_ca_cert }}
+ {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
+ --certificate-authority {{ named_ca_certificate }}
+ {% endfor %}
+ --client-dir={{ openshift_ca_loopback_tmpdir.stdout }}
+ --groups=system:masters,system:openshift-master
+ --master={{ hostvars[openshift_ca_host].openshift.master.loopback_api_url }}
+ --public-master={{ hostvars[openshift_ca_host].openshift.master.loopback_api_url }}
+ --signer-cert={{ openshift_ca_cert }}
+ --signer-key={{ openshift_ca_key }}
+ --signer-serial={{ openshift_ca_serial }}
+ --user=system:openshift-master
+ --basename=openshift-master
+ {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
+ --expire-days={{ openshift_master_cert_expire_days }}
+ {% endif %}
+ - name: Copy generated loopback master client config to master config dir
+ copy:
+ src: "{{ openshift_ca_loopback_tmpdir.stdout }}/{{ item }}"
+ dest: "{{ openshift_ca_config_dir }}"
+ remote_src: true
+ with_items:
+ - openshift-master.crt
+ - openshift-master.key
+ - openshift-master.kubeconfig
+ - name: Delete temp directory
+ file:
+ name: "{{ openshift_ca_loopback_tmpdir.stdout }}"
+ state: absent
when: loopback_context_string not in loopback_config.stdout
delegate_to: "{{ openshift_ca_host }}"
run_once: true
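The block above is a reusable idiom whenever a generator writes files you only partly want: generate into a throwaway mktemp directory, copy just the desired artifacts into place, then delete the directory. The skeleton, with placeholder names:

- block:
  - name: create scratch directory
    command: mktemp -d /tmp/openshift-ansible-XXXXXX
    register: l_tmpdir
  - name: run the generator into the scratch directory (placeholder command)
    command: example-generator --output-dir={{ l_tmpdir.stdout }}
  - name: copy only the wanted files into place
    copy:
      src: "{{ l_tmpdir.stdout }}/{{ item }}"
      dest: /etc/example/config.d/   # placeholder destination
      remote_src: true
    with_items:
    - wanted-file-one
    - wanted-file-two
  - name: clean up the scratch directory
    file:
      name: "{{ l_tmpdir.stdout }}"
      state: absent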
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
index 0242f5b43..44a8fa29b 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
@@ -104,6 +104,7 @@ platforms missing the Python OpenSSL library.
self.extensions = []
PARSING_ALT_NAMES = False
+ PARSING_HEX_SERIAL = False
for line in self.cert_string.split('\n'):
l = line.strip()
if PARSING_ALT_NAMES:
@@ -114,10 +115,26 @@ platforms missing the Python OpenSSL library.
PARSING_ALT_NAMES = False
continue
+ if PARSING_HEX_SERIAL:
+ # Hex serials arrive colon-delimited
+ serial_raw = l.replace(':', '')
+ # Convert to decimal
+ self.serial = int('0x' + serial_raw, base=16)
+ PARSING_HEX_SERIAL = False
+ continue
+
# parse out the bits that we can
if l.startswith('Serial Number:'):
- # Serial Number: 11 (0xb)
- # => 11
+ # Decimal format:
+ # Serial Number: 11 (0xb)
+ # => 11
+ # Hex Format (large serials):
+ # Serial Number:
+ # 0a:de:eb:24:04:75:ab:56:39:14:e9:5a:22:e2:85:bf
+ # => 14449739080294792594019643629255165375
+ if l.endswith(':'):
+ PARSING_HEX_SERIAL = True
+ continue
self.serial = int(l.split()[-2])
elif l.startswith('Not After :'):
diff --git a/roles/openshift_certificate_expiry/test/conftest.py b/roles/openshift_certificate_expiry/test/conftest.py
index 4ca35ecbc..df948fff0 100644
--- a/roles/openshift_certificate_expiry/test/conftest.py
+++ b/roles/openshift_certificate_expiry/test/conftest.py
@@ -23,7 +23,10 @@ VALID_CERTIFICATE_PARAMS = [
{
'short_name': 'combined',
'cn': 'combined.example.com',
- 'serial': 6,
+ # Verify that HUGE serials parse correctly.
+ # Frobs PARSING_HEX_SERIAL in _parse_cert
+ # See https://bugzilla.redhat.com/show_bug.cgi?id=1464240
+ 'serial': 14449739080294792594019643629255165375,
'uses': b'clientAuth, serverAuth',
'dns': ['etcd'],
'ip': ['10.0.0.2', '192.168.0.2']
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-app-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-app-example.yaml
deleted file mode 100644
index 14bdd1dca..000000000
--- a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-app-example.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: cloudforms
-spec:
- capacity:
- storage: 2Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: /opt/nfs/volumes-app
- server: 10.19.0.216
- persistentVolumeReclaimPolicy: Recycle
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-db-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-db-example.yaml
new file mode 100644
index 000000000..250a99b8d
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-db-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cfme-pv01
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/cfme-pv01
+ server: <your-nfs-host-here>
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-example.yaml
deleted file mode 100644
index 709d8d976..000000000
--- a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-example.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: nfs-pv01
-spec:
- capacity:
- storage: 2Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: /opt/nfs/volumes
- server: 10.19.0.216
- persistentVolumeReclaimPolicy: Recycle
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-region-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-region-example.yaml
new file mode 100644
index 000000000..cba9bbe35
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-region-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cfme-pv02
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/cfme-pv02
+ server: <your-nfs-host-here>
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-server-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-server-example.yaml
new file mode 100644
index 000000000..c08c21265
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-server-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cfme-pv03
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/cfme-pv03
+ server: <your-nfs-host-here>
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml
index 4f25a9c8f..3bc6c5813 100644
--- a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml
@@ -17,6 +17,7 @@ objects:
service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
name: ${NAME}
spec:
+ clusterIP: None
ports:
- name: http
port: 80
@@ -48,11 +49,27 @@ objects:
annotations:
description: "Keeps track of changes in the CloudForms app image"
spec:
- dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-app
+ dockerImageRepository: "${APPLICATION_IMG_NAME}"
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-postgresql
+ annotations:
+ description: "Keeps track of changes in the CloudForms postgresql image"
+ spec:
+ dockerImageRepository: "${POSTGRESQL_IMG_NAME}"
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-memcached
+ annotations:
+ description: "Keeps track of changes in the CloudForms memcached image"
+ spec:
+ dockerImageRepository: "${MEMCACHED_IMG_NAME}"
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: ${DATABASE_SERVICE_NAME}
+ name: "${NAME}-${DATABASE_SERVICE_NAME}"
spec:
accessModes:
- ReadWriteOnce
@@ -62,45 +79,41 @@ objects:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: ${NAME}
+ name: "${NAME}-region"
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
- storage: ${APPLICATION_VOLUME_CAPACITY}
-- apiVersion: v1
- kind: "DeploymentConfig"
+ storage: ${APPLICATION_REGION_VOLUME_CAPACITY}
+- apiVersion: apps/v1beta1
+ kind: "StatefulSet"
metadata:
name: ${NAME}
annotations:
description: "Defines how to deploy the CloudForms appliance"
spec:
+ serviceName: "${NAME}"
+ replicas: 1
template:
metadata:
labels:
name: ${NAME}
name: ${NAME}
spec:
- volumes:
- -
- name: "cfme-app-volume"
- persistentVolumeClaim:
- claimName: ${NAME}
containers:
- - image: cloudforms/cfme-openshift-app:${APPLICATION_IMG_TAG}
- imagePullPolicy: IfNotPresent
- name: cloudforms
+ - name: cloudforms
+ image: "${APPLICATION_IMG_NAME}:${APPLICATION_IMG_TAG}"
livenessProbe:
- httpGet:
- path: /
- port: 80
+ tcpSocket:
+ port: 443
initialDelaySeconds: 480
timeoutSeconds: 3
readinessProbe:
httpGet:
path: /
- port: 80
+ port: 443
+ scheme: HTTPS
initialDelaySeconds: 200
timeoutSeconds: 3
ports:
@@ -112,8 +125,11 @@ objects:
privileged: true
volumeMounts:
-
- name: "cfme-app-volume"
+ name: "${NAME}-server"
mountPath: "/persistent"
+ -
+ name: "${NAME}-region"
+ mountPath: "/persistent-region"
env:
-
name: "APPLICATION_INIT_DELAY"
@@ -144,29 +160,32 @@ objects:
value: "${POSTGRESQL_SHARED_BUFFERS}"
resources:
requests:
- memory: "${MEMORY_APPLICATION_MIN}"
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
lifecycle:
preStop:
exec:
command:
- /opt/rh/cfme-container-scripts/sync-pv-data
- replicas: 1
- selector:
- name: ${NAME}
- triggers:
- - type: "ConfigChange"
- - type: "ImageChange"
- imageChangeParams:
- automatic: true
- containerNames:
- - "cloudforms"
- from:
- kind: "ImageStreamTag"
- name: "cfme-openshift-app:${APPLICATION_IMG_TAG}"
- strategy:
- type: "Recreate"
- recreateParams:
- timeoutSeconds: 1200
+ volumes:
+ -
+ name: "${NAME}-region"
+ persistentVolumeClaim:
+ claimName: ${NAME}-region
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ # Uncomment this if using dynamic volume provisioning.
+ # https://docs.openshift.org/latest/install_config/persistent_storage/dynamically_provisioning_pvs.html
+ # volume.alpha.kubernetes.io/storage-class: anything
+ spec:
+ accessModes: [ ReadWriteOnce ]
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
- apiVersion: v1
kind: "Service"
metadata:
@@ -182,14 +201,6 @@ objects:
selector:
name: "${MEMCACHED_SERVICE_NAME}"
- apiVersion: v1
- kind: ImageStream
- metadata:
- name: cfme-openshift-memcached
- annotations:
- description: "Keeps track of changes in the CloudForms memcached image"
- spec:
- dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-memcached
-- apiVersion: v1
kind: "DeploymentConfig"
metadata:
name: "${MEMCACHED_SERVICE_NAME}"
@@ -223,7 +234,7 @@ objects:
containers:
-
name: "memcached"
- image: "cloudforms/cfme-openshift-memcached:${MEMCACHED_IMG_TAG}"
+ image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
ports:
-
containerPort: 11211
@@ -249,8 +260,11 @@ objects:
name: "MEMCACHED_SLAB_PAGE_SIZE"
value: "${MEMCACHED_SLAB_PAGE_SIZE}"
resources:
+ requests:
+ memory: "${MEMCACHED_MEM_REQ}"
+ cpu: "${MEMCACHED_CPU_REQ}"
limits:
- memory: "${MEMORY_MEMCACHED_LIMIT}"
+ memory: "${MEMCACHED_MEM_LIMIT}"
- apiVersion: v1
kind: "Service"
metadata:
@@ -266,14 +280,6 @@ objects:
selector:
name: "${DATABASE_SERVICE_NAME}"
- apiVersion: v1
- kind: ImageStream
- metadata:
- name: cfme-openshift-postgresql
- annotations:
- description: "Keeps track of changes in the CloudForms postgresql image"
- spec:
- dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-postgresql
-- apiVersion: v1
kind: "DeploymentConfig"
metadata:
name: "${DATABASE_SERVICE_NAME}"
@@ -307,11 +313,11 @@ objects:
-
name: "cfme-pgdb-volume"
persistentVolumeClaim:
- claimName: ${DATABASE_SERVICE_NAME}
+ claimName: "${NAME}-${DATABASE_SERVICE_NAME}"
containers:
-
name: "postgresql"
- image: "cloudforms/cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}"
+ image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}"
ports:
-
containerPort: 5432
@@ -350,8 +356,11 @@ objects:
name: "POSTGRESQL_SHARED_BUFFERS"
value: "${POSTGRESQL_SHARED_BUFFERS}"
resources:
+ requests:
+ memory: "${POSTGRESQL_MEM_REQ}"
+ cpu: "${POSTGRESQL_CPU_REQ}"
limits:
- memory: "${MEMORY_POSTGRESQL_LIMIT}"
+ memory: "${POSTGRESQL_MEM_LIMIT}"
parameters:
-
@@ -420,36 +429,87 @@ parameters:
name: "POSTGRESQL_SHARED_BUFFERS"
displayName: "PostgreSQL Shared Buffer Amount"
description: "Amount of memory dedicated for PostgreSQL shared memory buffers."
- value: "64MB"
+ value: "256MB"
-
- name: "MEMORY_APPLICATION_MIN"
- displayName: "Application Memory Minimum"
+ name: "APPLICATION_CPU_REQ"
+ displayName: "Application Min CPU Requested"
+ required: true
+ description: "Minimum amount of CPU time the Application container will need (expressed in millicores)."
+ value: "1000m"
+ -
+ name: "POSTGRESQL_CPU_REQ"
+ displayName: "PostgreSQL Min CPU Requested"
+ required: true
+ description: "Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores)."
+ value: "500m"
+ -
+ name: "MEMCACHED_CPU_REQ"
+ displayName: "Memcached Min CPU Requested"
+ required: true
+ description: "Minimum amount of CPU time the Memcached container will need (expressed in millicores)."
+ value: "200m"
+ -
+ name: "APPLICATION_MEM_REQ"
+ displayName: "Application Min RAM Requested"
required: true
description: "Minimum amount of memory the Application container will need."
- value: "4096Mi"
+ value: "6144Mi"
+ -
+ name: "POSTGRESQL_MEM_REQ"
+ displayName: "PostgreSQL Min RAM Requested"
+ required: true
+ description: "Minimum amount of memory the PostgreSQL container will need."
+ value: "1024Mi"
-
- name: "MEMORY_POSTGRESQL_LIMIT"
- displayName: "PostgreSQL Memory Limit"
+ name: "MEMCACHED_MEM_REQ"
+ displayName: "Memcached Min RAM Requested"
required: true
- description: "Maximum amount of memory the PostgreSQL container can use."
- value: "2048Mi"
+ description: "Minimum amount of memory the Memcached container will need."
+ value: "64Mi"
-
- name: "MEMORY_MEMCACHED_LIMIT"
- displayName: "Memcached Memory Limit"
+ name: "APPLICATION_MEM_LIMIT"
+ displayName: "Application Max RAM Limit"
required: true
- description: "Maximum amount of memory the Memcached container can use."
+ description: "Maximum amount of memory the Application container can consume."
+ value: "16384Mi"
+ -
+ name: "POSTGRESQL_MEM_LIMIT"
+ displayName: "PostgreSQL Max RAM Limit"
+ required: true
+ description: "Maximum amount of memory the PostgreSQL container can consume."
+ value: "8192Mi"
+ -
+ name: "MEMCACHED_MEM_LIMIT"
+ displayName: "Memcached Max RAM Limit"
+ required: true
+ description: "Maximum amount of memory the Memcached container can consume."
value: "256Mi"
-
+ name: "POSTGRESQL_IMG_NAME"
+ displayName: "PostgreSQL Image Name"
+ description: "This is the PostgreSQL image name requested to deploy."
+ value: "registry.access.redhat.com/cloudforms45/cfme-openshift-postgresql"
+ -
name: "POSTGRESQL_IMG_TAG"
displayName: "PostgreSQL Image Tag"
description: "This is the PostgreSQL image tag/version requested to deploy."
value: "latest"
-
+ name: "MEMCACHED_IMG_NAME"
+ displayName: "Memcached Image Name"
+ description: "This is the Memcached image name requested to deploy."
+ value: "registry.access.redhat.com/cloudforms45/cfme-openshift-memcached"
+ -
name: "MEMCACHED_IMG_TAG"
displayName: "Memcached Image Tag"
description: "This is the Memcached image tag/version requested to deploy."
value: "latest"
-
+ name: "APPLICATION_IMG_NAME"
+ displayName: "Application Image Name"
+ description: "This is the Application image name requested to deploy."
+ value: "registry.access.redhat.com/cloudforms45/cfme-openshift-app"
+ -
name: "APPLICATION_IMG_TAG"
displayName: "Application Image Tag"
description: "This is the Application image tag/version requested to deploy."
@@ -464,16 +524,22 @@ parameters:
displayName: "Application Init Delay"
required: true
description: "Delay in seconds before we attempt to initialize the application."
- value: "30"
+ value: "15"
-
name: "APPLICATION_VOLUME_CAPACITY"
displayName: "Application Volume Capacity"
required: true
description: "Volume space available for application data."
- value: "1Gi"
+ value: "5Gi"
+ -
+ name: "APPLICATION_REGION_VOLUME_CAPACITY"
+ displayName: "Application Region Volume Capacity"
+ required: true
+ description: "Volume space available for region application data."
+ value: "5Gi"
-
name: "DATABASE_VOLUME_CAPACITY"
displayName: "Database Volume Capacity"
required: true
description: "Volume space available for database."
- value: "1Gi"
+ value: "15Gi"
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-pv-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-pv-example.yaml
new file mode 100644
index 000000000..240f6cbdf
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-pv-example.yaml
@@ -0,0 +1,58 @@
+#
+# Copyright 2016-2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: Template
+parameters:
+- name: HAWKULAR_SERVICES_DATA_LIMIT
+ description: Maximum amount of data used by the hawkular-services container (mostly logging)
+ displayName: Hawkular Services Container Data Limit
+ value: 1Gi
+- name: CASSANDRA_DATA_LIMIT
+ description: Maximum amount of data used by the Cassandra container
+ displayName: Cassandra Container Data Limit
+ value: 2Gi
+
+objects:
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: h-services-pv
+ labels:
+ type: h-services
+ spec:
+ capacity:
+ storage: ${HAWKULAR_SERVICES_DATA_LIMIT}
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /tmp/pv-services
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: cassandra-pv
+ labels:
+ type: cassandra
+ spec:
+ capacity:
+ storage: ${CASSANDRA_DATA_LIMIT}
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /tmp/pv-cassandra
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-template.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-template.yaml
new file mode 100644
index 000000000..bbc0c7044
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-template.yaml
@@ -0,0 +1,254 @@
+#
+# Copyright 2016-2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: Template
+metadata:
+ name: hawkular-services
+ annotations:
+ openshift.io/display-name: Hawkular Services
+ description: Hawkular-Services all-in-one (including Hawkular Metrics, Hawkular Alerts and Hawkular Inventory).
+ iconClass: icon-wildfly
+ tags: hawkular,hawkular-services,metrics,alerts,manageiq,cassandra
+
+parameters:
+- name: HAWKULAR_SERVICES_IMAGE
+ description: The docker image to use for hawkular-services.
+ displayName: Hawkular Services Docker Image
+ value: registry.access.redhat.com/jboss-mm-7-tech-preview/middleware-manager:latest
+- name: CASSANDRA_IMAGE
+ description: The docker image to use for the cassandra node.
+ displayName: Cassandra Docker Image
+ value: registry.access.redhat.com/openshift3/metrics-cassandra:3.5.0
+- name: CASSANDRA_MEMORY_LIMIT
+ description: Maximum amount of memory for Cassandra container.
+ displayName: Cassandra Memory Limit
+ value: 2Gi
+- name: CASSANDRA_DATA_LIMIT
+ description: Maximum amount of data used by the Cassandra container.
+ displayName: Cassandra Container Data Limit
+ value: 2Gi
+- name: HAWKULAR_SERVICES_DATA_LIMIT
+ description: Maximum amount of data used by the hawkular-services container (mostly logging).
+ displayName: Hawkular Services Container Data Limit
+ value: 1Gi
+- name: ROUTE_NAME
+ description: Public route with this name will be created.
+ displayName: Route Name
+ value: hawkular-services
+- name: ROUTE_HOSTNAME
+ description: The hostname under which the Hawkular Services will be accessible; if left blank, a value will be defaulted.
+ displayName: Hostname
+- name: HAWKULAR_USER
+ description: Username that is used for accessing the Hawkular Services; if left blank, a value will be generated.
+ displayName: Hawkular User
+ from: '[a-zA-Z0-9]{16}'
+ generate: expression
+- name: HAWKULAR_PASSWORD
+ description: Password that is used for accessing the Hawkular Services; if left blank, a value will be generated.
+ displayName: Hawkular Password
+ from: '[a-zA-Z0-9]{16}'
+ generate: expression
+labels:
+ template: hawkular-services
+message: Credentials for hawkular-services are ${HAWKULAR_USER}:${HAWKULAR_PASSWORD}
+
+objects:
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances the application pods
+ service.alpha.openshift.io/dependencies: '[{"name":"hawkular-cassandra","namespace":"","kind":"Service"}]'
+ name: hawkular-services
+ spec:
+ ports:
+ - name: http-8080-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: admin-9990-tcp
+ port: 9990
+ protocol: TCP
+ targetPort: 9990
+ selector:
+ name: hawkular-services
+ type: ClusterIP
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Cassandra Service
+ name: hawkular-cassandra
+ spec:
+ ports:
+ - name: cql-9042-tcp
+ port: 9042
+ protocol: TCP
+ targetPort: 9042
+ selector:
+ name: hawkular-cassandra
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: ${ROUTE_NAME}
+ spec:
+ host: ${ROUTE_HOSTNAME}
+ to:
+ kind: Service
+ name: hawkular-services
+ port:
+ targetPort: http-8080-tcp
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ annotations:
+ description: Defines how to deploy the application server
+ name: hawkular-services
+ spec:
+ replicas: 1
+ selector:
+ name: hawkular-services
+ strategy:
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: hawkular-services
+ spec:
+ containers:
+ - image: ${HAWKULAR_SERVICES_IMAGE}
+ env:
+ - name: HAWKULAR_BACKEND
+ value: remote
+ - name: CASSANDRA_NODES
+ value: hawkular-cassandra
+ - name: HAWKULAR_USER
+ value: ${HAWKULAR_USER}
+ - name: HAWKULAR_PASSWORD
+ value: ${HAWKULAR_PASSWORD}
+ imagePullPolicy: IfNotPresent
+ name: hawkular-services
+ volumeMounts:
+ - name: h-services-data
+ mountPath: /var/opt/hawkular
+ ports:
+ - containerPort: 8080
+ - containerPort: 9990
+ livenessProbe:
+ exec:
+ command:
+ - /opt/hawkular/bin/ready.sh
+ initialDelaySeconds: 180
+ timeoutSeconds: 3
+ readinessProbe:
+ exec:
+ command:
+ - /opt/hawkular/bin/ready.sh
+ initialDelaySeconds: 120
+ timeoutSeconds: 3
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 12
+ resources:
+ requests:
+ memory: 1024Mi
+ cpu: 2000m
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ volumes:
+ - name: h-services-data
+ persistentVolumeClaim:
+ claimName: h-services-pvc
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ annotations:
+ description: Defines how to deploy the cassandra
+ name: hawkular-cassandra
+ spec:
+ replicas: 1
+ selector:
+ name: hawkular-cassandra
+ strategy:
+ type: Recreate
+ rollingParams:
+ timeoutSeconds: 300
+ template:
+ metadata:
+ labels:
+ name: hawkular-cassandra
+ spec:
+ containers:
+ - image: ${CASSANDRA_IMAGE}
+ imagePullPolicy: Always
+ name: hawkular-cassandra
+ env:
+ - name: DATA_VOLUME
+ value: /var/lib/cassandra
+ volumeMounts:
+ - name: cassandra-data
+ mountPath: /var/lib/cassandra
+ ports:
+ - containerPort: 9042
+ - containerPort: 9160
+ readinessProbe:
+ exec:
+ command: ['nodetool', 'status']
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 15
+ successThreshold: 1
+ failureThreshold: 3
+ livenessProbe:
+ exec:
+ command: ['nodetool', 'status']
+ initialDelaySeconds: 300
+ timeoutSeconds: 10
+ periodSeconds: 15
+ successThreshold: 1
+ failureThreshold: 3
+ resources:
+ limits:
+ memory: ${CASSANDRA_MEMORY_LIMIT}
+ volumes:
+ - name: cassandra-data
+ persistentVolumeClaim:
+ claimName: cassandra-pvc
+
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: h-services-pvc
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: cassandra-pvc
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
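
For reference, the 'generate: expression' parameter above asks the template processor for a random string matching the given character class. A rough Python sketch of the equivalent generation, assuming only the simple '[class]{n}' form used here (illustrative only; the real generator runs server-side):

    import random
    import string

    # 16 characters drawn from [a-zA-Z0-9], as in from: '[a-zA-Z0-9]{16}'
    alphabet = string.ascii_letters + string.digits
    password = ''.join(random.choice(alphabet) for _ in range(16))
    print(password)
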
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 0788ddfb0..cc2a1d2eb 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -537,6 +537,7 @@ def set_node_schedulability(facts):
return facts
+# pylint: disable=too-many-branches
def set_selectors(facts):
""" Set selectors facts if not already present in facts dict
Args:
@@ -570,6 +571,10 @@ def set_selectors(facts):
facts['hosted']['logging'] = {}
if 'selector' not in facts['hosted']['logging'] or facts['hosted']['logging']['selector'] in [None, 'None']:
facts['hosted']['logging']['selector'] = None
+ if 'etcd' not in facts['hosted']:
+ facts['hosted']['etcd'] = {}
+ if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']:
+ facts['hosted']['etcd']['selector'] = None
return facts
@@ -907,17 +912,17 @@ def set_version_facts_if_unset(facts):
version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('1.1.1')
version_gte_3_2_or_1_2 = version >= LooseVersion('1.2.0')
version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0')
- version_gte_3_4_or_1_4 = version >= LooseVersion('1.4.0')
- version_gte_3_5_or_1_5 = version >= LooseVersion('1.5.0')
- version_gte_3_6 = version >= LooseVersion('3.6.0')
+ version_gte_3_4_or_1_4 = version >= LooseVersion('1.4')
+ version_gte_3_5_or_1_5 = version >= LooseVersion('1.5')
+ version_gte_3_6 = version >= LooseVersion('3.6')
else:
version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905')
version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1')
version_gte_3_2_or_1_2 = version >= LooseVersion('3.1.1.901')
version_gte_3_3_or_1_3 = version >= LooseVersion('3.3.0')
- version_gte_3_4_or_1_4 = version >= LooseVersion('3.4.0')
- version_gte_3_5_or_1_5 = version >= LooseVersion('3.5.0')
- version_gte_3_6 = version >= LooseVersion('3.6.0')
+ version_gte_3_4_or_1_4 = version >= LooseVersion('3.4')
+ version_gte_3_5_or_1_5 = version >= LooseVersion('3.5')
+ version_gte_3_6 = version >= LooseVersion('3.6')
else:
# 'Latest' version is set to True, 'Next' versions set to False
version_gte_3_1_or_1_1 = True
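
The comparison strings above are loosened from '3.4.0'-style to '3.4'-style because LooseVersion compares parsed component lists, so a host reporting a bare '3.4' would sort below '3.4.0' and fail the old check. A quick Python illustration:

    from distutils.version import LooseVersion

    # '3.4' parses to [3, 4] and '3.4.0' to [3, 4, 0]; the shorter list sorts first.
    print(LooseVersion('3.4') >= LooseVersion('3.4.0'))  # False
    print(LooseVersion('3.4') >= LooseVersion('3.4'))    # True
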
@@ -2157,6 +2162,25 @@ class OpenShiftFacts(object):
create_pvc=False
)
),
+ etcd=dict(
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='etcd',
+ size='1Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ ),
registry=dict(
storage=dict(
kind=None,
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
index 962148cb8..e93e81efa 100644
--- a/roles/openshift_health_checker/openshift_checks/disk_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -1,9 +1,12 @@
-# pylint: disable=missing-docstring
+"""Check that there is enough disk space in predefined paths."""
+
+import os.path
+import tempfile
+
from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
-from openshift_checks.mixins import NotContainerizedMixin
-class DiskAvailability(NotContainerizedMixin, OpenShiftCheck):
+class DiskAvailability(OpenShiftCheck):
"""Check that recommended disk space is available before a first-time install."""
name = "disk_availability"
@@ -12,56 +15,101 @@ class DiskAvailability(NotContainerizedMixin, OpenShiftCheck):
# Values taken from the official installation documentation:
# https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
recommended_disk_space_bytes = {
- "masters": 40 * 10**9,
- "nodes": 15 * 10**9,
- "etcd": 20 * 10**9,
+ '/var': {
+ 'masters': 40 * 10**9,
+ 'nodes': 15 * 10**9,
+ 'etcd': 20 * 10**9,
+ },
+ # Used to copy client binaries into; see
+ # roles/openshift_cli/library/openshift_container_binary_sync.py.
+ '/usr/local/bin': {
+ 'masters': 1 * 10**9,
+ 'nodes': 1 * 10**9,
+ 'etcd': 1 * 10**9,
+ },
+ # Used as temporary storage in several cases.
+ tempfile.gettempdir(): {
+ 'masters': 1 * 10**9,
+ 'nodes': 1 * 10**9,
+ 'etcd': 1 * 10**9,
+ },
}
@classmethod
def is_active(cls, task_vars):
"""Skip hosts that do not have recommended disk space requirements."""
group_names = get_var(task_vars, "group_names", default=[])
- has_disk_space_recommendation = bool(set(group_names).intersection(cls.recommended_disk_space_bytes))
+ active_groups = set()
+ for recommendation in cls.recommended_disk_space_bytes.values():
+ active_groups.update(recommendation.keys())
+ has_disk_space_recommendation = bool(active_groups.intersection(group_names))
return super(DiskAvailability, cls).is_active(task_vars) and has_disk_space_recommendation
def run(self, tmp, task_vars):
group_names = get_var(task_vars, "group_names")
ansible_mounts = get_var(task_vars, "ansible_mounts")
- free_bytes = self.openshift_available_disk(ansible_mounts)
-
- recommended_min = max(self.recommended_disk_space_bytes.get(name, 0) for name in group_names)
- configured_min = int(get_var(task_vars, "openshift_check_min_host_disk_gb", default=0)) * 10**9
- min_free_bytes = configured_min or recommended_min
-
- if free_bytes < min_free_bytes:
- return {
- 'failed': True,
- 'msg': (
- 'Available disk space ({:.1f} GB) for the volume containing '
- '"/var" is below minimum recommended space ({:.1f} GB)'
- ).format(float(free_bytes) / 10**9, float(min_free_bytes) / 10**9)
+ ansible_mounts = {mount['mount']: mount for mount in ansible_mounts}
+
+ user_config = get_var(task_vars, "openshift_check_min_host_disk_gb", default={})
+ try:
+ # For backwards-compatibility, if openshift_check_min_host_disk_gb
+ # is a number, then it overrides the required config for '/var'.
+ number = float(user_config)
+ user_config = {
+ '/var': {
+ 'masters': number,
+ 'nodes': number,
+ 'etcd': number,
+ },
}
+ except TypeError:
+ # If it is not a number, then it should be a nested dict.
+ pass
+
+ # TODO: as suggested in
+ # https://github.com/openshift/openshift-ansible/pull/4436#discussion_r122180021,
+ # maybe we could support checking disk availability in paths that are
+ # not part of the official recommendation but present in the user
+ # configuration.
+ for path, recommendation in self.recommended_disk_space_bytes.items():
+ free_bytes = self.free_bytes(path, ansible_mounts)
+ recommended_bytes = max(recommendation.get(name, 0) for name in group_names)
+
+ config = user_config.get(path, {})
+ # NOTE: the user config is in GB, but we compare bytes, thus the
+ # conversion.
+ config_bytes = max(config.get(name, 0) for name in group_names) * 10**9
+ recommended_bytes = config_bytes or recommended_bytes
+
+ if free_bytes < recommended_bytes:
+ free_gb = float(free_bytes) / 10**9
+ recommended_gb = float(recommended_bytes) / 10**9
+ return {
+ 'failed': True,
+ 'msg': (
+ 'Available disk space in "{}" ({:.1f} GB) '
+ 'is below minimum recommended ({:.1f} GB)'
+ ).format(path, free_gb, recommended_gb)
+ }
return {}
@staticmethod
- def openshift_available_disk(ansible_mounts):
- """Determine the available disk space for an OpenShift installation.
-
- ansible_mounts should be a list of dicts like the 'setup' Ansible module
- returns.
- """
- # priority list in descending order
- supported_mnt_paths = ["/var", "/"]
- available_mnts = {mnt.get("mount"): mnt for mnt in ansible_mounts}
+ def free_bytes(path, ansible_mounts):
+ """Return the size available in path based on ansible_mounts."""
+ mount_point = path
+ # arbitrary value to prevent an infinite loop, in the unlikely case that '/'
+ # is not in ansible_mounts.
+ max_depth = 32
+ while mount_point not in ansible_mounts and max_depth > 0:
+ mount_point = os.path.dirname(mount_point)
+ max_depth -= 1
try:
- for path in supported_mnt_paths:
- if path in available_mnts:
- return available_mnts[path]["size_available"]
+ free_bytes = ansible_mounts[mount_point]['size_available']
except KeyError:
- pass
+ known_mounts = ', '.join('"{}"'.format(mount) for mount in sorted(ansible_mounts)) or 'none'
+ msg = 'Unable to determine disk availability for "{}". Known mount points: {}.'
+ raise OpenShiftCheckException(msg.format(path, known_mounts))
- paths = ''.join(sorted(available_mnts)) or 'none'
- msg = "Unable to determine available disk space. Paths mounted: {}.".format(paths)
- raise OpenShiftCheckException(msg)
+ return free_bytes
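
A minimal sketch of the two mechanisms introduced above, with made-up values: the backwards-compatible normalization of openshift_check_min_host_disk_gb, and the walk up the directory tree that free_bytes() uses to find the mount point covering a path:

    import os.path

    # Old-style scalar config is promoted to the nested per-path form;
    # a dict raises TypeError on float() and is kept as-is.
    user_config = 10
    try:
        number = float(user_config)
        user_config = {'/var': {'masters': number, 'nodes': number, 'etcd': number}}
    except TypeError:
        pass
    print(user_config)  # {'/var': {'masters': 10.0, 'nodes': 10.0, 'etcd': 10.0}}

    # free_bytes() walks up the tree until it hits a known mount point:
    # '/var/lib/origin' resolves to the '/var' mount here.
    ansible_mounts = {'/': {'size_available': 50 * 10**9},
                      '/var': {'size_available': 10 * 10**9}}
    mount_point = '/var/lib/origin'
    while mount_point not in ansible_mounts:
        mount_point = os.path.dirname(mount_point)
    print(ansible_mounts[mount_point]['size_available'])  # 10000000000
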
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
index b353fa610..945b9eafc 100644
--- a/roles/openshift_health_checker/test/disk_availability_test.py
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -3,22 +3,19 @@ import pytest
from openshift_checks.disk_availability import DiskAvailability, OpenShiftCheckException
-@pytest.mark.parametrize('group_names,is_containerized,is_active', [
- (['masters'], False, True),
- # ensure check is skipped on containerized installs
- (['masters'], True, False),
- (['nodes'], False, True),
- (['etcd'], False, True),
- (['masters', 'nodes'], False, True),
- (['masters', 'etcd'], False, True),
- ([], False, False),
- (['lb'], False, False),
- (['nfs'], False, False),
+@pytest.mark.parametrize('group_names,is_active', [
+ (['masters'], True),
+ (['nodes'], True),
+ (['etcd'], True),
+ (['masters', 'nodes'], True),
+ (['masters', 'etcd'], True),
+ ([], False),
+ (['lb'], False),
+ (['nfs'], False),
])
-def test_is_active(group_names, is_containerized, is_active):
+def test_is_active(group_names, is_active):
task_vars = dict(
group_names=group_names,
- openshift=dict(common=dict(is_containerized=is_containerized)),
)
assert DiskAvailability.is_active(task_vars=task_vars) == is_active
@@ -38,7 +35,7 @@ def test_cannot_determine_available_disk(ansible_mounts, extra_words):
with pytest.raises(OpenShiftCheckException) as excinfo:
check.run(tmp=None, task_vars=task_vars)
- for word in 'determine available disk'.split() + extra_words:
+ for word in 'determine disk availability'.split() + extra_words:
assert word in str(excinfo.value)
@@ -81,7 +78,7 @@ def test_cannot_determine_available_disk(ansible_mounts, extra_words):
[{
# not enough space on / ...
'mount': '/',
- 'size_available': 0,
+ 'size_available': 2 * 10**9,
}, {
# ... but enough on /var
'mount': '/var',
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index d895e9a68..2eeb2e7ce 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -135,7 +135,7 @@
- name: Determine the latest version of the OpenShift registry deployment
command: |
- oc get deploymentconfig {{ openshift_hosted_registry_name }} \
+ {{ openshift.common.client_binary }} get deploymentconfig {{ openshift_hosted_registry_name }} \
--namespace {{ openshift_hosted_registry_namespace }} \
--config {{ openshift.common.config_base }}/master/admin.kubeconfig \
-o jsonpath='{ .status.latestVersion }'
@@ -143,7 +143,7 @@
- name: Sanity-check that the OpenShift registry rolled out correctly
command: |
- oc get replicationcontroller {{ openshift_hosted_registry_name }}-{{ openshift_hosted_registry_latest_version.stdout }} \
+ {{ openshift.common.client_binary }} get replicationcontroller {{ openshift_hosted_registry_name }}-{{ openshift_hosted_registry_latest_version.stdout }} \
--namespace {{ openshift_hosted_registry_namespace }} \
--config {{ openshift.common.config_base }}/master/admin.kubeconfig \
-o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml
index 160ae2f5e..c60b67862 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router/router.yml
@@ -92,7 +92,7 @@
- name: Ensure OpenShift router correctly rolls out (best-effort today)
command: |
- oc rollout status deploymentconfig {{ item.name }} \
+ {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \
--namespace {{ item.namespace | default('default') }} \
--config {{ openshift.common.config_base }}/master/admin.kubeconfig
async: 600
@@ -102,7 +102,7 @@
- name: Determine the latest version of the OpenShift router deployment
command: |
- oc get deploymentconfig {{ item.name }} \
+ {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \
--namespace {{ item.namespace }} \
--config {{ openshift.common.config_base }}/master/admin.kubeconfig \
-o jsonpath='{ .status.latestVersion }'
@@ -111,7 +111,7 @@
- name: Poll for OpenShift router deployment success
command: |
- oc get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
+ {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
--namespace {{ item.0.namespace }} \
--config {{ openshift.common.config_base }}/master/admin.kubeconfig \
-o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 66d880d23..c243a6e4a 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -89,7 +89,7 @@ openshift_logging_es_cpu_limit: null
# the logging appenders for the root loggers to write ES logs. Valid values: 'file', 'console'
openshift_logging_es_log_appenders: ['file']
openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}"
-openshift_logging_es_pv_selector: "{{ openshift_hosted_logging_storage_labels | default(null) }}"
+openshift_logging_es_pv_selector: "{{ openshift_hosted_logging_storage_labels | default('') }}"
openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}"
openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}"
openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
@@ -128,7 +128,7 @@ openshift_logging_es_ops_client_key: /etc/fluent/keys/key
openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
openshift_logging_es_ops_cpu_limit: null
openshift_logging_es_ops_memory_limit: "{{ openshift_hosted_logging_elasticsearch_ops_instance_ram | default('8Gi') }}"
-openshift_logging_es_ops_pv_selector: "{{ openshift_hosted_loggingops_storage_labels | default(null) }}"
+openshift_logging_es_ops_pv_selector: "{{ openshift_hosted_loggingops_storage_labels | default('') }}"
openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}"
openshift_logging_es_ops_pvc_size: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_size | default('') }}"
openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}"
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 66dc0e096..221a81340 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -105,20 +105,22 @@
- set_fact: es_ops_indices=[]
when: openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count == 0
+- set_fact: openshift_logging_es_ops_pvc_prefix="logging-es-ops"
+ when: openshift_logging_es_ops_pvc_prefix == ""
- include_role:
name: openshift_logging_elasticsearch
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
openshift_logging_elasticsearch_deployment_name: "{{ item.0 }}"
- openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}"
openshift_logging_elasticsearch_ops_deployment: true
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
- openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs') else 'emptydir' }}"
- openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
- openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
- openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+ openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_ops_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs') else 'emptydir' }}"
+ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
+ openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
+ openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}"
@@ -139,14 +141,14 @@
name: openshift_logging_elasticsearch
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
- openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count - 1 }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count - 1 }}"
openshift_logging_elasticsearch_ops_deployment: true
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
- openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs') else 'emptydir' }}"
- openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
- openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
- openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+ openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_ops_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs') else 'emptydir' }}"
+ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
+ openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
+ openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}"
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 684dbe0a0..68726aa78 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -173,9 +173,8 @@
selector:
component: "{{ es_component }}"
provider: openshift
- # pending #4091
- #labels:
- #- logging-infra: 'support'
+ labels:
+ logging-infra: 'support'
ports:
- port: 9300
@@ -187,14 +186,15 @@
selector:
component: "{{ es_component }}"
provider: openshift
- # pending #4091
- #labels:
- #- logging-infra: 'support'
+ labels:
+ logging-infra: 'support'
ports:
- port: 9200
targetPort: "restapi"
-- name: Creating ES storage template
+# Storage classes are used by default; for static provisioning they are
+# disabled by setting storageClassName to "" in pvc.j2.
+- name: Creating ES storage template - static
template:
src: pvc.j2
dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
@@ -203,11 +203,13 @@
size: "{{ openshift_logging_elasticsearch_pvc_size }}"
access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}"
when:
- openshift_logging_elasticsearch_storage_type == "pvc"
- not openshift_logging_elasticsearch_pvc_dynamic
-- name: Creating ES storage template
+# Storage classes are used by default if configured
+- name: Creating ES storage template - dynamic
template:
src: pvc.j2
dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
@@ -216,8 +218,6 @@
size: "{{ openshift_logging_elasticsearch_pvc_size }}"
access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
- annotations:
- volume.beta.kubernetes.io/storage-class: "dynamic"
when:
- openshift_logging_elasticsearch_storage_type == "pvc"
- openshift_logging_elasticsearch_pvc_dynamic
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index 844dbc8c2..1ca4220a3 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -78,6 +78,9 @@ spec:
name: "INSTANCE_RAM"
value: "{{openshift_logging_elasticsearch_memory_limit}}"
-
+ name: "HEAP_DUMP_LOCATION"
+ value: "/elasticsearch/persistent/heapdump.hprof"
+ -
name: "NODE_QUORUM"
value: "{{es_node_quorum | int}}"
-
diff --git a/roles/openshift_logging_elasticsearch/templates/pvc.j2 b/roles/openshift_logging_elasticsearch/templates/pvc.j2
index f19a3a750..063f9c5ae 100644
--- a/roles/openshift_logging_elasticsearch/templates/pvc.j2
+++ b/roles/openshift_logging_elasticsearch/templates/pvc.j2
@@ -25,3 +25,6 @@ spec:
resources:
requests:
storage: {{size}}
+{% if storage_class_name is defined %}
+ storageClassName: {{ storage_class_name }}
+{% endif %}
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index 8194223e8..30b596e22 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -100,7 +100,7 @@
- copy:
src: secure-forward.conf
dest: "{{ tempdir }}/secure-forward.conf"
- when: fluentd_securefoward_contents is undefined
+ when: fluentd_secureforward_contents is undefined
changed_when: no
diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2
index a5695ee26..d9814370f 100644
--- a/roles/openshift_logging_fluentd/templates/fluentd.j2
+++ b/roles/openshift_logging_fluentd/templates/fluentd.j2
@@ -98,9 +98,15 @@ spec:
- name: "BUFFER_SIZE_LIMIT"
value: "{{ openshift_logging_fluentd_buffer_size_limit }}"
- name: "FLUENTD_CPU_LIMIT"
- value: "{{ openshift_logging_fluentd_cpu_limit }}"
+ valueFrom:
+ resourceFieldRef:
+ containerName: "{{ daemonset_container_name }}"
+ resource: limits.cpu
- name: "FLUENTD_MEMORY_LIMIT"
- value: "{{ openshift_logging_fluentd_memory_limit }}"
+ valueFrom:
+ resourceFieldRef:
+ containerName: "{{ daemonset_container_name }}"
+ resource: limits.memory
volumes:
- name: runlogjournal
hostPath:
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
index bae55ffaa..93cb82793 100644
--- a/roles/openshift_logging_kibana/tasks/main.yaml
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -89,9 +89,8 @@
selector:
component: "{{ kibana_component }}"
provider: openshift
- # pending #4091
- #labels:
- #- logging-infra: 'support'
+ labels:
+ logging-infra: 'support'
ports:
- port: 443
targetPort: "oaproxy"
diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
index 243698c6a..c3f9b3433 100644
--- a/roles/openshift_logging_mux/templates/mux.j2
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -102,15 +102,21 @@ spec:
- name: USE_MUX
value: "true"
- name: MUX_ALLOW_EXTERNAL
- value: "{{ openshift_logging_mux_allow_external | default('false') }}"
+ value: "{{ openshift_logging_mux_allow_external | default('false') | lower }}"
- name: "BUFFER_QUEUE_LIMIT"
value: "{{ openshift_logging_mux_buffer_queue_limit }}"
- name: "BUFFER_SIZE_LIMIT"
value: "{{ openshift_logging_mux_buffer_size_limit }}"
- name: "MUX_CPU_LIMIT"
- value: "{{ openshift_logging_mux_cpu_limit }}"
+ valueFrom:
+ resourceFieldRef:
+ containerName: "mux"
+ resource: limits.cpu
- name: "MUX_MEMORY_LIMIT"
- value: "{{ openshift_logging_mux_memory_limit }}"
+ valueFrom:
+ resourceFieldRef:
+ containerName: "mux"
+ resource: limits.memory
volumes:
- name: config
configMap:
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index 897ee7285..e8f7c47b0 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -12,7 +12,7 @@ Requires={{ openshift.docker.service_name }}.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api
LimitNOFILE=131072
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index 451f3436a..69db62f16 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -11,7 +11,7 @@ PartOf={{ openshift.docker.service_name }}.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers
LimitNOFILE=131072
diff --git a/roles/openshift_master/templates/master_docker/master.docker.service.j2 b/roles/openshift_master/templates/master_docker/master.docker.service.j2
index 7f40cb042..31c1dfc33 100644
--- a/roles/openshift_master/templates/master_docker/master.docker.service.j2
+++ b/roles/openshift_master/templates/master_docker/master.docker.service.j2
@@ -8,7 +8,7 @@ Wants=etcd_container.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-master
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master
Restart=always
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
index c484d23cc..c05a27559 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
@@ -1,5 +1,8 @@
OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }}
CONFIG_FILE={{ openshift_master_config_file }}
+{% if openshift_push_via_dns | default(false) %}
+OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
+{% endif %}
{% if openshift.common.is_containerized | bool %}
IMAGE_VERSION={{ openshift_image_tag }}
{% endif %}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
index e0adbbf52..a153fb33d 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
@@ -1,5 +1,8 @@
OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }}
CONFIG_FILE={{ openshift_master_config_file }}
+{% if openshift_push_via_dns | default(false) %}
+OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
+{% endif %}
{% if openshift.common.is_containerized | bool %}
IMAGE_VERSION={{ openshift_image_tag }}
{% endif %}
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 62413536b..d9ffb1b6f 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -74,10 +74,10 @@
- name: Generate the loopback master client config
command: >
{{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+ --certificate-authority={{ openshift_ca_cert }}
{% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
- --certificate-authority={{ openshift_ca_cert }}
--client-dir={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}
--groups=system:masters,system:openshift-master
--master={{ hostvars[item].openshift.master.loopback_api_url }}
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
index 7b81b3c10..8d7ee00ed 100644
--- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
@@ -17,14 +17,17 @@
local_action: copy dest="{{ local_tmp.stdout }}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}"
with_items:
- hawkular-metrics
+ become: false
- local_action: slurp src="{{ local_tmp.stdout }}/hawkular-metrics.pwd"
register: hawkular_metrics_pwd
no_log: true
+ become: false
- name: generate htpasswd file for hawkular metrics
local_action: htpasswd path="{{ local_tmp.stdout }}/hawkular-metrics.htpasswd" name=hawkular password="{{ hawkular_metrics_pwd.content | b64decode }}"
no_log: true
+ become: false
- name: copy local generated passwords to target
copy:
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index 62b7f52cb..7928a0346 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -36,6 +36,7 @@
access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
+ storage_class_name: "{{ openshift_metrics_cassandra_pvc_storage_class_name | default('', true) }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
when:
- openshift_metrics_cassandra_storage_type != 'emptydir'
@@ -50,8 +51,6 @@
obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ item }}"
labels:
metrics-infra: hawkular-cassandra
- annotations:
- volume.beta.kubernetes.io/storage-class: dynamic
access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
diff --git a/roles/openshift_metrics/tasks/install_hosa.yaml b/roles/openshift_metrics/tasks/install_hosa.yaml
index cc533a68b..7c9bc26d0 100644
--- a/roles/openshift_metrics/tasks/install_hosa.yaml
+++ b/roles/openshift_metrics/tasks/install_hosa.yaml
@@ -28,7 +28,7 @@
- name: Generate role binding for the hawkular-openshift-agent service account
template:
src: rolebinding.j2
- dest: "{{ mktemp.stdout }}/templates/metrics-hawkular-agent-rolebinding.yaml"
+ dest: "{{ mktemp.stdout }}/templates/metrics-hawkular-openshift-agent-rolebinding.yaml"
vars:
cluster: True
obj_name: hawkular-openshift-agent-rb
diff --git a/roles/openshift_metrics/tasks/install_support.yaml b/roles/openshift_metrics/tasks/install_support.yaml
index 5cefb273d..584e3be05 100644
--- a/roles/openshift_metrics/tasks/install_support.yaml
+++ b/roles/openshift_metrics/tasks/install_support.yaml
@@ -4,6 +4,7 @@
register: htpasswd_check
failed_when: no
changed_when: no
+ become: false
- fail: msg="'htpasswd' is unavailable. Please install httpd-tools on the control node"
when: htpasswd_check.rc == 1
@@ -13,6 +14,7 @@
register: keytool_check
failed_when: no
changed_when: no
+ become: false
- fail: msg="'keytool' is unavailable. Please install java-1.8.0-openjdk-headless on the control node"
when: keytool_check.rc == 1
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index 0b5f23c24..eaabdd20f 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -1,6 +1,7 @@
---
- local_action: shell python -c 'import passlib' 2>/dev/null || echo not installed
register: passlib_result
+ become: false
- name: Check that python-passlib is available on the control host
assert:
@@ -52,3 +53,4 @@
tags: metrics_cleanup
changed_when: False
check_mode: no
+ become: false
diff --git a/roles/openshift_metrics/templates/pvc.j2 b/roles/openshift_metrics/templates/pvc.j2
index 0b801b33f..b4e6a1503 100644
--- a/roles/openshift_metrics/templates/pvc.j2
+++ b/roles/openshift_metrics/templates/pvc.j2
@@ -32,3 +32,6 @@ spec:
resources:
requests:
storage: {{size}}
+{% if storage_class_name is defined %}
+ storageClassName: {{ storage_class_name }}
+{% endif %}
diff --git a/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py b/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py
new file mode 100644
index 000000000..6ed6d404c
--- /dev/null
+++ b/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py
@@ -0,0 +1,21 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+'''
+Custom filters for use with openshift named certificates
+'''
+
+
+class FilterModule(object):
+ ''' Custom ansible filters for use with openshift named certificates'''
+
+ @staticmethod
+ def oo_named_certificates_list(named_certificates):
+ ''' Returns named certificates list with correct fields for the master
+ config file.'''
+ return [{'certFile': named_certificate['certfile'],
+ 'keyFile': named_certificate['keyfile'],
+ 'names': named_certificate['names']} for named_certificate in named_certificates]
+
+ def filters(self):
+ ''' returns a mapping of filters to methods '''
+ return {"oo_named_certificates_list": self.oo_named_certificates_list}
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index d89b64b06..cd0a1a60b 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -17,7 +17,7 @@ After={{ openshift.common.service_type }}-node-dep.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
SyslogIdentifier={{ openshift.common.service_type }}-node
diff --git a/roles/openshift_node_upgrade/tasks/restart.yml b/roles/openshift_node_upgrade/tasks/restart.yml
index 508eb9358..b24d8cec7 100644
--- a/roles/openshift_node_upgrade/tasks/restart.yml
+++ b/roles/openshift_node_upgrade/tasks/restart.yml
@@ -16,7 +16,11 @@
- name: Restart docker
service:
name: "{{ openshift.docker.service_name }}"
- state: restarted
+ state: started
+ register: docker_start_result
+ until: not docker_start_result | failed
+ retries: 1
+ delay: 30
- name: Update docker facts
openshift_facts:
diff --git a/roles/openshift_service_catalog/defaults/main.yml b/roles/openshift_service_catalog/defaults/main.yml
new file mode 100644
index 000000000..01ee2544d
--- /dev/null
+++ b/roles/openshift_service_catalog/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+openshift_service_catalog_remove: false
+openshift_service_catalog_nodeselector: {"openshift-infra": "apiserver"}
diff --git a/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
new file mode 100644
index 000000000..880146ca4
--- /dev/null
+++ b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
@@ -0,0 +1,161 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: service-catalog
+objects:
+
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: servicecatalog-serviceclass-viewer
+ rules:
+ - apiGroups:
+ - servicecatalog.k8s.io
+ resources:
+ - serviceclasses
+ verbs:
+ - list
+ - watch
+ - get
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: servicecatalog-serviceclass-viewer-binding
+ roleRef:
+ name: servicecatalog-serviceclass-viewer
+ groupNames:
+ - system:authenticated
+
+- kind: ServiceAccount
+ apiVersion: v1
+ metadata:
+ name: service-catalog-controller
+
+- kind: ServiceAccount
+ apiVersion: v1
+ metadata:
+ name: service-catalog-apiserver
+
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: sar-creator
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - subjectaccessreviews.authorization.k8s.io
+ verbs:
+ - create
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: service-catalog-sar-creator-binding
+ roleRef:
+ name: sar-creator
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: namespace-viewer
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - list
+ - watch
+ - get
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: service-catalog-namespace-viewer-binding
+ roleRef:
+ name: namespace-viewer
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: service-catalog-controller-namespace-viewer-binding
+ roleRef:
+ name: namespace-viewer
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-controller
+
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: service-catalog-controller
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - secrets
+ - podpresets
+ verbs:
+ - create
+ - update
+ - delete
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - servicecatalog.k8s.io
+ resources:
+ - brokers/status
+ - instances/status
+ - bindings/status
+ verbs:
+ - update
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: service-catalog-controller-binding
+ roleRef:
+ name: service-catalog-controller
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-controller
+
+- kind: Role
+ apiVersion: v1
+ metadata:
+ name: endpoint-accessor
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - list
+ - watch
+ - get
+ - create
+ - update
+
+- kind: RoleBinding
+ apiVersion: v1
+ metadata:
+ name: endpoint-accessor-binding
+ roleRef:
+ name: endpoint-accessor
+ namespace: kube-service-catalog
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-controller
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: system:auth-delegator-binding
+ roleRef:
+ name: system:auth-delegator
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
diff --git a/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml
new file mode 100644
index 000000000..f6ee0955d
--- /dev/null
+++ b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: kube-system-service-catalog
+objects:
+
+- kind: Role
+ apiVersion: v1
+ metadata:
+ name: extension-apiserver-authentication-reader
+ namespace: ${KUBE_SYSTEM_NAMESPACE}
+ rules:
+ - apiGroups:
+ - ""
+ resourceNames:
+ - extension-apiserver-authentication
+ resources:
+ - configmaps
+ verbs:
+ - get
+
+- kind: RoleBinding
+ apiVersion: v1
+ metadata:
+ name: extension-apiserver-authentication-reader-binding
+ namespace: ${KUBE_SYSTEM_NAMESPACE}
+ roleRef:
+ name: extension-apiserver-authentication-reader
+ namespace: kube-system
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+
+parameters:
+- description: Do not change this value.
+ displayName: Name of the kube-system namespace
+ name: KUBE_SYSTEM_NAMESPACE
+ required: true
+ value: kube-system
diff --git a/roles/openshift_service_catalog/meta/main.yml b/roles/openshift_service_catalog/meta/main.yml
new file mode 100644
index 000000000..1e6b837cd
--- /dev/null
+++ b/roles/openshift_service_catalog/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Service Catalog
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
+- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml
new file mode 100644
index 000000000..cc897b032
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/generate_certs.yml
@@ -0,0 +1,70 @@
+---
+- name: Create service catalog cert directory
+ file:
+ path: "{{ openshift.common.config_base }}/service-catalog"
+ state: directory
+ mode: 0755
+ changed_when: False
+ check_mode: no
+
+- set_fact:
+ generated_certs_dir: "{{ openshift.common.config_base }}/service-catalog"
+
+- name: Generate signing cert
+ command: >
+ {{ openshift.common.client_binary }} adm --config=/etc/origin/master/admin.kubeconfig ca create-signer-cert
+ --key={{ generated_certs_dir }}/ca.key --cert={{ generated_certs_dir }}/ca.crt
+ --serial={{ generated_certs_dir }}/apiserver.serial.txt --name=service-catalog-signer
+
+- name: Generating server keys
+ oc_adm_ca_server_cert:
+ cert: "{{ generated_certs_dir }}/apiserver.crt"
+ key: "{{ generated_certs_dir }}/apiserver.key"
+ hostnames: "apiserver.kube-service-catalog.svc,apiserver.kube-service-catalog.svc.cluster.local,apiserver.kube-service-catalog"
+ signer_cert: "{{ generated_certs_dir }}/ca.crt"
+ signer_key: "{{ generated_certs_dir }}/ca.key"
+ signer_serial: "{{ generated_certs_dir }}/apiserver.serial.txt"
+
+- name: Create apiserver-ssl secret
+ oc_secret:
+ state: present
+ name: apiserver-ssl
+ namespace: kube-service-catalog
+ files:
+ - name: tls.crt
+ path: "{{ generated_certs_dir }}/apiserver.crt"
+ - name: tls.key
+ path: "{{ generated_certs_dir }}/apiserver.key"
+
+- slurp:
+ src: "{{ generated_certs_dir }}/ca.crt"
+ register: apiserver_ca
+
+- shell: >
+ oc get apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
+ register: get_apiservices
+ changed_when: no
+
+- name: Create api service
+ oc_obj:
+ state: present
+ name: v1alpha1.servicecatalog.k8s.io
+ kind: apiservices.apiregistration.k8s.io
+ namespace: "kube-service-catalog"
+ content:
+ path: /tmp/apisvcout
+ data:
+ apiVersion: apiregistration.k8s.io/v1beta1
+ kind: APIService
+ metadata:
+ name: v1alpha1.servicecatalog.k8s.io
+ spec:
+ group: servicecatalog.k8s.io
+ version: v1alpha1
+ service:
+ namespace: "kube-service-catalog"
+ name: apiserver
+ caBundle: "{{ apiserver_ca.content }}"
+ groupPriorityMinimum: 20
+ versionPriority: 10
+ when: "'not found' in get_apiservices.stdout"
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
new file mode 100644
index 000000000..c1773b5f6
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -0,0 +1,181 @@
+---
+# do any asserts here
+
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+
+- include: wire_aggregator.yml
+
+- name: Set default image variables based on deployment_type
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "default_images.yml"
+
+- name: Set service_catalog image facts
+ set_fact:
+ openshift_service_catalog_image_prefix: "{{ openshift_service_catalog_image_prefix | default(__openshift_service_catalog_image_prefix) }}"
+ openshift_service_catalog_image_version: "{{ openshift_service_catalog_image_version | default(__openshift_service_catalog_image_version) }}"
+
+- name: Set Service Catalog namespace
+ oc_project:
+ state: present
+ name: "kube-service-catalog"
+# node_selector: "{{ openshift_service_catalog_nodeselector | default(null) }}"
+
+- include: generate_certs.yml
+
+- copy:
+ src: kubeservicecatalog_roles_bindings.yml
+ dest: "{{ mktemp.stdout }}/kubeservicecatalog_roles_bindings.yml"
+
+- oc_obj:
+ name: service-catalog
+ kind: template
+ namespace: "kube-service-catalog"
+ files:
+ - "{{ mktemp.stdout }}/kubeservicecatalog_roles_bindings.yml"
+ delete_after: yes
+
+- oc_process:
+ create: True
+ template_name: service-catalog
+ namespace: "kube-service-catalog"
+
+- copy:
+ src: kubesystem_roles_bindings.yml
+ dest: "{{ mktemp.stdout }}/kubesystem_roles_bindings.yml"
+
+- oc_obj:
+ name: kube-system-service-catalog
+ kind: template
+ namespace: kube-system
+ files:
+ - "{{ mktemp.stdout }}/kubesystem_roles_bindings.yml"
+ delete_after: yes
+
+- oc_process:
+ create: True
+ template_name: kube-system-service-catalog
+ namespace: kube-system
+
+- shell: >
+ oc get policybindings/kube-system:default -n kube-system || echo "not found"
+ register: get_kube_system
+ changed_when: no
+
+- command: >
+ oc create policybinding kube-system -n kube-system
+ when: "'not found' in get_kube_system.stdout"
+
+- oc_adm_policy_user:
+ namespace: kube-service-catalog
+ resource_kind: scc
+ resource_name: hostmount-anyuid
+ state: present
+ user: "system:serviceaccount:kube-service-catalog:service-catalog-apiserver"
+
+- name: Set SA cluster-role
+ oc_adm_policy_user:
+ state: present
+ namespace: "kube-service-catalog"
+ resource_kind: cluster-role
+ resource_name: admin
+ user: "system:serviceaccount:kube-service-catalog:default"
+
+## api server
+- template:
+ src: api_server.j2
+ dest: "{{ mktemp.stdout }}/service_catalog_api_server.yml"
+ vars:
+ image: ""
+ namespace: ""
+ cpu_limit: none
+ memory_limit: none
+ cpu_requests: none
+ memory_request: none
+ cors_allowed_origin: localhost
+ node_selector: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) }}"
+
+- name: Set Service Catalog API Server daemonset
+ oc_obj:
+ state: present
+ namespace: "kube-service-catalog"
+ kind: daemonset
+ name: apiserver
+ files:
+ - "{{ mktemp.stdout }}/service_catalog_api_server.yml"
+ delete_after: yes
+
+- template:
+ src: api_server_service.j2
+ dest: "{{ mktemp.stdout }}/service_catalog_api_service.yml"
+
+- name: Set Service Catalog API Server service
+ oc_obj:
+ state: present
+ namespace: "kube-service-catalog"
+ kind: service
+ name: apiserver
+ files:
+ - "{{ mktemp.stdout }}/service_catalog_api_service.yml"
+ delete_after: yes
+
+- template:
+ src: api_server_route.j2
+ dest: "{{ mktemp.stdout }}/service_catalog_api_route.yml"
+
+- name: Set Service Catalog API Server route
+ oc_obj:
+ state: present
+ namespace: "kube-service-catalog"
+ kind: route
+ name: apiserver
+ files:
+ - "{{ mktemp.stdout }}/service_catalog_api_route.yml"
+ delete_after: yes
+
+## controller manager
+- template:
+ src: controller_manager.j2
+ dest: "{{ mktemp.stdout }}/controller_manager.yml"
+ vars:
+ image: ""
+ cpu_limit: none
+ memory_limit: none
+ node_selector: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) }}"
+
+- name: Set Controller Manager daemonset
+ oc_obj:
+ state: present
+ namespace: "kube-service-catalog"
+ kind: daemonset
+ name: controller-manager
+ files:
+ - "{{ mktemp.stdout }}/controller_manager.yml"
+ delete_after: yes
+
+- template:
+ src: controller_manager_service.j2
+ dest: "{{ mktemp.stdout }}/controller_manager_service.yml"
+
+- name: Set Controller Manager service
+ oc_obj:
+ state: present
+ namespace: "kube-service-catalog"
+ kind: service
+ name: controller-manager
+ files:
+ - "{{ mktemp.stdout }}/controller_manager_service.yml"
+ delete_after: yes
+
+- include: start_api_server.yml
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
diff --git a/roles/openshift_service_catalog/tasks/main.yml b/roles/openshift_service_catalog/tasks/main.yml
new file mode 100644
index 000000000..dc0d6a370
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+# do any asserts here
+
+- include: install.yml
+ when: not openshift_service_catalog_remove | default(false) | bool
+
+- include: remove.yml
+ when: openshift_service_catalog_remove | default(false) | bool
diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml
new file mode 100644
index 000000000..2fb1ec440
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/remove.yml
@@ -0,0 +1,56 @@
+---
+- name: Remove Service Catalog APIServer
+ command: >
+ oc delete apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
+
+- name: Remove Policy Binding
+ command: >
+ oc delete policybindings/kube-system:default -n kube-system --ignore-not-found
+
+# TODO: this module doesn't currently remove this
+#- name: Remove service catalog api service
+# oc_obj:
+# state: absent
+# namespace: "kube-service-catalog"
+# kind: apiservices.apiregistration.k8s.io
+# name: v1alpha1.servicecatalog.k8s.io
+
+- name: Remove Service Catalog API Server route
+ oc_obj:
+ state: absent
+ namespace: "kube-service-catalog"
+ kind: route
+ name: apiserver
+
+- name: Remove Service Catalog API Server service
+ oc_obj:
+ state: absent
+ namespace: "kube-service-catalog"
+ kind: service
+ name: apiserver
+
+- name: Remove Service Catalog API Server daemonset
+ oc_obj:
+ state: absent
+ namespace: "kube-service-catalog"
+ kind: daemonset
+ name: apiserver
+
+- name: Remove Controller Manager service
+ oc_obj:
+ state: absent
+ namespace: "kube-service-catalog"
+ kind: service
+ name: controller-manager
+
+- name: Remove Controller Manager daemonset
+ oc_obj:
+ state: absent
+ namespace: "kube-service-catalog"
+ kind: daemonset
+ name: controller-manager
+
+- name: Remove Service Catalog namespace
+ oc_project:
+ state: absent
+ name: "kube-service-catalog"
diff --git a/roles/openshift_service_catalog/tasks/start_api_server.yml b/roles/openshift_service_catalog/tasks/start_api_server.yml
new file mode 100644
index 000000000..b143292b6
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/start_api_server.yml
@@ -0,0 +1,22 @@
+---
+# Label nodes so the apiserver and controller-manager daemonsets can schedule,
+# then wait for at least one apiserver pod to report healthy
+- name: Label {{ openshift.node.nodename }} for the API server and controller-manager daemonsets
+ oc_label:
+ name: "{{ openshift.node.nodename }}"
+ kind: node
+ state: add
+ labels: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) | oo_dict_to_list_of_dict }}"
+
+# wait to see that the apiserver is available
+- name: Wait for API server to be ready
+ command: >
+ curl -k https://apiserver.kube-service-catalog.svc/healthz
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_health
+ until: api_health.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
diff --git a/roles/openshift_service_catalog/tasks/wire_aggregator.yml b/roles/openshift_service_catalog/tasks/wire_aggregator.yml
new file mode 100644
index 000000000..3e5897ba4
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/wire_aggregator.yml
@@ -0,0 +1,101 @@
+---
+# TODO: this currently has a bug where hostnames are required
+- name: Create Aggregator signer certs
+ command: >
+ oc adm ca create-signer-cert
+ --cert=/etc/origin/master/front-proxy-ca.crt
+ --key=/etc/origin/master/front-proxy-ca.key
+ --serial=/etc/origin/master/ca.serial.txt
+# oc_adm_ca_server_cert:
+# cert: /etc/origin/master/front-proxy-ca.crt
+# key: /etc/origin/master/front-proxy-ca.key
+
+- name: Create api-client config for Aggregator
+ command: >
+ oc adm create-api-client-config
+ --certificate-authority=/etc/origin/master/front-proxy-ca.crt
+ --signer-cert=/etc/origin/master/front-proxy-ca.crt
+ --signer-key=/etc/origin/master/front-proxy-ca.key
+ --user aggregator-front-proxy
+ --client-dir=/etc/origin/master
+ --signer-serial=/etc/origin/master/ca.serial.txt
+
+- name: Update master config
+ yedit:
+ state: present
+ src: /etc/origin/master/master-config.yaml
+ edits:
+ - key: aggregatorConfig.proxyClientInfo.certFile
+ value: aggregator-front-proxy.crt
+ - key: aggregatorConfig.proxyClientInfo.keyFile
+ value: aggregator-front-proxy.key
+ - key: authConfig.requestHeader.clientCA
+ value: front-proxy-ca.crt
+ - key: authConfig.requestHeader.clientCommonNames
+ value: [aggregator-front-proxy]
+ - key: authConfig.requestHeader.usernameHeaders
+ value: [X-Remote-User]
+ - key: authConfig.requestHeader.groupHeaders
+ value: [X-Remote-Group]
+ - key: authConfig.requestHeader.extraHeaderPrefixes
+ value: [X-Remote-Extra-]
+ register: yedit_output
+
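+# For reference, the yedit edits above render a stanza like the following in
+# master-config.yaml (a sketch; unrelated keys omitted):
+#
+#   aggregatorConfig:
+#     proxyClientInfo:
+#       certFile: aggregator-front-proxy.crt
+#       keyFile: aggregator-front-proxy.key
+#   authConfig:
+#     requestHeader:
+#       clientCA: front-proxy-ca.crt
+#       clientCommonNames: [aggregator-front-proxy]
+#       usernameHeaders: [X-Remote-User]
+#       groupHeaders: [X-Remote-Group]
+#       extraHeaderPrefixes: [X-Remote-Extra-]
+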
+# Restart masters serially here
+- name: restart master
+ systemd: name={{ openshift.common.service_type }}-master state=restarted
+ when:
+ - yedit_output.changed
+ - openshift.master.ha is not defined or not openshift.master.ha | bool
+
+- name: restart master api
+ systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ when:
+ - yedit_output.changed
+ - openshift.master.ha is defined and openshift.master.ha | bool
+ - openshift.master.cluster_method == 'native'
+
+- name: restart master controllers
+ systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when:
+ - yedit_output.changed
+ - openshift.master.ha is defined and openshift.master.ha | bool
+ - openshift.master.cluster_method == 'native'
+
+- name: Verify API Server
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2
+ {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {% else %}
+ --cacert {{ openshift.common.config_base }}/master/ca.crt
+ {% endif %}
+ {{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
+ when:
+ - yedit_output.changed
diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2
new file mode 100644
index 000000000..8ae6b6c8d
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/api_server.j2
@@ -0,0 +1,80 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ labels:
+ app: apiserver
+ name: apiserver
+spec:
+ selector:
+ matchLabels:
+ app: apiserver
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app: apiserver
+ spec:
+ serviceAccountName: service-catalog-apiserver
+ nodeSelector:
+{% for key, value in node_selector.items() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+ containers:
+ - args:
+ - --storage-type
+ - etcd
+ - --secure-port
+ - "6443"
+ - --etcd-servers
+# TODO: come back and get openshift.common.hostname to work
+ - https://{{ openshift.common.ip }}:{{ openshift.master.etcd_port }}
+ - --etcd-cafile
+ - /etc/origin/master/master.etcd-ca.crt
+ - --etcd-certfile
+ - /etc/origin/master/master.etcd-client.crt
+ - --etcd-keyfile
+ - /etc/origin/master/master.etcd-client.key
+ - -v
+ - "10"
+ - --cors-allowed-origins
+ - {{ cors_allowed_origin }}
+ - --admission-control
+ - "KubernetesNamespaceLifecycle"
+ image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
+ command: ["/usr/bin/apiserver"]
+ imagePullPolicy: Always
+ name: apiserver
+ ports:
+ - containerPort: 6443
+ protocol: TCP
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ volumeMounts:
+ - mountPath: /var/run/kubernetes-service-catalog
+ name: apiserver-ssl
+ readOnly: true
+ - mountPath: /etc/origin/master
+ name: etcd-host-cert
+ readOnly: true
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ securityContext: {}
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - name: apiserver-ssl
+ secret:
+ defaultMode: 420
+ secretName: apiserver-ssl
+ items:
+ - key: tls.crt
+ path: apiserver.crt
+ - key: tls.key
+ path: apiserver.key
+ - hostPath:
+ path: /etc/origin/master
+ name: etcd-host-cert
+ - emptyDir: {}
+ name: data-dir
diff --git a/roles/openshift_service_catalog/templates/api_server_route.j2 b/roles/openshift_service_catalog/templates/api_server_route.j2
new file mode 100644
index 000000000..3c3da254d
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/api_server_route.j2
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Route
+metadata:
+ name: apiserver
+spec:
+ port:
+ targetPort: secure
+ tls:
+ termination: passthrough
+ to:
+ kind: Service
+ name: apiserver
+ weight: 100
+ wildcardPolicy: None
diff --git a/roles/openshift_service_catalog/templates/api_server_service.j2 b/roles/openshift_service_catalog/templates/api_server_service.j2
new file mode 100644
index 000000000..bae337201
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/api_server_service.j2
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: apiserver
+spec:
+ ports:
+ - name: secure
+ port: 443
+ protocol: TCP
+ targetPort: 6443
+ selector:
+ app: apiserver
+ sessionAffinity: None
diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2
new file mode 100644
index 000000000..33932eeb7
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/controller_manager.j2
@@ -0,0 +1,46 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ labels:
+ app: controller-manager
+ name: controller-manager
+spec:
+ selector:
+ matchLabels:
+ app: controller-manager
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app: controller-manager
+ spec:
+ nodeSelector:
+{% for key, value in node_selector.items() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+ containers:
+ - env:
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ args:
+ - -v
+ - "5"
+ - "--leader-election-namespace=$(K8S_NAMESPACE)"
+ image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
+ command: ["/usr/bin/controller-manager"]
+ imagePullPolicy: Always
+ name: controller-manager
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ securityContext: {}
+ terminationGracePeriodSeconds: 30
diff --git a/roles/openshift_service_catalog/templates/controller_manager_service.j2 b/roles/openshift_service_catalog/templates/controller_manager_service.j2
new file mode 100644
index 000000000..2bac645fc
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/controller_manager_service.j2
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: controller-manager
+spec:
+ ports:
+ - port: 6443
+ protocol: TCP
+ targetPort: 6443
+ selector:
+ app: controller-manager
+ sessionAffinity: None
+ type: ClusterIP
diff --git a/roles/openshift_service_catalog/vars/default_images.yml b/roles/openshift_service_catalog/vars/default_images.yml
new file mode 100644
index 000000000..6fb9d1b86
--- /dev/null
+++ b/roles/openshift_service_catalog/vars/default_images.yml
@@ -0,0 +1,5 @@
+---
+__openshift_service_catalog_image_prefix: "docker.io/openshift/origin-"
+__openshift_service_catalog_image_version: "latest"
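+# The templates compose these as {{ prefix }}service-catalog:{{ version }},
+# e.g. docker.io/openshift/origin-service-catalog:latest here.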
diff --git a/roles/openshift_service_catalog/vars/openshift-enterprise.yml b/roles/openshift_service_catalog/vars/openshift-enterprise.yml
new file mode 100644
index 000000000..8c3f14485
--- /dev/null
+++ b/roles/openshift_service_catalog/vars/openshift-enterprise.yml
@@ -0,0 +1,3 @@
+---
+__openshift_service_catalog_image_prefix: "registry.access.redhat.com/openshift3/"
+__openshift_service_catalog_image_version: "3.6.0"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index 4406ef28b..af901103e 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -26,7 +26,7 @@
- kind: "sa"
name: "heketi-{{ glusterfs_name }}-service-account"
- kind: "secret"
- name: "heketi-{{ glusterfs_name }}-user-secret"
+ name: "heketi-{{ glusterfs_name }}-admin-secret"
failed_when: False
when: glusterfs_heketi_wipe
@@ -66,6 +66,7 @@
- name: Add heketi service account to privileged SCC
oc_adm_policy_user:
+ namespace: "{{ glusterfs_namespace }}"
user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-{{ glusterfs_name }}-service-account"
resource_kind: scc
resource_name: privileged
@@ -74,6 +75,7 @@
- name: Allow heketi service account to view/edit pods
oc_adm_policy_user:
+ namespace: "{{ glusterfs_namespace }}"
user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-{{ glusterfs_name }}-service-account"
resource_kind: role
resource_name: edit
@@ -148,7 +150,7 @@
- name: Set heketi-cli command
set_fact:
- glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
+ glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
- name: Verify heketi service
command: "{{ glusterfs_heketi_client }} cluster list"
@@ -170,7 +172,7 @@
oc_secret:
namespace: "{{ glusterfs_namespace }}"
state: present
- name: "heketi-{{ glusterfs_name }}-secret"
+ name: "heketi-{{ glusterfs_name }}-admin-secret"
type: "kubernetes.io/glusterfs"
force: True
contents:
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index 26343b909..63009c539 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -4,7 +4,7 @@
register: setup_storage
- name: Copy heketi-storage list
- shell: "{{ openshift.common.client_binary }} rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
+ shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
# This is used in the subsequent task
- name: Copy the admin client config
@@ -125,7 +125,7 @@
- name: Set heketi-cli command
set_fact:
- glusterfs_heketi_client: "oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} heketi-cli -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
+ glusterfs_heketi_client: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} heketi-cli -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
- name: Verify heketi service
command: "{{ glusterfs_heketi_client }} cluster list"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
index 5ea801e60..2ec9a9e9a 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
@@ -8,4 +8,4 @@ parameters:
resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
restuser: "admin"
secretNamespace: "{{ glusterfs_namespace }}"
- secretName: "heketi-{{ glusterfs_name }}-secret"
+ secretName: "heketi-{{ glusterfs_name }}-admin-secret"
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index 16792388f..f4cb8ddb2 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -84,115 +84,119 @@
- openshift_version is not defined
- openshift_protect_installed_version | bool
-- name: Set openshift_version for rpm installation
- include: set_version_rpm.yml
- when: not is_containerized | bool
-
-- name: Set openshift_version for containerized installation
- include: set_version_containerized.yml
- when: is_containerized | bool
-
-- block:
- - name: Get available {{ openshift.common.service_type}} version
- repoquery:
- name: "{{ openshift.common.service_type}}"
- ignore_excluders: true
- register: rpm_results
- - fail:
- msg: "Package {{ openshift.common.service_type}} not found"
- when: not rpm_results.results.package_found
- - set_fact:
- openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
- - name: Fail if rpm version and docker image version are different
- fail:
- msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}"
- # Both versions have the same string representation
+# The rest of these tasks should only execute on
+# masters and nodes, where we can verify the hosts have subscriptions
+- when:
+ - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config']
+ block:
+ - name: Set openshift_version for rpm installation
+ include: set_version_rpm.yml
+ when: not is_containerized | bool
+
+ - name: Set openshift_version for containerized installation
+ include: set_version_containerized.yml
+ when: is_containerized | bool
+
+ - block:
+ - name: Get available {{ openshift.common.service_type }} version
+ repoquery:
+ name: "{{ openshift.common.service_type }}"
+ ignore_excluders: true
+ register: rpm_results
+ - fail:
+ msg: "Package {{ openshift.common.service_type}} not found"
+ when: not rpm_results.results.package_found
+ - set_fact:
+ openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
+ - name: Fail if rpm version and docker image version are different
+ fail:
+ msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}"
+ # Both versions have the same string representation
+ when:
+ - openshift_rpm_version != openshift_version
+ # if openshift_pkg_version or openshift_image_tag is defined, the user permits the rpm and docker image versions to differ
+ - openshift_pkg_version is not defined
+ - openshift_image_tag is not defined
when:
- - openshift_rpm_version != openshift_version
- # if openshift_pkg_version or openshift_image_tag is defined, user gives a permission the rpm and docker image versions can differ
- - openshift_pkg_version is not defined
- - openshift_image_tag is not defined
- when:
- - is_containerized | bool
- - not is_atomic | bool
-
-# Warn if the user has provided an openshift_image_tag but is not doing a containerized install
-# NOTE: This will need to be modified/removed for future container + rpm installations work.
-- name: Warn if openshift_image_tag is defined when not doing a containerized install
- debug:
- msg: >
- openshift_image_tag is used for containerized installs. If you are trying to
- specify an image for a non-container install see oreg_url or oreg_url_master or oreg_url_node.
- when:
- - not is_containerized | bool
- - openshift_image_tag is defined
-
+ - is_containerized | bool
+ - not is_atomic | bool
+
+ # Warn if the user has provided an openshift_image_tag but is not doing a containerized install
+ # NOTE: This will need to be modified/removed for future container + rpm installations to work.
+ - name: Warn if openshift_image_tag is defined when not doing a containerized install
+ debug:
+ msg: >
+ openshift_image_tag is used for containerized installs. If you are trying to
+ specify an image for a non-container install see oreg_url or oreg_url_master or oreg_url_node.
+ when:
+ - not is_containerized | bool
+ - openshift_image_tag is defined
-# At this point we know openshift_version is set appropriately. Now we set
-# openshift_image_tag and openshift_pkg_version, so all roles can always assume
-# each of this variables *will* be set correctly and can use them per their
-# intended purpose.
+ # At this point we know openshift_version is set appropriately. Now we set
+ # openshift_image_tag and openshift_pkg_version, so all roles can always assume
+ # each of these variables *will* be set correctly and can use them per their
+ # intended purpose.
-- block:
- - debug:
- msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}"
+ - block:
+ - debug:
+ msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}"
- - set_fact:
- openshift_image_tag: v{{ openshift_version }}
+ - set_fact:
+ openshift_image_tag: v{{ openshift_version }}
- when: openshift_image_tag is not defined
+ when: openshift_image_tag is not defined
-- block:
- - debug:
- msg: "openshift_pkg_version was not defined. Falling back to -{{ openshift_version }}"
+ - block:
+ - debug:
+ msg: "openshift_pkg_version was not defined. Falling back to -{{ openshift_version }}"
- - set_fact:
- openshift_pkg_version: -{{ openshift_version }}
+ - set_fact:
+ openshift_pkg_version: -{{ openshift_version }}
- when: openshift_pkg_version is not defined
+ when: openshift_pkg_version is not defined
-- fail:
- msg: openshift_version role was unable to set openshift_version
- name: Abort if openshift_version was not set
- when: openshift_version is not defined
+ - fail:
+ msg: openshift_version role was unable to set openshift_version
+ name: Abort if openshift_version was not set
+ when: openshift_version is not defined
-- fail:
- msg: openshift_version role was unable to set openshift_image_tag
- name: Abort if openshift_image_tag was not set
- when: openshift_image_tag is not defined
+ - fail:
+ msg: openshift_version role was unable to set openshift_image_tag
+ name: Abort if openshift_image_tag was not set
+ when: openshift_image_tag is not defined
-- fail:
- msg: openshift_version role was unable to set openshift_pkg_version
- name: Abort if openshift_pkg_version was not set
- when: openshift_pkg_version is not defined
+ - fail:
+ msg: openshift_version role was unable to set openshift_pkg_version
+ name: Abort if openshift_pkg_version was not set
+ when: openshift_pkg_version is not defined
-- fail:
- msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
- name: Abort if openshift_pkg_version was not set
- when:
- - not is_containerized | bool
- - openshift_version == '0.0'
+ - fail:
+ msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
+ name: Abort if openshift_pkg_version was not set
+ when:
+ - not is_containerized | bool
+ - openshift_version == '0.0'
-# We can't map an openshift_release to full rpm version like we can with containers; make sure
-# the rpm version we looked up matches the release requested and error out if not.
-- name: For an RPM install, abort when the release requested does not match the available version.
- when:
- - not is_containerized | bool
- - openshift_release is defined
- assert:
- that:
- - openshift_version.startswith(openshift_release) | bool
- msg: |-
- You requested openshift_release {{ openshift_release }}, which is not matched by
- the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }}
- on host {{ inventory_hostname }}.
- We will only install the latest RPMs, so please ensure you are getting the release
- you expect. You may need to adjust your Ansible inventory, modify the repositories
- available on the host, or run the appropriate OpenShift upgrade playbook.
+ # We can't map an openshift_release to full rpm version like we can with containers; make sure
+ # the rpm version we looked up matches the release requested and error out if not.
+ - name: For an RPM install, abort when the release requested does not match the available version.
+ when:
+ - not is_containerized | bool
+ - openshift_release is defined
+ assert:
+ that:
+ - openshift_version.startswith(openshift_release) | bool
+ msg: |-
+ You requested openshift_release {{ openshift_release }}, which is not matched by
+ the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }}
+ on host {{ inventory_hostname }}.
+ We will only install the latest RPMs, so please ensure you are getting the release
+ you expect. You may need to adjust your Ansible inventory, modify the repositories
+ available on the host, or run the appropriate OpenShift upgrade playbook.
-# The end result of these three variables is quite important so make sure they are displayed and logged:
-- debug: var=openshift_release
+ # The end result of these three variables is quite important so make sure they are displayed and logged:
+ - debug: var=openshift_release
-- debug: var=openshift_image_tag
+ - debug: var=openshift_image_tag
-- debug: var=openshift_pkg_version
+ - debug: var=openshift_pkg_version
diff --git a/roles/rhel_subscribe/meta/main.yml b/roles/rhel_subscribe/meta/main.yml
index 0bbeadd34..23d65c7ef 100644
--- a/roles/rhel_subscribe/meta/main.yml
+++ b/roles/rhel_subscribe/meta/main.yml
@@ -1,3 +1,2 @@
---
-dependencies:
- - role: openshift_facts
+dependencies: []
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index 28c3c7080..453044a6e 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -21,6 +21,14 @@
 msg: Either rhsub_pass or the rhel_subscription_pass env variable is required for this role.
when: rhel_subscription_pass is not defined
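+
+# /run/ostree-booted exists only on ostree-booted (Atomic Host) systems; this
+# stat replaces the openshift.common.is_atomic fact from openshift_facts.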
+- name: Detecting Atomic Host Operating System
+ stat:
+ path: /run/ostree-booted
+ register: ostree_booted
+
- name: Satellite preparation
command: "rpm -Uvh http://{{ rhel_subscription_server }}/pub/katello-ca-consumer-latest.noarch.rpm"
args:
@@ -57,5 +62,6 @@
when: openshift_pool_id.stdout != ''
- include: enterprise.yml
- when: deployment_type in [ 'enterprise', 'atomic-enterprise', 'openshift-enterprise' ] and
- not openshift.common.is_atomic | bool
+ when:
+ - deployment_type in [ 'enterprise', 'atomic-enterprise', 'openshift-enterprise' ]
+ - not ostree_booted.stat.exists | bool