Diffstat (limited to 'roles')
-rw-r--r--  roles/ansible_service_broker/defaults/main.yml | 1
-rw-r--r--  roles/ansible_service_broker/tasks/generate_certs.yml | 35
-rw-r--r--  roles/ansible_service_broker/tasks/install.yml | 146
-rw-r--r--  roles/ansible_service_broker/tasks/main.yml | 2
-rw-r--r--  roles/ansible_service_broker/tasks/remove.yml | 32
-rw-r--r--  roles/ansible_service_broker/tasks/validate_facts.yml | 6
-rw-r--r--  roles/ansible_service_broker/vars/default_images.yml | 2
-rw-r--r--  roles/ansible_service_broker/vars/openshift-enterprise.yml | 2
-rw-r--r--  roles/docker/defaults/main.yml | 15
-rw-r--r--  roles/docker/meta/main.yml | 1
-rw-r--r--  roles/docker/tasks/crio_firewall.yml | 40
-rw-r--r--  roles/docker/tasks/main.yml | 56
-rw-r--r--  roles/docker/tasks/package_docker.yml | 1
-rw-r--r--  roles/docker/tasks/registry_auth.yml | 4
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml | 14
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 2
-rw-r--r--  roles/docker/templates/crio.conf.j2 | 2
-rw-r--r--  roles/etcd/tasks/migration/check.yml | 11
-rw-r--r--  roles/installer_checkpoint/callback_plugins/installer_checkpoint.py | 10
-rw-r--r--  roles/lib_openshift/library/oc_secret.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_storageclass.py | 2
-rw-r--r--  roles/lib_openshift/src/ansible/oc_storageclass.py | 2
-rw-r--r--  roles/lib_openshift/src/class/oc_secret.py | 2
-rw-r--r--  roles/nuage_master/handlers/main.yaml | 7
-rw-r--r--  roles/openshift_aws/defaults/main.yml | 40
-rw-r--r--  roles/openshift_aws/filter_plugins/openshift_aws_filters.py | 6
-rw-r--r--  roles/openshift_aws/tasks/build_node_group.yml | 4
-rw-r--r--  roles/openshift_aws/tasks/elb.yml | 27
-rw-r--r--  roles/openshift_aws/tasks/launch_config.yml | 2
-rw-r--r--  roles/openshift_aws/tasks/master_facts.yml | 10
-rw-r--r--  roles/openshift_aws/tasks/provision.yml | 46
-rw-r--r--  roles/openshift_aws/tasks/seal_ami.yml | 7
-rw-r--r--  roles/openshift_aws/tasks/security_group.yml | 3
-rw-r--r--  roles/openshift_aws/templates/user_data.j2 | 2
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 34
-rw-r--r--  roles/openshift_cli/library/openshift_container_binary_sync.py | 29
-rw-r--r--  roles/openshift_default_storage_class/defaults/main.yml | 6
-rw-r--r--  roles/openshift_default_storage_class/tasks/main.yml | 2
-rw-r--r--  roles/openshift_docker_gc/defaults/main.yml | 3
-rw-r--r--  roles/openshift_docker_gc/meta/main.yml | 13
-rw-r--r--  roles/openshift_docker_gc/tasks/main.yaml | 27
-rw-r--r--  roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 | 58
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json | 316
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-streams/jboss-image-streams.json | 214
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 2
-rw-r--r--  roles/openshift_gcp/templates/provision.j2.sh | 6
-rw-r--r--  roles/openshift_gcp/templates/remove.j2.sh | 26
-rw-r--r--  roles/openshift_health_checker/openshift_checks/disk_availability.py | 13
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_storage.py | 2
-rw-r--r--  roles/openshift_health_checker/test/disk_availability_test.py | 23
-rw-r--r--  roles/openshift_hosted/tasks/router.yml | 9
-rw-r--r--  roles/openshift_hosted_metrics/handlers/main.yml | 7
-rw-r--r--  roles/openshift_logging/README.md | 2
-rw-r--r--  roles/openshift_logging/filter_plugins/openshift_logging.py | 27
-rw-r--r--  roles/openshift_logging/filter_plugins/test | 15
-rw-r--r--  roles/openshift_logging/handlers/main.yml | 7
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 12
-rw-r--r--  roles/openshift_logging_curator/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/files/es_migration.sh | 79
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_fluentd/tasks/main.yaml | 3
-rw-r--r--  roles/openshift_logging_kibana/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_logging_mux/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_management/README.md | 210
-rw-r--r--  roles/openshift_management/defaults/main.yml | 14
-rw-r--r--  roles/openshift_management/files/examples/container_providers.yml | 22
-rw-r--r--  roles/openshift_management/filter_plugins/oo_management_filters.py | 32
-rw-r--r--  roles/openshift_management/tasks/add_container_provider.yml | 77
-rw-r--r--  roles/openshift_management/tasks/main.yml | 29
-rw-r--r--  roles/openshift_management/tasks/noop.yml | 1
-rw-r--r--  roles/openshift_management/tasks/storage/create_nfs_pvs.yml | 8
-rw-r--r--  roles/openshift_management/tasks/storage/nfs.yml | 31
-rw-r--r--  roles/openshift_management/tasks/storage/nfs_server.yml | 45
-rw-r--r--  roles/openshift_management/tasks/template.yml | 26
-rw-r--r--  roles/openshift_management/tasks/validate.yml | 15
-rw-r--r--  roles/openshift_master/defaults/main.yml | 10
-rw-r--r--  roles/openshift_master/handlers/main.yml | 9
-rw-r--r--  roles/openshift_master/tasks/bootstrap.yml | 21
-rw-r--r--  roles/openshift_master/tasks/clean_systemd_units.yml | 9
-rw-r--r--  roles/openshift_master/tasks/journald.yml | 13
-rw-r--r--  roles/openshift_master/tasks/registry_auth.yml | 3
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 27
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 10
-rw-r--r--  roles/openshift_master_facts/filter_plugins/openshift_master.py | 20
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 1
-rw-r--r--  roles/openshift_metrics/handlers/main.yml | 7
-rw-r--r--  roles/openshift_nfs/tasks/create_export.yml | 2
-rw-r--r--  roles/openshift_node/defaults/main.yml | 7
-rw-r--r--  roles/openshift_node/tasks/bootstrap.yml | 10
-rw-r--r--  roles/openshift_node/tasks/registry_auth.yml | 3
-rw-r--r--  roles/openshift_node_dnsmasq/defaults/main.yml | 5
-rwxr-xr-x  roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh | 2
-rw-r--r--  roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 | 9
-rw-r--r--  roles/openshift_node_upgrade/tasks/registry_auth.yml | 3
-rw-r--r--  roles/openshift_prometheus/defaults/main.yaml | 10
-rw-r--r--  roles/openshift_prometheus/tasks/main.yaml | 5
-rw-r--r--  roles/openshift_prometheus/templates/prometheus.j2 | 50
-rw-r--r--  roles/openshift_prometheus/vars/default_images.yml | 12
-rw-r--r--  roles/openshift_prometheus/vars/openshift-enterprise.yml | 12
-rw-r--r--  roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml | 4
-rw-r--r--  roles/openshift_provisioners/tasks/generate_secrets.yaml | 4
-rw-r--r--  roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml | 4
-rw-r--r--  roles/openshift_provisioners/tasks/install_efs.yaml | 8
-rw-r--r--  roles/openshift_provisioners/tasks/install_support.yaml | 17
-rw-r--r--  roles/openshift_provisioners/templates/pv.j2 | 1
-rw-r--r--  roles/openshift_provisioners/templates/pvc.j2 | 1
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 5
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/main.yml | 20
-rw-r--r--  roles/openshift_service_catalog/tasks/generate_certs.yml | 2
-rw-r--r--  roles/openshift_service_catalog/tasks/install.yml | 41
-rw-r--r--  roles/openshift_service_catalog/tasks/remove.yml | 6
-rw-r--r--  roles/openshift_service_catalog/templates/api_server.j2 | 3
-rw-r--r--  roles/openshift_service_catalog/templates/controller_manager.j2 | 3
-rw-r--r--  roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 (renamed from roles/openshift_service_catalog/templates/sc_role_patching.j2) | 1
-rw-r--r--  roles/openshift_service_catalog/templates/sc_view_role_patching.j2 | 11
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 14
-rw-r--r--  roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml | 135
-rw-r--r--  roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml | 136
-rw-r--r--  roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml | 134
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/kernel_modules.yml | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/glusterfs.conf | 4
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 | 13
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2 | 36
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2 | 49
-rw-r--r--  roles/template_service_broker/files/openshift-ansible-catalog-console.js | 2
-rw-r--r--  roles/template_service_broker/tasks/install.yml | 2
-rw-r--r--  roles/template_service_broker/tasks/main.yml | 2
132 files changed, 2517 insertions, 458 deletions
diff --git a/roles/ansible_service_broker/defaults/main.yml b/roles/ansible_service_broker/defaults/main.yml
index dc05b03b5..34110ca99 100644
--- a/roles/ansible_service_broker/defaults/main.yml
+++ b/roles/ansible_service_broker/defaults/main.yml
@@ -14,3 +14,4 @@ ansible_service_broker_launch_apb_on_bind: false
ansible_service_broker_image_pull_policy: IfNotPresent
ansible_service_broker_sandbox_role: edit
ansible_service_broker_auto_escalate: false
+ansible_service_broker_local_registry_whitelist: []
diff --git a/roles/ansible_service_broker/tasks/generate_certs.yml b/roles/ansible_service_broker/tasks/generate_certs.yml
new file mode 100644
index 000000000..85e67e00c
--- /dev/null
+++ b/roles/ansible_service_broker/tasks/generate_certs.yml
@@ -0,0 +1,35 @@
+---
+
+- when: ansible_service_broker_certs_dir is undefined
+ block:
+ - name: Create ansible-service-broker cert directory
+ file:
+ path: "{{ openshift.common.config_base }}/ansible-service-broker"
+ state: directory
+ mode: 0755
+ check_mode: no
+
+ - name: Create self signing ca cert
+ command: 'openssl req -nodes -x509 -newkey rsa:4096 -keyout {{ openshift.common.config_base }}/ansible-service-broker/key.pem -out {{ openshift.common.config_base }}/ansible-service-broker/cert.pem -days 365 -subj "/CN=asb-etcd.openshift-ansible-service-broker.svc"'
+ args:
+ creates: '{{ openshift.common.config_base }}/ansible-service-broker/cert.pem'
+
+ - name: Create self signed client cert
+ command: '{{ item.cmd }}'
+ args:
+ creates: '{{ item.creates }}'
+ with_items:
+ - cmd: openssl genrsa -out {{ openshift.common.config_base }}/ansible-service-broker/client.key 2048
+ creates: '{{ openshift.common.config_base }}/ansible-service-broker/client.key'
+ - cmd: 'openssl req -new -key {{ openshift.common.config_base }}/ansible-service-broker/client.key -out {{ openshift.common.config_base }}/ansible-service-broker/client.csr -subj "/CN=client"'
+ creates: '{{ openshift.common.config_base }}/ansible-service-broker/client.csr'
+ - cmd: openssl x509 -req -in {{ openshift.common.config_base }}/ansible-service-broker/client.csr -CA {{ openshift.common.config_base }}/ansible-service-broker/cert.pem -CAkey {{ openshift.common.config_base }}/ansible-service-broker/key.pem -CAcreateserial -out {{ openshift.common.config_base }}/ansible-service-broker/client.pem -days 1024
+ creates: '{{ openshift.common.config_base }}/ansible-service-broker/client.pem'
+
+ - set_fact:
+ ansible_service_broker_certs_dir: "{{ openshift.common.config_base }}/ansible-service-broker"
+
+- set_fact:
+ etcd_ca_cert: "{{ lookup('file', '{{ ansible_service_broker_certs_dir }}/cert.pem') }}"
+ etcd_client_cert: "{{ lookup('file', '{{ ansible_service_broker_certs_dir }}/client.pem') }}"
+ etcd_client_key: "{{ lookup('file', '{{ ansible_service_broker_certs_dir }}/client.key') }}"
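For reference (not part of the patch), a minimal Python sketch of how to check what these tasks produce: the client certificate should verify against the self-signed CA. Paths assume the default config_base of /etc/origin.

# Verify the generated client cert chains back to the self-signed CA.
import subprocess

base = "/etc/origin/ansible-service-broker"
result = subprocess.run(
    ["openssl", "verify", "-CAfile", base + "/cert.pem", base + "/client.pem"],
    capture_output=True, text=True,
)
print(result.stdout)  # expected: ".../client.pem: OK"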
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
index 66c3d9cc4..90a4418fb 100644
--- a/roles/ansible_service_broker/tasks/install.yml
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -32,6 +32,7 @@
- include: validate_facts.yml
+- include: generate_certs.yml
# Deployment of ansible-service-broker starts here
- name: create openshift-ansible-service-broker project
@@ -68,6 +69,9 @@
- apiGroups: ["authentication.k8s.io"]
resources: ["tokenreviews"]
verbs: ["create"]
+ - apiGroups: ["image.openshift.io", ""]
+ resources: ["images"]
+ verbs: ["get", "list"]
- name: Create asb-access cluster role
oc_clusterrole:
@@ -116,6 +120,24 @@
kubernetes.io/service-account.name: asb-client
type: kubernetes.io/service-account-token
+- name: Create etcd-auth secret
+ oc_secret:
+ name: etcd-auth-secret
+ namespace: openshift-ansible-service-broker
+ contents:
+ - path: ca.crt
+ data: '{{ etcd_ca_cert }}'
+
+- name: Create broker-etcd-auth secret
+ oc_secret:
+ name: broker-etcd-auth-secret
+ namespace: openshift-ansible-service-broker
+ contents:
+ - path: client.crt
+ data: '{{ etcd_client_cert }}'
+ - path: client.key
+ data: '{{ etcd_client_key }}'
+
- oc_secret:
state: list
namespace: openshift-ansible-service-broker
@@ -123,7 +145,7 @@
register: asb_client_secret
- set_fact:
- service_ca_crt: asb_client_secret.results.results.0.data['service-ca.crt']
+ service_ca_crt: "{{ asb_client_secret.results.results.0.data['service-ca.crt'] }}"
# Using oc_obj because oc_service doesn't seem to allow annotations
# TODO: Extend oc_service to allow annotations
@@ -156,6 +178,34 @@
app: openshift-ansible-service-broker
service: asb
+- name: create asb-etcd service
+ oc_obj:
+ name: asb-etcd
+ namespace: openshift-ansible-service-broker
+ state: present
+ kind: Service
+ content:
+ path: /tmp/asbetcdsvcout
+ data:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: asb-etcd
+ labels:
+ app: etcd
+ service: asb-etcd
+ annotations:
+ service.alpha.openshift.io/serving-cert-secret-name: etcd-tls
+ spec:
+ ports:
+ - name: port-2379
+ port: 2379
+ targetPort: 2379
+ protocol: TCP
+ selector:
+ app: etcd
+ service: asb-etcd
+
- name: create route for ansible-service-broker service
oc_route:
name: asb-1338
@@ -227,6 +277,8 @@
mountPath: /etc/ansible-service-broker
- name: asb-tls
mountPath: /etc/tls/private
+ - name: asb-etcd-auth
+ mountPath: /var/run/asb-etcd-auth
ports:
- containerPort: 1338
protocol: TCP
@@ -249,7 +301,50 @@
scheme: HTTPS
initialDelaySeconds: 15
timeoutSeconds: 1
+ volumes:
+ - name: config-volume
+ configMap:
+ name: broker-config
+ items:
+ - key: broker-config
+ path: config.yaml
+ - name: asb-tls
+ secret:
+ secretName: asb-tls
+ - name: asb-etcd-auth
+ secret:
+ secretName: broker-etcd-auth-secret
+- name: Create asb-etcd deployment config
+ oc_obj:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: present
+ kind: DeploymentConfig
+ content:
+ path: /tmp/dcout
+ data:
+ apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: asb-etcd
+ labels:
+ app: etcd
+ service: asb-etcd
+ spec:
+ replicas: 1
+ selector:
+ app: etcd
+ strategy:
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ app: etcd
+ service: asb-etcd
+ spec:
+ serviceAccount: asb
+ containers:
- image: "{{ ansible_service_broker_etcd_image }}"
name: etcd
imagePullPolicy: IfNotPresent
@@ -258,8 +353,12 @@
args:
- "{{ ansible_service_broker_etcd_image_etcd_path }}"
- "--data-dir=/data"
- - "--listen-client-urls=http://0.0.0.0:2379"
- - "--advertise-client-urls=http://0.0.0.0:2379"
+ - "--listen-client-urls=https://0.0.0.0:2379"
+ - "--advertise-client-urls=https://0.0.0.0:2379"
+ - "--client-cert-auth"
+ - "--trusted-ca-file=/var/run/etcd-auth-secret/ca.crt"
+ - "--cert-file=/etc/tls/private/tls.crt"
+ - "--key-file=/etc/tls/private/tls.key"
ports:
- containerPort: 2379
protocol: TCP
@@ -267,21 +366,22 @@
- name: ETCDCTL_API
value: "3"
volumeMounts:
- - mountPath: /data
- name: etcd
+ - name: etcd
+ mountPath: /data
+ - name: etcd-tls
+ mountPath: /etc/tls/private
+ - name: etcd-auth
+ mountPath: /var/run/etcd-auth-secret
volumes:
- name: etcd
persistentVolumeClaim:
claimName: etcd
- - name: config-volume
- configMap:
- name: broker-config
- items:
- - key: broker-config
- path: config.yaml
- - name: asb-tls
+ - name: etcd-tls
secret:
- secretName: asb-tls
+ secretName: etcd-tls
+ - name: etcd-auth
+ secret:
+ secretName: etcd-auth-secret
# TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following:
@@ -307,16 +407,19 @@
- type: {{ ansible_service_broker_registry_type }}
name: {{ ansible_service_broker_registry_name }}
url: {{ ansible_service_broker_registry_url }}
- user: {{ ansible_service_broker_registry_user }}
- pass: {{ ansible_service_broker_registry_password }}
org: {{ ansible_service_broker_registry_organization }}
tag: {{ ansible_service_broker_registry_tag }}
white_list: {{ ansible_service_broker_registry_whitelist }}
+ - type: local_registry
+ namespaces: ['openshift']
+ white_list: {{ ansible_service_broker_local_registry_whitelist }}
dao:
- etcd_host: 0.0.0.0
+ etcd_host: asb-etcd.openshift-ansible-service-broker.svc
etcd_port: 2379
+ etcd_ca_file: /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
+ etcd_client_cert: /var/run/asb-etcd-auth/client.crt
+ etcd_client_key: /var/run/asb-etcd-auth/client.key
log:
- logfile: /var/log/ansible-service-broker/asb.log
stdout: true
level: {{ ansible_service_broker_log_level }}
color: true
@@ -340,6 +443,15 @@
- type: basic
enabled: false
+- oc_secret:
+ name: asb-registry-auth
+ namespace: openshift-ansible-service-broker
+ state: present
+ contents:
+ - path: username
+ data: "{{ ansible_service_broker_registry_user }}"
+ - path: password
+ data: "{{ ansible_service_broker_registry_password }}"
- name: Create the Broker resource in the catalog
oc_obj:
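Note the dao section above: the broker now reaches etcd over mutual TLS using the mounted service CA and client cert/key. A minimal sketch of an equivalent client connection, using the third-party python-etcd3 package purely as an illustration:

import etcd3

# Same endpoint and cert paths as the broker-config rendered above.
client = etcd3.client(
    host="asb-etcd.openshift-ansible-service-broker.svc",
    port=2379,
    ca_cert="/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt",
    cert_cert="/var/run/asb-etcd-auth/client.crt",
    cert_key="/var/run/asb-etcd-auth/client.key",
)
client.put("/sanity", "ok")  # rejected unless etcd trusts the client cert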
diff --git a/roles/ansible_service_broker/tasks/main.yml b/roles/ansible_service_broker/tasks/main.yml
index d8695bd3a..2ed156728 100644
--- a/roles/ansible_service_broker/tasks/main.yml
+++ b/roles/ansible_service_broker/tasks/main.yml
@@ -2,7 +2,7 @@
# do any asserts here
- include: install.yml
- when: ansible_service_broker_install | default(false) | bool
+ when: ansible_service_broker_install | default(true) | bool
- include: remove.yml
when: ansible_service_broker_remove | default(false) | bool
diff --git a/roles/ansible_service_broker/tasks/remove.yml b/roles/ansible_service_broker/tasks/remove.yml
index 51b86fb26..a1ac740e0 100644
--- a/roles/ansible_service_broker/tasks/remove.yml
+++ b/roles/ansible_service_broker/tasks/remove.yml
@@ -46,18 +46,42 @@
resource_name: asb-access
user: "system:serviceaccount:openshift-ansible-service-broker:asb-client"
+- name: remove asb-registry auth secret
+ oc_secret:
+ state: absent
+ name: asb-registry-auth
+ namespace: openshift-ansible-service-broker
+
- name: remove asb-client token secret
oc_secret:
state: absent
name: asb-client
namespace: openshift-ansible-service-broker
+- name: Remove etcd-auth secret
+ oc_secret:
+ state: absent
+ name: etcd-auth-secret
+ namespace: openshift-ansible-service-broker
+
+- name: Remove broker-etcd-auth secret
+ oc_secret:
+ state: absent
+ name: broker-etcd-auth-secret
+ namespace: openshift-ansible-service-broker
+
- name: remove ansible-service-broker service
oc_service:
name: asb
namespace: openshift-ansible-service-broker
state: absent
+- name: remove asb-etcd service
+ oc_service:
+ state: absent
+ name: asb-etcd
+ namespace: openshift-ansible-service-broker
+
- name: remove etcd service
oc_service:
name: etcd
@@ -83,6 +107,14 @@
kind: DeploymentConfig
state: absent
+- name: remove Ansible Service Broker etcd deployment config
+ oc_obj:
+ name: asb-etcd
+ namespace: openshift-ansible-service-broker
+ kind: DeploymentConfig
+ state: absent
+
+
- name: remove secret for broker auth
oc_obj:
name: asb-client
diff --git a/roles/ansible_service_broker/tasks/validate_facts.yml b/roles/ansible_service_broker/tasks/validate_facts.yml
index 604d24e1d..a2345551b 100644
--- a/roles/ansible_service_broker/tasks/validate_facts.yml
+++ b/roles/ansible_service_broker/tasks/validate_facts.yml
@@ -1,11 +1,9 @@
---
- name: validate Dockerhub registry settings
- fail: msg="To use the dockerhub registry, you must provide the ansible_service_broker_registry_user. ansible_service_broker_registry_password, and ansible_service_broker_registry_organization parameters"
+  fail: msg="To use the dockerhub registry, you must provide the ansible_service_broker_registry_organization parameter"
when:
- ansible_service_broker_registry_type == 'dockerhub'
- - not (ansible_service_broker_registry_user and
- ansible_service_broker_registry_password and
- ansible_service_broker_registry_organization)
+ - not ansible_service_broker_registry_organization
- name: validate RHCC registry settings
diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml
index 8438e993f..248e0363d 100644
--- a/roles/ansible_service_broker/vars/default_images.yml
+++ b/roles/ansible_service_broker/vars/default_images.yml
@@ -12,6 +12,6 @@ __ansible_service_broker_registry_name: dh
__ansible_service_broker_registry_url: null
__ansible_service_broker_registry_user: null
__ansible_service_broker_registry_password: null
-__ansible_service_broker_registry_organization: null
+__ansible_service_broker_registry_organization: ansibleplaybookbundle
__ansible_service_broker_registry_tag: latest
__ansible_service_broker_registry_whitelist: []
diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml
index fc58b4fd8..c203f596e 100644
--- a/roles/ansible_service_broker/vars/openshift-enterprise.yml
+++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml
@@ -3,7 +3,7 @@
__ansible_service_broker_image_prefix: registry.access.redhat.com/openshift3/ose-
__ansible_service_broker_image_tag: v3.7
-__ansible_service_broker_etcd_image_prefix: rhel7/
+__ansible_service_broker_etcd_image_prefix: registry.access.redhat.com/rhel7/
__ansible_service_broker_etcd_image_tag: latest
__ansible_service_broker_etcd_image_etcd_path: /bin/etcd
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index 1c830cb4e..c086c28df 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -20,4 +20,19 @@ l2_docker_additional_registries: "{% if openshift_docker_additional_registries i
l2_docker_blocked_registries: "{% if openshift_docker_blocked_registries is string %}{% if openshift_docker_blocked_registries == '' %}[]{% elif ',' in openshift_docker_blocked_registries %}{{ openshift_docker_blocked_registries.split(',') | list }}{% else %}{{ [ openshift_docker_blocked_registries ] }}{% endif %}{% else %}{{ openshift_docker_blocked_registries }}{% endif %}"
l2_docker_insecure_registries: "{% if openshift_docker_insecure_registries is string %}{% if openshift_docker_insecure_registries == '' %}[]{% elif ',' in openshift_docker_insecure_registries %}{{ openshift_docker_insecure_registries.split(',') | list }}{% else %}{{ [ openshift_docker_insecure_registries ] }}{% endif %}{% else %}{{ openshift_docker_insecure_registries }}{% endif %}"
+openshift_docker_use_etc_containers: False
containers_registries_conf_path: /etc/containers/registries.conf
+
+r_crio_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_crio_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+
+r_crio_os_firewall_deny: []
+r_crio_os_firewall_allow:
+- service: crio
+ port: 10010/tcp
+
+
+openshift_docker_is_node_or_master: "{{ True if inventory_hostname in (groups['oo_masters_to_config']|default([])) or inventory_hostname in (groups['oo_nodes_to_config']|default([])) else False | bool }}"
+
+docker_alt_storage_path: /var/lib/containers/docker
+docker_default_storage_path: /var/lib/docker
diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml
index b773a417c..62b8a2eb5 100644
--- a/roles/docker/meta/main.yml
+++ b/roles/docker/meta/main.yml
@@ -11,3 +11,4 @@ galaxy_info:
- 7
dependencies:
- role: lib_openshift
+- role: lib_os_firewall
diff --git a/roles/docker/tasks/crio_firewall.yml b/roles/docker/tasks/crio_firewall.yml
new file mode 100644
index 000000000..fbd1ff515
--- /dev/null
+++ b/roles/docker/tasks/crio_firewall.yml
@@ -0,0 +1,40 @@
+---
+- when: r_crio_firewall_enabled | bool and not r_crio_use_firewalld | bool
+ block:
+ - name: Add iptables allow rules
+ os_firewall_manage_iptables:
+ name: "{{ item.service }}"
+ action: add
+ protocol: "{{ item.port.split('/')[1] }}"
+ port: "{{ item.port.split('/')[0] }}"
+ when: item.cond | default(True)
+ with_items: "{{ r_crio_os_firewall_allow }}"
+
+ - name: Remove iptables rules
+ os_firewall_manage_iptables:
+ name: "{{ item.service }}"
+ action: remove
+ protocol: "{{ item.port.split('/')[1] }}"
+ port: "{{ item.port.split('/')[0] }}"
+ when: item.cond | default(True)
+ with_items: "{{ r_crio_os_firewall_deny }}"
+
+- when: r_crio_firewall_enabled | bool and r_crio_use_firewalld | bool
+ block:
+ - name: Add firewalld allow rules
+ firewalld:
+ port: "{{ item.port }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ when: item.cond | default(True)
+ with_items: "{{ r_crio_os_firewall_allow }}"
+
+ - name: Remove firewalld allow rules
+ firewalld:
+ port: "{{ item.port }}"
+ permanent: true
+ immediate: true
+ state: disabled
+ when: item.cond | default(True)
+ with_items: "{{ r_crio_os_firewall_deny }}"
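The rules above are declared as single "port/protocol" strings; the iptables tasks split them apart. A one-line sketch of that parsing:

# "10010/tcp" -> port "10010", protocol "tcp", as in item.port.split('/')
rule = {"service": "crio", "port": "10010/tcp"}
port, protocol = rule["port"].split("/")
print(port, protocol)  # 10010 tcp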
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 5ea73568a..3c814d8d8 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -25,6 +25,15 @@
- not l_use_system_container
- not l_use_crio_only
+- name: Ensure /var/lib/containers exists
+ file:
+ path: /var/lib/containers
+ state: directory
+
+- name: Fix SELinux Permissions on /var/lib/containers
+ command: "restorecon -R /var/lib/containers/"
+ changed_when: false
+
- name: Use System Container Docker if Requested
include: systemcontainer_docker.yml
when:
@@ -35,4 +44,49 @@
include: systemcontainer_crio.yml
when:
- l_use_crio
- - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config']
+ - openshift_docker_is_node_or_master | bool
+
+- name: stat the docker data dir
+ stat:
+ path: "{{ docker_default_storage_path }}"
+ register: dockerstat
+
+- when:
+ - l_use_crio
+ - dockerstat.stat.islink is defined and not (dockerstat.stat.islink | bool)
+ block:
+ - name: stop the current running docker
+ systemd:
+ state: stopped
+ name: "{{ openshift.docker.service_name }}"
+
+ - name: "Ensure {{ docker_alt_storage_path }} exists"
+ file:
+ path: "{{ docker_alt_storage_path }}"
+ state: directory
+
+ - name: "Set the selinux context on {{ docker_alt_storage_path }}"
+ command: "semanage fcontext -a -e {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
+ register: results
+ failed_when:
+ - results.rc == 1
+ - "'already exists' not in results.stderr"
+
+ - name: "restorecon the {{ docker_alt_storage_path }}"
+ command: "restorecon -r {{ docker_alt_storage_path }}"
+
+ - name: Remove the old docker location
+ file:
+ state: absent
+ path: "{{ docker_default_storage_path }}"
+
+ - name: Setup the link
+ file:
+ state: link
+ src: "{{ docker_alt_storage_path }}"
+ path: "{{ docker_default_storage_path }}"
+
+ - name: start docker
+ systemd:
+ state: started
+ name: "{{ openshift.docker.service_name }}"
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index b16413f72..c1aedf879 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -81,6 +81,7 @@
template:
dest: "{{ containers_registries_conf_path }}"
src: registries.conf
+ when: openshift_docker_use_etc_containers | bool
notify:
- restart docker
diff --git a/roles/docker/tasks/registry_auth.yml b/roles/docker/tasks/registry_auth.yml
index 65ed60efa..d05b7f2b8 100644
--- a/roles/docker/tasks/registry_auth.yml
+++ b/roles/docker/tasks/registry_auth.yml
@@ -7,6 +7,10 @@
- name: Create credentials for docker cli registry auth
command: "docker --config={{ docker_cli_auth_config_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ register: openshift_docker_credentials_create_res
+ retries: 3
+ delay: 5
+ until: openshift_docker_credentials_create_res.rc == 0
when:
- oreg_auth_user is defined
- (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
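The retries/delay/until combination added above keeps re-running the login until it exits 0. A rough Python sketch of the same semantics (attempt count and delay are illustrative):

import subprocess
import time

def login_with_retries(cmd, attempts=3, delay=5):
    # Re-run the command until rc == 0, sleeping between attempts.
    for _ in range(attempts):
        if subprocess.run(cmd).returncode == 0:
            return
        time.sleep(delay)
    raise RuntimeError("registry login kept failing")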
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index 13bbd359e..1e2d64293 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -3,16 +3,16 @@
# TODO: Much of this file is shared with container engine tasks
- set_fact:
l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}"
- when: l2_docker_insecure_registries
+ when: l2_docker_insecure_registries | bool
- set_fact:
l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}"
- when: l2_docker_additional_registries
+ when: l2_docker_additional_registries | bool
- set_fact:
l_crio_registries: "{{ ['docker.io'] }}"
- when: not l2_docker_additional_registries
+ when: not (l2_docker_additional_registries | bool)
- set_fact:
l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}"
- when: l2_docker_additional_registries
+ when: l2_docker_additional_registries | bool
- set_fact:
l_openshift_image_tag: "{{ openshift_image_tag | string }}"
@@ -62,7 +62,7 @@
shell: lsmod | grep overlay
register: l_has_overlay_in_kernel
ignore_errors: yes
-
+ failed_when: false
- when: l_has_overlay_in_kernel.rc != 0
block:
@@ -161,6 +161,10 @@
path: /etc/cni/net.d/
state: directory
+- name: setup firewall for CRI-O
+ include: crio_firewall.yml
+ static: yes
+
- name: Configure the CNI network
template:
dest: /etc/cni/net.d/openshift-sdn.conf
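The set_fact expressions at the top of this file build a TOML-style quoted list out of an Ansible list; outside of Jinja2 the same string operation looks like this:

registries = ["registry.example.com", "docker.io"]  # sample values
quoted = '"{}"'.format('", "'.join(registries))
print(quoted)  # "registry.example.com", "docker.io"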
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index 726e8ada7..aa3b35ddd 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -173,4 +173,6 @@
- set_fact:
docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}"
+- include: registry_auth.yml
+
- meta: flush_handlers
diff --git a/roles/docker/templates/crio.conf.j2 b/roles/docker/templates/crio.conf.j2
index b715c2ffa..93014a80d 100644
--- a/roles/docker/templates/crio.conf.j2
+++ b/roles/docker/templates/crio.conf.j2
@@ -108,7 +108,7 @@ pids_limit = 1024
# log_size_max is the max limit for the container log size in bytes.
# Negative values indicate that no limit is imposed.
-log_size_max = -1
+log_size_max = 52428800
# The "crio.image" table contains settings pertaining to the
# management of OCI images.
diff --git a/roles/etcd/tasks/migration/check.yml b/roles/etcd/tasks/migration/check.yml
index 0804d9e1c..5c45e5ae1 100644
--- a/roles/etcd/tasks/migration/check.yml
+++ b/roles/etcd/tasks/migration/check.yml
@@ -3,6 +3,17 @@
# Check the cluster is healthy
- include: check_cluster_health.yml
+# Check if there is at least one v2 snapshot
+- name: Check if there is at least one v2 snapshot
+ find:
+ paths: "{{ etcd_data_dir }}/member/snap"
+ patterns: '*.snap'
+ register: snapshots_result
+
+- fail:
+    msg: "Before the migration can proceed, the etcd member must have written at least one snapshot under the {{ etcd_data_dir }}/member/snap directory."
+ when: snapshots_result.matched | int == 0
+
# Check if the member has v3 data already
# Run the migration only if the data are v2
- name: Check if there are any v3 data
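The new snapshot check, expressed in plain Python (etcd_data_dir is assumed to be /var/lib/etcd here):

import glob

snaps = glob.glob("/var/lib/etcd/member/snap/*.snap")
if not snaps:
    raise SystemExit("no v2 snapshot found; etcd migration cannot proceed")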
diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
index 55c44bb84..b17358882 100644
--- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
+++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
@@ -72,6 +72,7 @@ class CallbackModule(CallbackBase):
# Set the order of the installer phases
installer_phases = [
'installer_phase_initialize',
+ 'installer_phase_health',
'installer_phase_etcd',
'installer_phase_nfs',
'installer_phase_loadbalancer',
@@ -93,6 +94,10 @@ class CallbackModule(CallbackBase):
'title': 'Initialization',
'playbook': ''
},
+ 'installer_phase_health': {
+ 'title': 'Health Check',
+ 'playbook': 'playbooks/byo/openshift-checks/pre-install.yml'
+ },
'installer_phase_etcd': {
'title': 'etcd Install',
'playbook': 'playbooks/byo/openshift-etcd/config.yml'
@@ -166,11 +171,6 @@ class CallbackModule(CallbackBase):
self._display.display(
'\tThis phase can be restarted by running: {}'.format(
phase_attributes[phase]['playbook']))
- else:
- # Phase was not found in custom stats
- self._display.display(
- '{}{}: {}'.format(phase_title, ' ' * padding, 'Not Started'),
- color=C.COLOR_SKIP)
self._display.display("", screen_only=True)
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index 0614f359d..62bda33ad 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -1633,7 +1633,7 @@ class OCSecret(OpenShiftCLI):
This receives a list of file names and converts it into a secret.
The secret is then written to disk and passed into the `oc replace` command.
'''
- secret = self.prep_secret(files, force)
+ secret = self.prep_secret(files, force=force)
if secret['returncode'] != 0:
return secret
diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py
index e88f3ae8d..7e7d0fa60 100644
--- a/roles/lib_openshift/library/oc_storageclass.py
+++ b/roles/lib_openshift/library/oc_storageclass.py
@@ -1664,7 +1664,7 @@ def main():
name=dict(default=None, type='str'),
annotations=dict(default=None, type='dict'),
parameters=dict(default=None, type='dict'),
- provisioner=dict(required=True, type='str', choices=['aws-ebs', 'gce-pd', 'glusterfs', 'cinder']),
+ provisioner=dict(required=True, type='str'),
api_version=dict(default='v1', type='str'),
default_storage_class=dict(default="false", type='str'),
),
diff --git a/roles/lib_openshift/src/ansible/oc_storageclass.py b/roles/lib_openshift/src/ansible/oc_storageclass.py
index e9f3ebbd3..a8f371661 100644
--- a/roles/lib_openshift/src/ansible/oc_storageclass.py
+++ b/roles/lib_openshift/src/ansible/oc_storageclass.py
@@ -14,7 +14,7 @@ def main():
name=dict(default=None, type='str'),
annotations=dict(default=None, type='dict'),
parameters=dict(default=None, type='dict'),
- provisioner=dict(required=True, type='str', choices=['aws-ebs', 'gce-pd', 'glusterfs', 'cinder']),
+ provisioner=dict(required=True, type='str'),
api_version=dict(default='v1', type='str'),
default_storage_class=dict(default="false", type='str'),
),
diff --git a/roles/lib_openshift/src/class/oc_secret.py b/roles/lib_openshift/src/class/oc_secret.py
index 5322d6241..89e70b6b2 100644
--- a/roles/lib_openshift/src/class/oc_secret.py
+++ b/roles/lib_openshift/src/class/oc_secret.py
@@ -67,7 +67,7 @@ class OCSecret(OpenShiftCLI):
This receives a list of file names and converts it into a secret.
The secret is then written to disk and passed into the `oc replace` command.
'''
- secret = self.prep_secret(files, force)
+ secret = self.prep_secret(files, force=force)
if secret['returncode'] != 0:
return secret
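Why the keyword form matters: if prep_secret accepts other parameters between files and force, a positional force lands in the wrong slot. A sketch with a hypothetical signature (the real one lives in oc_secret.py):

def prep_secret(files=None, secret_type=None, force=False):
    return secret_type, force

print(prep_secret(["f"], True))        # force silently fills secret_type
print(prep_secret(["f"], force=True))  # the fix: (None, True)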
diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml
index 21da6b953..410b739e9 100644
--- a/roles/nuage_master/handlers/main.yaml
+++ b/roles/nuage_master/handlers/main.yaml
@@ -7,8 +7,13 @@
openshift.master.cluster_method == 'native'
# TODO: need to fix up ignore_errors here
+# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ retries: 3
+ delay: 5
+ register: result
+ until: result.rc == 0
when: >
(openshift_master_ha | bool) and
(not master_controllers_service_status_changed | default(false)) and
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 5371588cf..9f3c14bad 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -12,7 +12,6 @@ openshift_aws_clusterid: default
openshift_aws_region: us-east-1
openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"
-openshift_aws_kubernetes_cluster_status: "{{ openshift_aws_clusterid }}"
openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
openshift_aws_iam_cert_path: ''
@@ -48,7 +47,14 @@ openshift_aws_elb_health_check:
unhealthy_threshold: 2
healthy_threshold: 2
-openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}"
+openshift_aws_elb_basename: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}"
+openshift_aws_elb_name_dict:
+ master:
+ external: "{{ openshift_aws_elb_basename }}-external"
+ internal: "{{ openshift_aws_elb_basename }}-internal"
+ infra:
+ external: "{{ openshift_aws_elb_basename }}"
+
openshift_aws_elb_idle_timout: 400
openshift_aws_elb_scheme: internet-facing
openshift_aws_elb_cert_arn: ''
@@ -75,6 +81,18 @@ openshift_aws_elb_listeners:
load_balancer_port: 443
instance_protocol: tcp
instance_port: 443
+ infra:
+ external:
+ - protocol: tcp
+ load_balancer_port: 80
+ instance_protocol: tcp
+ instance_port: 443
+ proxy_protocol: True
+ - protocol: tcp
+ load_balancer_port: 443
+ instance_protocol: tcp
+ instance_port: 443
+ proxy_protocol: True
openshift_aws_node_group_config_master_volumes:
- device_name: /dev/sdb
@@ -88,7 +106,7 @@ openshift_aws_node_group_config_node_volumes:
device_type: gp2
delete_on_termination: True
-openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}"
+openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
openshift_aws_node_group_termination_policy: Default
openshift_aws_node_group_replace_instances: []
openshift_aws_node_group_replace_all_instances: False
@@ -114,6 +132,7 @@ openshift_aws_node_group_config:
wait_for_instances: True
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
+ elbs: "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type].keys()| map('extract', openshift_aws_elb_name_dict[openshift_aws_node_group_type]) | list }}"
compute:
instance_type: m4.xlarge
ami: "{{ openshift_aws_ami }}"
@@ -148,21 +167,22 @@ openshift_aws_node_group_config:
type: infra
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
+ elbs: "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type].keys()| map('extract', openshift_aws_elb_name_dict[openshift_aws_node_group_type]) | list }}"
+
+openshift_aws_elb_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
+openshift_aws_elb_az_load_balancing: False
openshift_aws_elb_security_groups:
-- "{{ openshift_aws_clusterid }}"
-- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}"
+- "{{ openshift_aws_clusterid }}" # default sg
+- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" # node type sg
+- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s" # node type sg k8s
openshift_aws_elb_instance_filter:
"tag:clusterid": "{{ openshift_aws_clusterid }}"
"tag:host-type": "{{ openshift_aws_node_group_type }}"
instance-state-name: running
-openshift_aws_launch_config_security_groups:
-- "{{ openshift_aws_clusterid }}" # default sg
-- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" # node type sg
-- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s" # node type sg k8s
-
+openshift_aws_security_groups_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
openshift_aws_node_security_groups:
default:
name: "{{ openshift_aws_clusterid }}"
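The new elbs expression, `...keys() | map('extract', ...) | list`, simply collects every ELB name defined for the node group. In plain Python:

# Sample data mirroring openshift_aws_elb_name_dict for clusterid "default".
elb_name_dict = {
    "master": {"external": "default-master-external",
               "internal": "default-master-internal"},
    "infra": {"external": "default-infra"},
}
group = "master"
elbs = [elb_name_dict[group][key] for key in elb_name_dict[group]]
print(elbs)  # ['default-master-external', 'default-master-internal']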
diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
index 06e1f9602..a9893c0a7 100644
--- a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
+++ b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
@@ -9,17 +9,17 @@ class FilterModule(object):
''' Custom ansible filters for use by openshift_aws role'''
@staticmethod
- def build_instance_tags(clusterid, status='owned'):
+ def build_instance_tags(clusterid):
''' This function will return a dictionary of the instance tags.
The main desire to have this inside of a filter_plugin is that we
need to build the following key.
- {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": 'owned'}
+ {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": "{{ openshift_aws_clusterid}}"}
'''
tags = {'clusterid': clusterid,
- 'kubernetes.io/cluster/{}'.format(clusterid): status}
+ 'kubernetes.io/cluster/{}'.format(clusterid): clusterid}
return tags
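Resulting tags for a sample clusterid, per the updated filter (the kubernetes.io/cluster/<clusterid> value is now the clusterid itself rather than the fixed string 'owned'):

def build_instance_tags(clusterid):
    return {'clusterid': clusterid,
            'kubernetes.io/cluster/{}'.format(clusterid): clusterid}

print(build_instance_tags('default'))
# {'clusterid': 'default', 'kubernetes.io/cluster/default': 'default'}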
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
index 0dac1c23d..0aac40ddd 100644
--- a/roles/openshift_aws/tasks/build_node_group.yml
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -21,10 +21,6 @@
- "'results' in amiout"
- amiout.results|length > 0
-- when: openshift_aws_create_security_groups
- name: "Create {{ openshift_aws_node_group_type }} security groups"
- include: security_group.yml
-
- when: openshift_aws_create_launch_config
name: "Create {{ openshift_aws_node_group_type }} launch config"
include: launch_config.yml
diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml
index 7bc3184df..56abe9dd7 100644
--- a/roles/openshift_aws/tasks/elb.yml
+++ b/roles/openshift_aws/tasks/elb.yml
@@ -9,12 +9,6 @@
- name: debug
debug: var=vpcout
-- name: fetch the remote instances
- ec2_remote_facts:
- region: "{{ openshift_aws_region }}"
- filters: "{{ openshift_aws_elb_instance_filter }}"
- register: instancesout
-
- name: fetch the default subnet id
ec2_vpc_subnet_facts:
region: "{{ openshift_aws_region }}"
@@ -23,7 +17,7 @@
vpc-id: "{{ vpcout.vpcs[0].id }}"
register: subnetout
-- name:
+- name: dump the elb listeners
debug:
msg: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction]
if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
@@ -33,6 +27,7 @@
ec2_elb_lb:
name: "{{ l_openshift_aws_elb_name }}"
state: present
+ cross_az_load_balancing: "{{ openshift_aws_elb_az_load_balancing }}"
security_group_names: "{{ openshift_aws_elb_security_groups }}"
idle_timeout: "{{ openshift_aws_elb_idle_timout }}"
region: "{{ openshift_aws_region }}"
@@ -43,25 +38,9 @@
if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
else openshift_aws_elb_listeners }}"
scheme: "{{ openshift_aws_elb_scheme }}"
- tags:
- KubernetesCluster: "{{ openshift_aws_clusterid }}"
+ tags: "{{ openshift_aws_elb_tags }}"
register: new_elb
-# It is necessary to ignore_errors here because the instances are not in 'ready'
-# state when first added to ELB
-- name: "Add instances to ELB {{ l_openshift_aws_elb_name }}"
- ec2_elb:
- instance_id: "{{ item.id }}"
- ec2_elbs: "{{ l_openshift_aws_elb_name }}"
- state: present
- region: "{{ openshift_aws_region }}"
- wait: False
- with_items: "{{ instancesout.instances }}"
- ignore_errors: True
- retries: 10
- register: elb_call
- until: elb_call|succeeded
-
- debug:
msg: "{{ item }}"
with_items:
diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml
index 8b7b02a0e..94aca5a35 100644
--- a/roles/openshift_aws/tasks/launch_config.yml
+++ b/roles/openshift_aws/tasks/launch_config.yml
@@ -19,7 +19,7 @@
- name: fetch the security groups for launch config
ec2_group_facts:
filters:
- group-name: "{{ openshift_aws_launch_config_security_groups }}"
+ group-name: "{{ openshift_aws_elb_security_groups }}"
vpc-id: "{{ vpcout.vpcs[0].id }}"
region: "{{ openshift_aws_region }}"
register: ec2sgs
diff --git a/roles/openshift_aws/tasks/master_facts.yml b/roles/openshift_aws/tasks/master_facts.yml
index 737cfc7a6..1c99229ff 100644
--- a/roles/openshift_aws/tasks/master_facts.yml
+++ b/roles/openshift_aws/tasks/master_facts.yml
@@ -3,20 +3,18 @@
ec2_elb_facts:
region: "{{ openshift_aws_region }}"
names:
- - "{{ item }}"
- with_items:
- - "{{ openshift_aws_elb_name }}-external"
- - "{{ openshift_aws_elb_name }}-internal"
+ - "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type]['internal'] }}"
delegate_to: localhost
register: elbs
- debug: var=elbs
+ run_once: true
- name: set fact
set_fact:
- openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}"
+ openshift_master_cluster_hostname: "{{ elbs.elbs[0].dns_name }}"
osm_custom_cors_origins:
- - "{{ elbs.results[1].elbs[0].dns_name }}"
+ - "{{ elbs.elbs[0].dns_name }}"
- "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
- "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
with_items: "{{ groups['masters'] }}"
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
index a8518d43a..e99017b9f 100644
--- a/roles/openshift_aws/tasks/provision.yml
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -7,6 +7,38 @@
name: create s3 bucket for registry
include: s3.yml
+- when: openshift_aws_create_security_groups
+ block:
+ - name: "Create {{ openshift_aws_node_group_type }} security groups"
+ include: security_group.yml
+
+ - name: "Create {{ openshift_aws_node_group_type }} security groups"
+ include: security_group.yml
+ vars:
+ openshift_aws_node_group_type: infra
+
+- name: create our master internal load balancer
+ include: elb.yml
+ vars:
+ openshift_aws_elb_direction: internal
+ openshift_aws_elb_scheme: internal
+ l_openshift_aws_elb_name: "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type]['internal'] }}"
+
+- name: create our master external load balancer
+ include: elb.yml
+ vars:
+ openshift_aws_elb_direction: external
+ openshift_aws_elb_scheme: internet-facing
+ l_openshift_aws_elb_name: "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type]['external'] }}"
+
+- name: create our infra node external load balancer
+ include: elb.yml
+ vars:
+ l_openshift_aws_elb_name: "{{ openshift_aws_elb_name_dict['infra']['external'] }}"
+ openshift_aws_elb_direction: external
+ openshift_aws_elb_scheme: internet-facing
+ openshift_aws_node_group_type: infra
+
- name: include scale group creation for master
include: build_node_group.yml
@@ -22,20 +54,6 @@
delay: 3
until: instancesout.instances|length > 0
-- name: create our master internal load balancers
- include: elb.yml
- vars:
- openshift_aws_elb_direction: internal
- l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-internal"
- openshift_aws_elb_scheme: internal
-
-- name: create our master external load balancers
- include: elb.yml
- vars:
- openshift_aws_elb_direction: external
- l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-external"
- openshift_aws_elb_scheme: internet-facing
-
- name: wait for ssh to become available
wait_for:
port: 22
diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml
index d319fdd1a..0cb749dcc 100644
--- a/roles/openshift_aws/tasks/seal_ami.yml
+++ b/roles/openshift_aws/tasks/seal_ami.yml
@@ -1,11 +1,4 @@
---
-- name: Remove any ansible facts created during AMI creation
- file:
- path: "/etc/ansible/facts.d/{{ item }}"
- state: absent
- with_items:
- - openshift.fact
-
- name: fetch newly created instances
ec2_remote_facts:
region: "{{ openshift_aws_region }}"
diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml
index 161e72fb4..e1fb99b02 100644
--- a/roles/openshift_aws/tasks/security_group.yml
+++ b/roles/openshift_aws/tasks/security_group.yml
@@ -38,8 +38,7 @@
- name: tag sg groups with proper tags
ec2_tag:
- tags:
- KubernetesCluster: "{{ openshift_aws_clusterid }}"
+ tags: "{{ openshift_aws_security_groups_tags }}"
resource: "{{ item.group_id }}"
region: "{{ openshift_aws_region }}"
with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2
index ed9c0ed0b..76aebdcea 100644
--- a/roles/openshift_aws/templates/user_data.j2
+++ b/roles/openshift_aws/templates/user_data.j2
@@ -9,7 +9,7 @@ write_files:
content: |
openshift_group_type: {{ openshift_aws_node_group_type }}
{% if openshift_aws_node_group_type != 'master' %}
-- path: /etc/origin/node/csr_kubeconfig
+- path: /etc/origin/node/bootstrap.kubeconfig
owner: 'root:root'
permissions: '0640'
encoding: b64
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index 419679bc2..587526d07 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -18,9 +18,7 @@
- name: Reload generated facts
openshift_facts:
- when: install_result | changed
- delegate_to: "{{ openshift_ca_host }}"
- run_once: true
+ when: hostvars[openshift_ca_host].install_result | changed
- name: Create openshift_ca_config_dir if it does not exist
file:
@@ -108,6 +106,36 @@
delegate_to: "{{ openshift_ca_host }}"
run_once: true
+# Create client-ca-bundle.crt containing old and new OpenShift CA
+# certificates. This bundle will be used when rolling the OpenShift CA
+# certificate.
+- name: Create client-ca-bundle.crt
+ block:
+ - command: mktemp -d /tmp/openshift-ansible-XXXXXX
+ register: openshift_ca_clientconfig_tmpdir
+ delegate_to: "{{ openshift_ca_host }}"
+ - copy:
+ src: "{{ item }}"
+ dest: "{{ openshift_ca_clientconfig_tmpdir.stdout }}/"
+ remote_src: true
+ with_items: "{{ g_master_legacy_ca_result.files | default([]) | oo_collect('path') }}"
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+ - copy:
+ src: "{{ openshift_ca_config_dir }}/ca.crt"
+ dest: "{{ openshift_ca_clientconfig_tmpdir.stdout }}/"
+ remote_src: true
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+ - assemble:
+ src: "{{ openshift_ca_clientconfig_tmpdir.stdout }}"
+ dest: "{{ openshift_ca_config_dir }}/client-ca-bundle.crt"
+ mode: 0644
+ owner: root
+ group: root
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+
- name: Test local loopback context
command: >
{{ hostvars[openshift_ca_host].openshift.common.client_binary }} config view
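The mktemp/copy/assemble chain above boils down to concatenating the legacy CA certs plus the current ca.crt into one client-ca-bundle.crt. A Python sketch under the usual /etc/origin/master layout (the legacy list is illustrative):

legacy_certs = ["/etc/origin/master/ca.crt.old"]  # stand-in for g_master_legacy_ca_result
current_cert = "/etc/origin/master/ca.crt"

with open("/etc/origin/master/client-ca-bundle.crt", "w") as bundle:
    for path in legacy_certs + [current_cert]:
        with open(path) as cert:
            bundle.write(cert.read())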
diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py
index b40c49701..08045794a 100644
--- a/roles/openshift_cli/library/openshift_container_binary_sync.py
+++ b/roles/openshift_cli/library/openshift_container_binary_sync.py
@@ -36,7 +36,7 @@ class BinarySyncer(object):
self.changed = False
self.output = []
self.bin_dir = '/usr/local/bin'
- self.image = image
+ self._image = image
self.tag = tag
self.backend = backend
self.temp_dir = None # TBD
@@ -142,6 +142,33 @@ class BinarySyncer(object):
self.output.append("Moved %s to %s." % (src_path, dest_path))
self.changed = True
+ @property
+ def raw_image(self):
+ """
+ Returns the image as it was originally passed in to the instance.
+
+ .. note::
+ This image string will only work directly with the atomic command.
+
+ :returns: The original image passed in.
+ :rtype: str
+ """
+ return self._image
+
+ @property
+ def image(self):
+ """
+ Returns the image without atomic prefixes used to map to skopeo args.
+
+ :returns: The image string without prefixes
+ :rtype: str
+ """
+ image = self._image
+ for remove in ('oci:', 'http:', 'https:'):
+ if image.startswith(remove):
+ image = image.replace(remove, '')
+ return image
+
def main():
module = AnsibleModule( # noqa: F405
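Behaviour of the two new properties for a sample system-container reference: raw_image keeps the atomic-style prefix, image strips it for skopeo. A standalone demo of the same loop:

raw = "oci:registry.example.com/openshift3/ose"  # sample value
image = raw
for prefix in ('oci:', 'http:', 'https:'):
    if image.startswith(prefix):
        image = image.replace(prefix, '')
print(raw)    # oci:registry.example.com/openshift3/ose
print(image)  # registry.example.com/openshift3/ose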
diff --git a/roles/openshift_default_storage_class/defaults/main.yml b/roles/openshift_default_storage_class/defaults/main.yml
index bdece7640..014c06641 100644
--- a/roles/openshift_default_storage_class/defaults/main.yml
+++ b/roles/openshift_default_storage_class/defaults/main.yml
@@ -13,6 +13,12 @@ openshift_storageclass_defaults:
parameters:
type: pd-standard
+ openstack:
+ name: standard
+ provisioner: cinder
+ parameters:
+ fstype: xfs
+
openshift_storageclass_default: "true"
openshift_storageclass_name: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['name'] }}"
openshift_storageclass_provisioner: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['provisioner'] }}"
diff --git a/roles/openshift_default_storage_class/tasks/main.yml b/roles/openshift_default_storage_class/tasks/main.yml
index 172e2ac25..281ec8ed5 100644
--- a/roles/openshift_default_storage_class/tasks/main.yml
+++ b/roles/openshift_default_storage_class/tasks/main.yml
@@ -1,5 +1,5 @@
---
-# Install default storage classes in GCE & AWS
+# Install default storage classes in GCE & AWS & OPENSTACK
- name: Ensure storageclass object
oc_storageclass:
name: "{{ openshift_storageclass_name }}"
diff --git a/roles/openshift_docker_gc/defaults/main.yml b/roles/openshift_docker_gc/defaults/main.yml
new file mode 100644
index 000000000..9d79de8a1
--- /dev/null
+++ b/roles/openshift_docker_gc/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+r_enable_docker_gc: "{{ openshift_crio_enable_docker_gc | default(False) }}"
+r_docker_gc_node_selectors: "{{ openshift_crio_docker_gc_node_selector | default({}) }}"
diff --git a/roles/openshift_docker_gc/meta/main.yml b/roles/openshift_docker_gc/meta/main.yml
new file mode 100644
index 000000000..f88a7c533
--- /dev/null
+++ b/roles/openshift_docker_gc/meta/main.yml
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+ author: OpenShift
+ description: docker garbage collection
+ company: Red Hat, Inc
+ license: ASL 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_docker_gc/tasks/main.yaml b/roles/openshift_docker_gc/tasks/main.yaml
new file mode 100644
index 000000000..9ba551479
--- /dev/null
+++ b/roles/openshift_docker_gc/tasks/main.yaml
@@ -0,0 +1,27 @@
+---
+- name: Create docker-gc tempdir
+ command: mktemp -d
+ register: templates_tmpdir
+
+# NOTE: oc_adm_policy_user does not support -z (yet)
+- name: Add dockergc as privileged
+ shell: oc adm policy add-scc-to-user -z dockergc privileged
+# oc_adm_policy_user:
+# user: dockergc
+# resource_kind: scc
+# resource_name: privileged
+# state: present
+
+- name: Create dockergc DaemonSet
+ become: yes
+ template:
+ src: dockergc-ds.yaml.j2
+ dest: "{{ templates_tmpdir.stdout }}/dockergc-ds.yaml"
+
+- name: Apply dockergc DaemonSet
+ oc_obj:
+ state: present
+ kind: DaemonSet
+ name: "dockergc"
+ files:
+ - "{{ templates_tmpdir.stdout }}/dockergc-ds.yaml"
diff --git a/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
new file mode 100644
index 000000000..53e8b448b
--- /dev/null
+++ b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
@@ -0,0 +1,58 @@
+apiVersion: v1
+kind: List
+items:
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: dockergc
+ # You must grant privileged via: oadm policy add-scc-to-user -z dockergc privileged
+ # in order for the dockergc to access the docker socket and root directory
+- apiVersion: extensions/v1beta1
+ kind: DaemonSet
+ metadata:
+ name: dockergc
+ labels:
+ app: dockergc
+ spec:
+ template:
+ metadata:
+ labels:
+ app: dockergc
+ name: dockergc
+ spec:
+{# Only set nodeSelector if the dict is not empty #}
+{% if r_docker_gc_node_selectors %}
+ nodeSelector:
+{% for k,v in r_docker_gc_node_selectors.items() %}
+ {{ k }}: {{ v }}{% endfor %}{% endif %}
+
+ serviceAccountName: dockergc
+ containers:
+ - image: openshift/origin:latest
+ args:
+ - "ex"
+ - "dockergc"
+ - "--image-gc-low-threshold=60"
+ - "--image-gc-high-threshold=80"
+ - "--minimum-ttl-duration=1h0m0s"
+ securityContext:
+ privileged: true
+ name: dockergc
+ resources:
+ requests:
+ memory: 30Mi
+ cpu: 50m
+ volumeMounts:
+ - name: docker-root
+ readOnly: true
+ mountPath: /var/lib/docker
+ - name: docker-socket
+ readOnly: false
+ mountPath: /var/run/docker.sock
+ volumes:
+ - name: docker-root
+ hostPath:
+ path: /var/lib/docker
+ - name: docker-socket
+ hostPath:
+ path: /var/run/docker.sock
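How the conditional nodeSelector block in this template renders for a non-empty selector dict; this sketch drives the jinja2 package directly as a stand-in for Ansible's templating:

import jinja2

snippet = (
    "{% if selectors %}nodeSelector:\n"
    "{% for k, v in selectors.items() %}  {{ k }}: {{ v }}\n{% endfor %}"
    "{% endif %}"
)
print(jinja2.Template(snippet).render(selectors={"runtime": "docker"}))
# nodeSelector:
#   runtime: docker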
diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json
index 0bb56452b..af66b9ea4 100644
--- a/roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json
@@ -31,6 +31,10 @@
"sampleContextDir": "tomcat-websocket-chat",
"version": "1.1",
"openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat7-openshift:1.1"
}
},
{
@@ -44,6 +48,10 @@
"sampleContextDir": "tomcat-websocket-chat",
"version": "1.2",
"openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat7-openshift:1.2"
}
},
{
@@ -56,6 +64,10 @@
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "tomcat-websocket-chat",
"version": "1.3"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat7-openshift:1.3"
}
}
]
@@ -84,6 +96,10 @@
"sampleContextDir": "tomcat-websocket-chat",
"version": "1.1",
"openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift:1.1"
}
},
{
@@ -97,6 +113,10 @@
"sampleContextDir": "tomcat-websocket-chat",
"version": "1.2",
"openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift:1.2"
}
},
{
@@ -109,6 +129,10 @@
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "tomcat-websocket-chat",
"version": "1.3"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift:1.3"
}
}
]
@@ -137,6 +161,10 @@
"sampleContextDir": "tomcat-websocket-chat",
"version": "1.0",
"openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-webserver-3/webserver31-tomcat7-openshift:1.0"
}
}
]
@@ -165,6 +193,10 @@
"sampleContextDir": "tomcat-websocket-chat",
"version": "1.0",
"openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-webserver-3/webserver31-tomcat8-openshift:1.0"
}
}
]
@@ -194,6 +226,10 @@
"sampleRef": "6.4.x",
"version": "1.1",
"openshift.io/display-name": "Red Hat JBoss EAP 6.4"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-eap-6/eap64-openshift:1.1"
}
},
{
@@ -208,6 +244,10 @@
"sampleRef": "6.4.x",
"version": "1.2",
"openshift.io/display-name": "Red Hat JBoss EAP 6.4"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-eap-6/eap64-openshift:1.2"
}
},
{
@@ -222,6 +262,10 @@
"sampleRef": "6.4.x",
"version": "1.3",
"openshift.io/display-name": "Red Hat JBoss EAP 6.4"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-eap-6/eap64-openshift:1.3"
}
},
{
@@ -236,6 +280,10 @@
"sampleRef": "6.4.x",
"version": "1.4",
"openshift.io/display-name": "Red Hat JBoss EAP 6.4"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-eap-6/eap64-openshift:1.4"
}
},
{
@@ -248,7 +296,12 @@
"sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"sampleContextDir": "kitchensink",
"sampleRef": "6.4.x",
- "version": "1.5"
+ "version": "1.5",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-eap-6/eap64-openshift:1.5"
}
}
]
@@ -278,6 +331,10 @@
"sampleRef": "7.0.0.GA",
"version": "1.3",
"openshift.io/display-name": "Red Hat JBoss EAP 7.0"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-eap-7/eap70-openshift:1.3"
}
},
{
@@ -292,6 +349,10 @@
"sampleRef": "7.0.0.GA",
"version": "1.4",
"openshift.io/display-name": "Red Hat JBoss EAP 7.0"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-eap-7/eap70-openshift:1.4"
}
},
{
@@ -304,7 +365,44 @@
"sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"sampleContextDir": "kitchensink",
"sampleRef": "7.0.0.GA",
- "version": "1.5"
+ "version": "1.5",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-eap-7/eap70-openshift:1.5"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-eap71-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.1"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-eap-7/eap71-openshift",
+ "tags": [
+ {
+ "name": "1.0-TP",
+ "annotations": {
+ "description": "JBoss EAP 7.1 Tech Preview.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:7.1,javaee:7,java:8,xpaas:1.0",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "7.0.0.GA",
+ "version": "1.0"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-eap-7-tech-preview/eap71-openshift:1.0"
}
}
]
@@ -334,6 +432,10 @@
"sampleRef": "1.2",
"version": "1.2",
"openshift.io/display-name": "Red Hat JBoss BRMS 6.2 decision server"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver62-openshift:1.2"
}
}
]
@@ -363,6 +465,10 @@
"sampleRef": "1.3",
"version": "1.3",
"openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver63-openshift:1.3"
}
},
{
@@ -375,7 +481,12 @@
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "decisionserver/hellorules",
"sampleRef": "1.3",
- "version": "1.4"
+ "version": "1.4",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver63-openshift:1.4"
}
}
]
@@ -400,7 +511,12 @@
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "decisionserver/hellorules",
"sampleRef": "1.3",
- "version": "1.0"
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.4 decision server"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver64-openshift:1.0"
}
}
]
@@ -430,6 +546,10 @@
"sampleRef": "1.3",
"version": "1.3",
"openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-processserver-6/processserver63-openshift:1.3"
}
},
{
@@ -442,7 +562,12 @@
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "processserver/library",
"sampleRef": "1.3",
- "version": "1.4"
+ "version": "1.4",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-processserver-6/processserver63-openshift:1.4"
}
}
]
@@ -467,7 +592,12 @@
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "processserver/library",
"sampleRef": "1.3",
- "version": "1.0"
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-processserver-6/processserver64-openshift:1.0"
}
}
]
@@ -494,6 +624,10 @@
"supports": "datagrid:6.5,xpaas:1.2",
"version": "1.2",
"openshift.io/display-name": "Red Hat JBoss Data Grid 6.5"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-openshift:1.2"
}
},
{
@@ -505,6 +639,10 @@
"supports": "datagrid:6.5,xpaas:1.4",
"version": "1.3",
"openshift.io/display-name": "Red Hat JBoss Data Grid 6.5"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-openshift:1.3"
}
},
{
@@ -514,7 +652,42 @@
"iconClass": "icon-jboss",
"tags": "datagrid,jboss,xpaas",
"supports":"datagrid:6.5,xpaas:1.4",
- "version": "1.4"
+ "version": "1.4",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-openshift:1.4"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-datagrid71-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-datagrid-7/datagrid71-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss Data Grid 7.1 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datagrid,jboss,xpaas",
+ "supports": "datagrid:7.1,xpaas:1.0",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-datagrid-7/datagrid71-openshift:1.0"
}
}
]
@@ -540,6 +713,39 @@
"tags": "client,jboss,xpaas",
"version": "1.0",
"openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 Client Modules for EAP"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-client-openshift:1.0"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-datagrid71-client-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 Client Modules for EAP"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-datagrid-7/datagrid71-client-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss Data Grid 7.1 Client Modules for EAP.",
+ "iconClass": "icon-jboss",
+ "tags": "client,jboss,xpaas",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 Client Modules for EAP"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-datagrid-7/datagrid71-client-openshift:1.0"
}
}
]
@@ -566,6 +772,10 @@
"supports": "datavirt:6.3,xpaas:1.4",
"version": "1.0",
"openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-openshift:1.0"
}
},
{
@@ -577,6 +787,10 @@
"supports": "datavirt:6.3,xpaas:1.4",
"version": "1.1",
"openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-openshift:1.1"
}
},
{
@@ -586,7 +800,12 @@
"iconClass": "icon-jboss",
"tags": "datavirt,jboss,xpaas",
"supports":"datavirt:6.3,xpaas:1.4",
- "version": "1.2"
+ "version": "1.2",
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-openshift:1.2"
}
}
]
@@ -612,6 +831,10 @@
"tags": "client,jboss,xpaas",
"version": "1.0",
"openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.5 JDBC Driver Modules for EAP"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-driver-openshift:1.0"
}
}
]
@@ -638,6 +861,10 @@
"supports": "amq:6.2,messaging,xpaas:1.1",
"version": "1.1",
"openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-amq-6/amq62-openshift:1.1"
}
},
{
@@ -649,6 +876,10 @@
"supports": "amq:6.2,messaging,xpaas:1.2",
"version": "1.2",
"openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-amq-6/amq62-openshift:1.2"
}
},
{
@@ -660,6 +891,10 @@
"supports": "amq:6.2,messaging,xpaas:1.3",
"version": "1.3",
"openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-amq-6/amq62-openshift:1.3"
}
},
{
@@ -669,7 +904,27 @@
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
"supports":"amq:6.2,messaging,xpaas:1.4",
- "version": "1.4"
+ "version": "1.4",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-amq-6/amq62-openshift:1.4"
+ }
+ },
+ {
+ "name": "1.5",
+ "annotations": {
+ "description": "JBoss A-MQ 6.2 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports":"amq:6.2,messaging,xpaas:1.5",
+ "version": "1.5",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-amq-6/amq62-openshift:1.5"
}
}
]
@@ -696,6 +951,25 @@
"supports": "amq:6.3,messaging,xpaas:1.0",
"version": "1.0",
"openshift.io/display-name": "Red Hat JBoss A-MQ 6.3"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-amq-6/amq63-openshift:1.0"
+ }
+ },
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "JBoss A-MQ 6.3 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports": "amq:6.3,messaging,xpaas:1.1",
+ "version": "1.1",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-amq-6/amq63-openshift:1.1"
}
}
]
@@ -723,6 +997,10 @@
"supports": "sso:7.0,xpaas:1.3",
"version": "1.3",
"openshift.io/display-name": "Red Hat Single Sign-On 7.0"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/redhat-sso-7/sso70-openshift:1.3"
}
},
{
@@ -734,6 +1012,10 @@
"supports": "sso:7.0,xpaas:1.4",
"version": "1.4",
"openshift.io/display-name": "Red Hat Single Sign-On 7.0"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/redhat-sso-7/sso70-openshift:1.4"
}
}
]
@@ -761,6 +1043,10 @@
"supports": "sso:7.1,xpaas:1.4",
"version": "1.0",
"openshift.io/display-name": "Red Hat Single Sign-On 7.1"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/redhat-sso-7/sso71-openshift:1.0"
}
},
{
@@ -772,6 +1058,10 @@
"supports": "sso:7.1,xpaas:1.4",
"version": "1.1",
"openshift.io/display-name": "Red Hat Single Sign-On 7.1"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/redhat-sso-7/sso71-openshift:1.1"
}
}
]
@@ -800,6 +1090,10 @@
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts",
"sampleContextDir": "undertow-servlet",
"version": "1.0"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift:1.0"
}
},
{
@@ -813,6 +1107,10 @@
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts",
"sampleContextDir": "undertow-servlet",
"version": "1.1"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift:1.1"
}
}
]
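
The net effect of this change is that every tag now pins an explicit `from` reference to a registry image rather than relying on the stream's `dockerImageRepository` alone. A quick sanity-check sketch (assuming the file's top-level object carries the usual `items` array):

```python
import json

with open("jboss-image-streams.json") as f:
    streams = json.load(f)["items"]

for stream in streams:
    for tag in stream["spec"].get("tags", []):
        ref = tag.get("from", {}).get("name", "")
        # after this change every tag should carry a pinned registry reference
        if not ref.startswith("registry.access.redhat.com/"):
            print("missing/unexpected from:", stream["metadata"]["name"], tag["name"])
```
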
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v3.7/xpaas-streams/jboss-image-streams.json
index 0aad7fae6..af66b9ea4 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-streams/jboss-image-streams.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-streams/jboss-image-streams.json
@@ -31,6 +31,10 @@
"sampleContextDir": "tomcat-websocket-chat",
"version": "1.1",
"openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat7-openshift:1.1"
}
},
{
@@ -44,6 +48,10 @@
"sampleContextDir": "tomcat-websocket-chat",
"version": "1.2",
"openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat7-openshift:1.2"
}
},
{
@@ -56,6 +64,10 @@
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "tomcat-websocket-chat",
"version": "1.3"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat7-openshift:1.3"
}
}
]
@@ -84,6 +96,10 @@
"sampleContextDir": "tomcat-websocket-chat",
"version": "1.1",
"openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift:1.1"
}
},
{
@@ -97,6 +113,10 @@
"sampleContextDir": "tomcat-websocket-chat",
"version": "1.2",
"openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift:1.2"
}
},
{
@@ -109,6 +129,10 @@
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "tomcat-websocket-chat",
"version": "1.3"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift:1.3"
}
}
]
@@ -137,6 +161,10 @@
"sampleContextDir": "tomcat-websocket-chat",
"version": "1.0",
"openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-webserver-3/webserver31-tomcat7-openshift:1.0"
}
}
]
@@ -165,6 +193,10 @@
"sampleContextDir": "tomcat-websocket-chat",
"version": "1.0",
"openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-webserver-3/webserver31-tomcat8-openshift:1.0"
}
}
]
@@ -194,6 +226,10 @@
"sampleRef": "6.4.x",
"version": "1.1",
"openshift.io/display-name": "Red Hat JBoss EAP 6.4"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-eap-6/eap64-openshift:1.1"
}
},
{
@@ -208,6 +244,10 @@
"sampleRef": "6.4.x",
"version": "1.2",
"openshift.io/display-name": "Red Hat JBoss EAP 6.4"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-eap-6/eap64-openshift:1.2"
}
},
{
@@ -222,6 +262,10 @@
"sampleRef": "6.4.x",
"version": "1.3",
"openshift.io/display-name": "Red Hat JBoss EAP 6.4"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-eap-6/eap64-openshift:1.3"
}
},
{
@@ -236,6 +280,10 @@
"sampleRef": "6.4.x",
"version": "1.4",
"openshift.io/display-name": "Red Hat JBoss EAP 6.4"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-eap-6/eap64-openshift:1.4"
}
},
{
@@ -248,7 +296,12 @@
"sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"sampleContextDir": "kitchensink",
"sampleRef": "6.4.x",
- "version": "1.5"
+ "version": "1.5",
+ "openshift.io/display-name": "Red Hat JBoss EAP 6.4"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-eap-6/eap64-openshift:1.5"
}
}
]
@@ -278,6 +331,10 @@
"sampleRef": "7.0.0.GA",
"version": "1.3",
"openshift.io/display-name": "Red Hat JBoss EAP 7.0"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-eap-7/eap70-openshift:1.3"
}
},
{
@@ -292,6 +349,10 @@
"sampleRef": "7.0.0.GA",
"version": "1.4",
"openshift.io/display-name": "Red Hat JBoss EAP 7.0"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-eap-7/eap70-openshift:1.4"
}
},
{
@@ -304,7 +365,12 @@
"sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"sampleContextDir": "kitchensink",
"sampleRef": "7.0.0.GA",
- "version": "1.5"
+ "version": "1.5",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.0"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-eap-7/eap70-openshift:1.5"
}
}
]
@@ -366,6 +432,10 @@
"sampleRef": "1.2",
"version": "1.2",
"openshift.io/display-name": "Red Hat JBoss BRMS 6.2 decision server"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver62-openshift:1.2"
}
}
]
@@ -395,6 +465,10 @@
"sampleRef": "1.3",
"version": "1.3",
"openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver63-openshift:1.3"
}
},
{
@@ -407,7 +481,12 @@
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "decisionserver/hellorules",
"sampleRef": "1.3",
- "version": "1.4"
+ "version": "1.4",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver63-openshift:1.4"
}
}
]
@@ -432,7 +511,12 @@
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "decisionserver/hellorules",
"sampleRef": "1.3",
- "version": "1.0"
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss BRMS 6.4 decision server"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver64-openshift:1.0"
}
}
]
@@ -462,6 +546,10 @@
"sampleRef": "1.3",
"version": "1.3",
"openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-processserver-6/processserver63-openshift:1.3"
}
},
{
@@ -474,7 +562,12 @@
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "processserver/library",
"sampleRef": "1.3",
- "version": "1.4"
+ "version": "1.4",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-processserver-6/processserver63-openshift:1.4"
}
}
]
@@ -499,7 +592,12 @@
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
"sampleContextDir": "processserver/library",
"sampleRef": "1.3",
- "version": "1.0"
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-processserver-6/processserver64-openshift:1.0"
}
}
]
@@ -526,6 +624,10 @@
"supports": "datagrid:6.5,xpaas:1.2",
"version": "1.2",
"openshift.io/display-name": "Red Hat JBoss Data Grid 6.5"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-openshift:1.2"
}
},
{
@@ -537,6 +639,10 @@
"supports": "datagrid:6.5,xpaas:1.4",
"version": "1.3",
"openshift.io/display-name": "Red Hat JBoss Data Grid 6.5"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-openshift:1.3"
}
},
{
@@ -546,7 +652,12 @@
"iconClass": "icon-jboss",
"tags": "datagrid,jboss,xpaas",
"supports":"datagrid:6.5,xpaas:1.4",
- "version": "1.4"
+ "version": "1.4",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-openshift:1.4"
}
}
]
@@ -573,6 +684,10 @@
"supports": "datagrid:7.1,xpaas:1.0",
"version": "1.0",
"openshift.io/display-name": "Red Hat JBoss Data Grid 7.1"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-datagrid-7/datagrid71-openshift:1.0"
}
}
]
@@ -598,6 +713,10 @@
"tags": "client,jboss,xpaas",
"version": "1.0",
"openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 Client Modules for EAP"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-client-openshift:1.0"
}
}
]
@@ -623,6 +742,10 @@
"tags": "client,jboss,xpaas",
"version": "1.0",
"openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 Client Modules for EAP"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-datagrid-7/datagrid71-client-openshift:1.0"
}
}
]
@@ -649,6 +772,10 @@
"supports": "datavirt:6.3,xpaas:1.4",
"version": "1.0",
"openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-openshift:1.0"
}
},
{
@@ -660,6 +787,10 @@
"supports": "datavirt:6.3,xpaas:1.4",
"version": "1.1",
"openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-openshift:1.1"
}
},
{
@@ -669,7 +800,12 @@
"iconClass": "icon-jboss",
"tags": "datavirt,jboss,xpaas",
"supports":"datavirt:6.3,xpaas:1.4",
- "version": "1.2"
+ "version": "1.2",
+ "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-openshift:1.2"
}
}
]
@@ -695,6 +831,10 @@
"tags": "client,jboss,xpaas",
"version": "1.0",
"openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.5 JDBC Driver Modules for EAP"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-driver-openshift:1.0"
}
}
]
@@ -721,6 +861,10 @@
"supports": "amq:6.2,messaging,xpaas:1.1",
"version": "1.1",
"openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-amq-6/amq62-openshift:1.1"
}
},
{
@@ -732,6 +876,10 @@
"supports": "amq:6.2,messaging,xpaas:1.2",
"version": "1.2",
"openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-amq-6/amq62-openshift:1.2"
}
},
{
@@ -743,6 +891,10 @@
"supports": "amq:6.2,messaging,xpaas:1.3",
"version": "1.3",
"openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-amq-6/amq62-openshift:1.3"
}
},
{
@@ -752,7 +904,12 @@
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
"supports":"amq:6.2,messaging,xpaas:1.4",
- "version": "1.4"
+ "version": "1.4",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-amq-6/amq62-openshift:1.4"
}
},
{
@@ -762,7 +919,12 @@
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
"supports":"amq:6.2,messaging,xpaas:1.5",
- "version": "1.5"
+ "version": "1.5",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-amq-6/amq62-openshift:1.5"
}
}
]
@@ -789,6 +951,10 @@
"supports": "amq:6.3,messaging,xpaas:1.0",
"version": "1.0",
"openshift.io/display-name": "Red Hat JBoss A-MQ 6.3"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-amq-6/amq63-openshift:1.0"
}
},
{
@@ -800,6 +966,10 @@
"supports": "amq:6.3,messaging,xpaas:1.1",
"version": "1.1",
"openshift.io/display-name": "Red Hat JBoss A-MQ 6.3"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-amq-6/amq63-openshift:1.1"
}
}
]
@@ -827,6 +997,10 @@
"supports": "sso:7.0,xpaas:1.3",
"version": "1.3",
"openshift.io/display-name": "Red Hat Single Sign-On 7.0"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/redhat-sso-7/sso70-openshift:1.3"
}
},
{
@@ -838,6 +1012,10 @@
"supports": "sso:7.0,xpaas:1.4",
"version": "1.4",
"openshift.io/display-name": "Red Hat Single Sign-On 7.0"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/redhat-sso-7/sso70-openshift:1.4"
}
}
]
@@ -865,6 +1043,10 @@
"supports": "sso:7.1,xpaas:1.4",
"version": "1.0",
"openshift.io/display-name": "Red Hat Single Sign-On 7.1"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/redhat-sso-7/sso71-openshift:1.0"
}
},
{
@@ -876,6 +1058,10 @@
"supports": "sso:7.1,xpaas:1.4",
"version": "1.1",
"openshift.io/display-name": "Red Hat Single Sign-On 7.1"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/redhat-sso-7/sso71-openshift:1.1"
}
}
]
@@ -904,6 +1090,10 @@
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts",
"sampleContextDir": "undertow-servlet",
"version": "1.0"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift:1.0"
}
},
{
@@ -917,6 +1107,10 @@
"sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts",
"sampleContextDir": "undertow-servlet",
"version": "1.1"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift:1.1"
}
}
]
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 33028fea4..a88945538 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1289,7 +1289,7 @@ def get_container_openshift_version(facts):
If containerized, see if we can determine the installed version via the
systemd environment files.
"""
- for filename in ['/etc/sysconfig/%s-master', '/etc/sysconfig/%s-node']:
+ for filename in ['/etc/sysconfig/%s-master-controllers', '/etc/sysconfig/%s-node']:
env_path = filename % facts['common']['service_type']
if not os.path.exists(env_path):
continue
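
With this fix the lookup scans the `-master-controllers` unit's environment file, since the plain `-master` sysconfig file no longer exists on newer masters. Roughly, the surrounding function does something like the following; the parsing details here are a condensed assumption, not the verbatim module code:

```python
import os

def containerized_version(service_type):
    # scan the systemd environment files for an IMAGE_VERSION entry
    for pattern in ('/etc/sysconfig/%s-master-controllers',
                    '/etc/sysconfig/%s-node'):
        env_path = pattern % service_type
        if not os.path.exists(env_path):
            continue
        with open(env_path) as env_file:
            for line in env_file:
                if line.startswith('IMAGE_VERSION='):
                    return line.split('=', 1)[1].strip().lstrip('v')
    return None

print(containerized_version('origin'))  # e.g. '3.7.0' on a containerized master
```
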
diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh
index 5ed6d9f84..4d150bc74 100644
--- a/roles/openshift_gcp/templates/provision.j2.sh
+++ b/roles/openshift_gcp/templates/provision.j2.sh
@@ -313,11 +313,11 @@ fi
# wait until all node groups are stable
{% for node_group in openshift_gcp_node_group_config %}
-{% if node_group.bootstrap | default(False) %}
-# not waiting for {{ node_group.name }} due to bootstrapping
-{% else %}
+{% if node_group.wait_for_stable | default(False) or not (node_group.bootstrap | default(False)) %}
# wait for stable {{ node_group.name }}
( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=600 ) &
+{% else %}
+# not waiting for {{ node_group.name }} due to bootstrapping
{% endif %}
{% endfor %}
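
The condition is inverted so that a bootstrapping node group can still opt back in to the stability wait via a new `wait_for_stable` flag. The decision table, as a plain-Python sketch:

```python
def should_wait(node_group):
    # wait unless the group bootstraps, but let wait_for_stable force a wait
    return (node_group.get('wait_for_stable', False)
            or not node_group.get('bootstrap', False))

print(should_wait({}))                                            # True  (normal group)
print(should_wait({'bootstrap': True}))                           # False (skipped)
print(should_wait({'bootstrap': True, 'wait_for_stable': True}))  # True  (opt-in wins)
```
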
diff --git a/roles/openshift_gcp/templates/remove.j2.sh b/roles/openshift_gcp/templates/remove.j2.sh
index a1e0affec..c9213b800 100644
--- a/roles/openshift_gcp/templates/remove.j2.sh
+++ b/roles/openshift_gcp/templates/remove.j2.sh
@@ -37,7 +37,7 @@ function teardown() {
# scale down {{ node_group.name }}
(
# performs a delete and scale down as one operation to ensure maximum parallelism
- if ! instances=$( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed list-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --format='value[terminator=","](instance)' ); then
+ if ! instances=$( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed list-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --format='value[terminator=","](instance)' 2>/dev/null ); then
exit 0
fi
instances="${instances%?}"
@@ -59,6 +59,21 @@ if gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bu
fi
) &
+# Project metadata prefixed with {{ openshift_gcp_prefix }}
+(
+ for key in $( gcloud --project "{{ openshift_gcp_project }}" compute project-info describe --flatten=commonInstanceMetadata.items[] '--format=value(commonInstanceMetadata.items.key)' ); do
+ if [[ "${key}" == "{{ openshift_gcp_prefix }}"* ]]; then
+ gcloud --project "{{ openshift_gcp_project }}" compute project-info remove-metadata "--keys=${key}"
+ fi
+ done
+) &
+
+# Instances and disks used for image building
+(
+ teardown "{{ openshift_gcp_prefix }}build-image-instance" compute instances --zone "{{ openshift_gcp_zone }}"
+ teardown "{{ openshift_gcp_prefix }}build-image-instance" compute disks --zone "{{ openshift_gcp_zone }}"
+) &
+
# DNS
(
dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
@@ -152,5 +167,12 @@ for i in `jobs -p`; do wait $i; done
for i in `jobs -p`; do wait $i; done
+# Images specifically located under this cluster prefix family
+for name in $( gcloud --project "{{ openshift_gcp_project }}" compute images list "--filter=family={{ openshift_gcp_prefix }}images" '--format=value(name)' ); do
+ ( gcloud --project "{{ openshift_gcp_project }}" compute images delete "${name}" ) &
+done
+
# Network
-teardown "{{ openshift_gcp_network_name }}" compute networks
+( teardown "{{ openshift_gcp_network_name }}" compute networks ) &
+
+for i in `jobs -p`; do wait $i; done
\ No newline at end of file
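
The new metadata sweep deletes only keys carrying the cluster prefix, leaving unrelated project metadata (such as `ssh-keys`) untouched. The selection logic, sketched with made-up keys:

```python
def keys_to_remove(keys, prefix):
    # only cluster-prefixed metadata is eligible for removal
    return [k for k in keys if k.startswith(prefix)]

print(keys_to_remove(['ssh-keys', 'demo-bootstrap', 'demo-node-config'], 'demo-'))
# ['demo-bootstrap', 'demo-node-config']
```
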
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
index 7956559c6..87e6146d4 100644
--- a/roles/openshift_health_checker/openshift_checks/disk_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -1,6 +1,7 @@
"""Check that there is enough disk space in predefined paths."""
import tempfile
+import os.path
from openshift_checks import OpenShiftCheck, OpenShiftCheckException
@@ -121,11 +122,21 @@ class DiskAvailability(OpenShiftCheck):
return {}
+ def find_ansible_submounts(self, path):
+ """Return a list of ansible_mounts that are below the given path."""
+ base = os.path.join(path, "")
+ return [
+ mount
+ for mount in self.get_var("ansible_mounts")
+ if mount["mount"].startswith(base)
+ ]
+
def free_bytes(self, path):
"""Return the size available in path based on ansible_mounts."""
+ submounts = sum(mnt.get('size_available', 0) for mnt in self.find_ansible_submounts(path))
mount = self.find_ansible_mount(path)
try:
- return mount['size_available']
+ return mount['size_available'] + submounts
except KeyError:
raise OpenShiftCheckException(
'Unable to retrieve disk availability for "{path}".\n'
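
Previously the check credited only the single mount containing the path, so a host with `/var/lib/docker` on its own large volume could fail the `/var` check despite having ample space. The new accounting adds submount capacity; a self-contained sketch with made-up mount data:

```python
import os.path

ansible_mounts = [
    {'mount': '/', 'size_available': 2 * 10**9},
    {'mount': '/var', 'size_available': 10 * 10**9},
    {'mount': '/var/lib/docker', 'size_available': 20 * 10**9},
]

def free_bytes(path):
    base = os.path.join(path, '')  # '/var' -> '/var/', so '/var2' is excluded
    submounts = sum(m['size_available'] for m in ansible_mounts
                    if m['mount'].startswith(base))
    direct = next(m['size_available'] for m in ansible_mounts
                  if m['mount'] == path)
    return direct + submounts

print(free_bytes('/var'))  # 30000000000: /var itself plus /var/lib/docker
```
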
diff --git a/roles/openshift_health_checker/openshift_checks/docker_storage.py b/roles/openshift_health_checker/openshift_checks/docker_storage.py
index 0558ddf14..6808d8b2f 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_storage.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_storage.py
@@ -14,7 +14,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
"""
name = "docker_storage"
- tags = ["pre-install", "health", "preflight"]
+ tags = ["health", "preflight"]
dependencies = ["python-docker-py"]
storage_drivers = ["devicemapper", "overlay", "overlay2"]
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
index 29a325a17..7acdb40ec 100644
--- a/roles/openshift_health_checker/test/disk_availability_test.py
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -96,6 +96,24 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
'size_available': 20 * 10**9 + 1,
}],
),
+ (
+ ['oo_masters_to_config'],
+ 0,
+ [{
+ 'mount': '/',
+ 'size_available': 2 * 10**9,
+ }, { # not enough directly on /var
+ 'mount': '/var',
+ 'size_available': 10 * 10**9 + 1,
+ }, {
+ # but subdir mounts add up to enough
+ 'mount': '/var/lib/docker',
+ 'size_available': 20 * 10**9 + 1,
+ }, {
+ 'mount': '/var/lib/origin',
+ 'size_available': 20 * 10**9 + 1,
+ }],
+ ),
])
def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansible_mounts):
task_vars = dict(
@@ -104,9 +122,10 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
ansible_mounts=ansible_mounts,
)
- result = DiskAvailability(fake_execute_module, task_vars).run()
+ check = DiskAvailability(fake_execute_module, task_vars)
+ check.run()
- assert not result.get('failed', False)
+ assert not check.failures
@pytest.mark.parametrize('name,group_names,configured_min,ansible_mounts,expect_chunks', [
diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml
index 2aceef9e4..dd7053656 100644
--- a/roles/openshift_hosted/tasks/router.yml
+++ b/roles/openshift_hosted/tasks/router.yml
@@ -29,7 +29,9 @@
src: "{{ item }}"
with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') |
oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}"
- when: ( not openshift_hosted_router_create_certificate | bool ) or openshift_hosted_router_certificate != {}
+ when: ( not openshift_hosted_router_create_certificate | bool ) or openshift_hosted_router_certificate != {} or
+ ( openshift_hosted_routers | oo_collect(attribute='certificate') | oo_select_keys_from_list(['keyfile', 'certfile', 'cafile'])|length > 0 )
+
# This is for when we desire a cluster signed cert
# The certificate is generated and placed in master_config_dir/
@@ -42,8 +44,8 @@
hostnames:
- "{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}"
- "*.{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}"
- cert: "{{ ('/etc/origin/master/' ~ (item.certificate.certfile | basename)) if 'certfile' in item.certificate else ((openshift_master_config_dir) ~ '/openshift-router.crt') }}"
- key: "{{ ('/etc/origin/master/' ~ (item.certificate.keyfile | basename)) if 'keyfile' in item.certificate else ((openshift_master_config_dir) ~ '/openshift-router.key') }}"
+ cert: "{{ openshift_master_config_dir ~ '/openshift-router.crt' }}"
+ key: "{{ openshift_master_config_dir ~ '/openshift-router.key' }}"
with_items: "{{ openshift_hosted_routers }}"
- name: set the openshift_hosted_router_certificate
@@ -55,6 +57,7 @@
when:
- openshift_hosted_router_create_certificate | bool
- openshift_hosted_router_certificate == {}
+ - openshift_hosted_routers | oo_collect(attribute='certificate') | oo_select_keys_from_list(['keyfile', 'certfile', 'cafile'])|length == 0
- name: Create the router service account(s)
oc_serviceaccount:
diff --git a/roles/openshift_hosted_metrics/handlers/main.yml b/roles/openshift_hosted_metrics/handlers/main.yml
index ce7688581..88b893448 100644
--- a/roles/openshift_hosted_metrics/handlers/main.yml
+++ b/roles/openshift_hosted_metrics/handlers/main.yml
@@ -4,8 +4,13 @@
when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
notify: Verify API Server
+# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ retries: 3
+ delay: 5
+ register: result
+ until: result.rc == 0
when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
- name: Verify API Server
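
Moving from the `systemd` module to a raw `systemctl restart` with `retries`/`until` lets the handler retry a restart that races the API server coming up. In effect (a rough analogue only; Ansible's own retry loop is more involved):

```python
import subprocess
import time

# rough analogue of retries: 3 / delay: 5 / until: result.rc == 0
for attempt in range(3):
    rc = subprocess.call(['systemctl', 'restart', 'origin-master-controllers'])
    if rc == 0:
        break
    time.sleep(5)
```
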
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 0ea34faf2..6c5bb8693 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -297,6 +297,8 @@ oc delete pod --selector=<ds_selector>
Changelog
---------
+Thu Oct 26, 2017
+- Make CPU request equal to the CPU limit when the limit is smaller than the request
Tue Oct 10, 2017
- Default imagePullPolicy changed from Always to IfNotPresent
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
index 959573635..e1a5ea726 100644
--- a/roles/openshift_logging/filter_plugins/openshift_logging.py
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -3,6 +3,7 @@
'''
import random
+import re
def es_storage(os_logging_facts, dc_name, pvc_claim, root='elasticsearch'):
@@ -17,6 +18,31 @@ def es_storage(os_logging_facts, dc_name, pvc_claim, root='elasticsearch'):
return dict(kind='emptydir')
+def min_cpu(left, right):
+ '''Return the minimum cpu value of the two values given'''
+ message = "Unable to evaluate whether the {} cpu value '{}' is specified correctly. Expected a whole number, a decimal, or an integer followed by M/m"
+ pattern = re.compile(r"^(\d*\.?\d*)([Mm])?$")
+ millis_per_core = 1000
+ if not right:
+ return left
+ m_left = pattern.match(left)
+ if not m_left:
+ raise RuntimeError(message.format("left", left))
+ m_right = pattern.match(right)
+ if not m_right:
+ raise RuntimeError(message.format("right", right))
+ left_value = float(m_left.group(1))
+ right_value = float(m_right.group(1))
+ if m_left.group(2) not in ["M", "m"]:
+ left_value = left_value * millis_per_core
+ if m_right.group(2) not in ["M", "m"]:
+ right_value = right_value * millis_per_core
+ response = left
+ if left_value != min(left_value, right_value):
+ response = right
+ return response
+
+
def walk(source, path, default, delimiter='.'):
'''Walk the source hash given the path and return the value or default if not found'''
if not isinstance(source, dict):
@@ -87,6 +113,7 @@ class FilterModule(object):
'random_word': random_word,
'entry_from_named_pair': entry_from_named_pair,
'map_from_pairs': map_from_pairs,
+ 'min_cpu': min_cpu,
'es_storage': es_storage,
'serviceaccount_name': serviceaccount_name,
'serviceaccount_namespace': serviceaccount_namespace,
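
`min_cpu` normalizes both operands to millicores (a bare number is whole cores, an `M`/`m` suffix is millicores) and hands back the original string of the smaller side, so the caller's formatting survives. Assuming the filter module is importable:

```python
from openshift_logging import min_cpu

print(min_cpu('500M', '1'))    # '500M' -- 500m < 1000m
print(min_cpu('0.2', '100m'))  # '100m' -- 200m > 100m
print(min_cpu('1', None))      # '1'    -- no limit given, request unchanged
```
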
diff --git a/roles/openshift_logging/filter_plugins/test b/roles/openshift_logging/filter_plugins/test
index 3ad956cca..bac25c012 100644
--- a/roles/openshift_logging/filter_plugins/test
+++ b/roles/openshift_logging/filter_plugins/test
@@ -1,7 +1,22 @@
import unittest
from openshift_logging import walk
+from openshift_logging import min_cpu
class TestFilterMethods(unittest.TestCase):
+
+
+ def test_min_cpu_for_none(self):
+ source = "1000M"
+ self.assertEquals(min_cpu(source, None), "1000M")
+
+ def test_min_cpu_for_millis(self):
+ source = "1"
+ self.assertEquals(min_cpu(source, "0.1"), "0.1")
+
+
+ def test_min_cpu_for_whole(self):
+ source = "120M"
+ self.assertEquals(min_cpu(source, "2"), "120M")
def test_walk_find_key(self):
diff --git a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml
index ce7688581..acc838bd1 100644
--- a/roles/openshift_logging/handlers/main.yml
+++ b/roles/openshift_logging/handlers/main.yml
@@ -4,8 +4,13 @@
when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
notify: Verify API Server
+# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ retries: 3
+ delay: 5
+ register: result
+ until: result.rc == 0
when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
- name: Verify API Server
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 668a3f7e7..cec295d65 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -36,6 +36,14 @@
- openshift_logging_label_key != ""
- openshift_logging_label_value is defined
+- name: Annotate Logging Project to allow overcommit
+ oc_edit:
+ kind: ns
+ name: "{{ openshift_logging_namespace }}"
+ separator: '#'
+ content:
+ metadata#annotations#quota.openshift.io/cluster-resource-override-enabled: "false"
+
- name: Create logging cert directory
file:
path: "{{ openshift.common.config_base }}/logging"
@@ -70,7 +78,7 @@
generated_certs_dir: "{{openshift.common.config_base}}/logging"
openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
openshift_logging_elasticsearch_deployment_name: "{{ outer_item.0.name }}"
- openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim.claimName if outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim is defined else openshift_logging_es_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}"
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
@@ -128,7 +136,7 @@
generated_certs_dir: "{{openshift.common.config_base}}/logging"
openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
openshift_logging_elasticsearch_deployment_name: "{{ outer_item.0.name }}"
- openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim.claimName if outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim is defined else openshift_logging_es_ops_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}"
openshift_logging_elasticsearch_ops_deployment: true
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
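
The widened Jinja expression keeps an existing deployment's PVC instead of generating a fresh name on every run. Its precedence, rewritten as plain Python (names hypothetical):

```python
def pvc_name(dc_volumes, explicit_name, index, prefix):
    claim = dc_volumes.get('elasticsearch-storage', {}).get('persistentVolumeClaim')
    if claim:
        return claim['claimName']         # reuse the claim the DC already mounts
    if explicit_name is None:
        return '%s-%s' % (prefix, index)  # fall back to prefix + ordinal
    return explicit_name                  # honor an explicitly supplied name

print(pvc_name({}, None, 3, 'logging-es'))  # 'logging-es-3'
```
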
diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml
index b4ddf45d9..fcaf18ed4 100644
--- a/roles/openshift_logging_curator/tasks/main.yaml
+++ b/roles/openshift_logging_curator/tasks/main.yaml
@@ -90,7 +90,7 @@
es_host: "{{ openshift_logging_curator_es_host }}"
es_port: "{{ openshift_logging_curator_es_port }}"
curator_cpu_limit: "{{ openshift_logging_curator_cpu_limit }}"
- curator_cpu_request: "{{ openshift_logging_curator_cpu_request }}"
+ curator_cpu_request: "{{ openshift_logging_curator_cpu_request | min_cpu(openshift_logging_curator_cpu_limit | default(none)) }}"
curator_memory_limit: "{{ openshift_logging_curator_memory_limit }}"
curator_replicas: "{{ openshift_logging_curator_replicas | default (1) }}"
curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}"
diff --git a/roles/openshift_logging_elasticsearch/files/es_migration.sh b/roles/openshift_logging_elasticsearch/files/es_migration.sh
deleted file mode 100644
index 339b5a1b2..000000000
--- a/roles/openshift_logging_elasticsearch/files/es_migration.sh
+++ /dev/null
@@ -1,79 +0,0 @@
-CA=${1:-/etc/openshift/logging/ca.crt}
-KEY=${2:-/etc/openshift/logging/system.admin.key}
-CERT=${3:-/etc/openshift/logging/system.admin.crt}
-openshift_logging_es_host=${4:-logging-es}
-openshift_logging_es_port=${5:-9200}
-namespace=${6:-logging}
-
-# for each index in _cat/indices
-# skip indices that begin with . - .kibana, .operations, etc.
-# skip indices that contain a uuid
-# get a list of unique project
-# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
-# we are interested in - the awk will strip that part off
-function get_list_of_indices() {
- curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \
- awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \
- '$3 !~ "^[.]" && $3 !~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \
- sort -u
-}
-
-# for each index in _cat/indices
-# skip indices that begin with . - .kibana, .operations, etc.
-# get a list of unique project.uuid
-# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
-# we are interested in - the awk will strip that part off
-function get_list_of_proj_uuid_indices() {
- curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \
- awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \
- '$3 !~ "^[.]" && $3 ~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \
- sort -u
-}
-
-if [[ -z "$(oc get pods -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}')" ]]; then
- echo "No Elasticsearch pods found running. Cannot update common data model."
- exit 1
-fi
-
-count=$(get_list_of_indices | wc -l)
-if [ $count -eq 0 ]; then
- echo No matching indices found - skipping update_for_uuid
-else
- echo Creating aliases for $count index patterns . . .
- {
- echo '{"actions":['
- get_list_of_indices | \
- while IFS=. read proj ; do
- # e.g. make test.uuid.* an alias of test.* so we can search for
- # /test.uuid.*/_search and get both the test.uuid.* and
- # the test.* indices
- uid=$(oc get project "$proj" -o jsonpath='{.metadata.uid}' 2>/dev/null)
- [ -n "$uid" ] && echo "{\"add\":{\"index\":\"$proj.*\",\"alias\":\"$proj.$uuid.*\"}}"
- done
- echo ']}'
- } | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases"
-fi
-
-count=$(get_list_of_proj_uuid_indices | wc -l)
-if [ $count -eq 0 ] ; then
- echo No matching indexes found - skipping update_for_common_data_model
- exit 0
-fi
-
-echo Creating aliases for $count index patterns . . .
-# for each index in _cat/indices
-# skip indices that begin with . - .kibana, .operations, etc.
-# get a list of unique project.uuid
-# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
-# we are interested in - the awk will strip that part off
-{
- echo '{"actions":['
- get_list_of_proj_uuid_indices | \
- while IFS=. read proj uuid ; do
- # e.g. make project.test.uuid.* and alias of test.uuid.* so we can search for
- # /project.test.uuid.*/_search and get both the test.uuid.* and
- # the project.test.uuid.* indices
- echo "{\"add\":{\"index\":\"$proj.$uuid.*\",\"alias\":\"${PROJ_PREFIX}$proj.$uuid.*\"}}"
- done
- echo ']}'
-} | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases"
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 7aabdc861..e7ef443bd 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -354,7 +354,7 @@
image: "{{ openshift_logging_elasticsearch_image_prefix }}logging-elasticsearch:{{ openshift_logging_elasticsearch_image_version }}"
proxy_image: "{{ openshift_logging_elasticsearch_proxy_image_prefix }}oauth-proxy:{{ openshift_logging_elasticsearch_proxy_image_version }}"
es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit | default('') }}"
- es_cpu_request: "{{ openshift_logging_elasticsearch_cpu_request }}"
+ es_cpu_request: "{{ openshift_logging_elasticsearch_cpu_request | min_cpu(openshift_logging_elasticsearch_cpu_limit | default(none)) }}"
es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"
es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}"
es_storage_groups: "{{ openshift_logging_elasticsearch_storage_group | default([]) }}"
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index f56810610..2f89c3f9f 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -154,7 +154,6 @@
path: "{{ generated_certs_dir }}/system.logging.fluentd.crt"
# create Fluentd daemonset
-
# this should change based on the type of fluentd deployment to be done...
# TODO: pass in aggregation configurations
- name: Generate logging-fluentd daemonset definition
@@ -173,7 +172,7 @@
fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}"
fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}"
fluentd_cpu_limit: "{{ openshift_logging_fluentd_cpu_limit }}"
- fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request }}"
+ fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request | min_cpu(openshift_logging_fluentd_cpu_limit | default(none)) }}"
fluentd_memory_limit: "{{ openshift_logging_fluentd_memory_limit }}"
audit_container_engine: "{{ openshift_logging_fluentd_audit_container_engine | default(False) | bool }}"
audit_log_file: "{{ openshift_logging_fluentd_audit_file | default() }}"
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
index 809f7a631..8ef8ede9a 100644
--- a/roles/openshift_logging_kibana/tasks/main.yaml
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -230,10 +230,10 @@
es_host: "{{ openshift_logging_kibana_es_host }}"
es_port: "{{ openshift_logging_kibana_es_port }}"
kibana_cpu_limit: "{{ openshift_logging_kibana_cpu_limit }}"
- kibana_cpu_request: "{{ openshift_logging_kibana_cpu_request }}"
+ kibana_cpu_request: "{{ openshift_logging_kibana_cpu_request | min_cpu(openshift_logging_kibana_cpu_limit | default(none)) }}"
kibana_memory_limit: "{{ openshift_logging_kibana_memory_limit }}"
kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_proxy_cpu_limit }}"
- kibana_proxy_cpu_request: "{{ openshift_logging_kibana_proxy_cpu_request }}"
+ kibana_proxy_cpu_request: "{{ openshift_logging_kibana_proxy_cpu_request | min_cpu(openshift_logging_kibana_proxy_cpu_limit | default(none)) }}"
kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}"
kibana_replicas: "{{ openshift_logging_kibana_replicas | default (1) }}"
kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}"
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
index 1b46a7ac3..5b257139e 100644
--- a/roles/openshift_logging_mux/tasks/main.yaml
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -171,7 +171,7 @@
ops_host: "{{ openshift_logging_mux_ops_host }}"
ops_port: "{{ openshift_logging_mux_ops_port }}"
mux_cpu_limit: "{{ openshift_logging_mux_cpu_limit }}"
- mux_cpu_request: "{{ openshift_logging_mux_cpu_request }}"
+ mux_cpu_request: "{{ openshift_logging_mux_cpu_request | min_cpu(openshift_logging_mux_cpu_limit | default(none)) }}"
mux_memory_limit: "{{ openshift_logging_mux_memory_limit }}"
mux_replicas: "{{ openshift_logging_mux_replicas | default(1) }}"
mux_node_selector: "{{ openshift_logging_mux_nodeselector | default({}) }}"
diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md
index 3a71d9211..96de82669 100644
--- a/roles/openshift_management/README.md
+++ b/roles/openshift_management/README.md
@@ -38,6 +38,10 @@ deployment type (`openshift_deployment_type`):
* [Cloud Provider](#cloud-provider)
* [Preconfigured (Expert Configuration Only)](#preconfigured-expert-configuration-only)
* [Customization](#customization)
+ * [Container Provider](#container-provider)
+ * [Manually](#manually)
+ * [Automatically](#automatically)
+ * [Multiple Providers](#multiple-providers)
* [Uninstall](#uninstall)
* [Additional Information](#additional-information)
@@ -80,30 +84,20 @@ to there being no databases that require pods.
*Be extra careful* if you are overriding template
parameters. Including parameters not defined in a template **will
-cause errors**.
-
-**Container Provider Integration** - If you want add your container
-platform (OCP/Origin) as a *Container Provider* in CFME/MIQ then you
-must ensure that the infrastructure management hooks are installed.
-
-* During your OCP/Origin install, ensure that you have the
- `openshift_use_manageiq` parameter set to `true` in your inventory
- at install time. This will create a `management-infra` project and a
- service account user.
-* After CFME/MIQ is installed, obtain the `management-admin` service
- account token and copy it somewhere safe.
-
-```bash
-$ oc serviceaccounts get-token -n management-infra management-admin
-eyJhuGdiOiJSUzI1NiIsInR5dCI6IkpXVCJ9.eyJpd9MiOiJrbWJldm5lbGVzL9NldnZpY2VhY2NvbW50Iiwiy9ViZXJuZXRldy5puy9zZXJ2yWNlYWNju9VubC9uYW1ld9BhY2UiOiJtYW5hZ2VtZW50LWluZnJhIiwiy9ViZXJuZXRldy5puy9zZXJ2yWNlYWNju9VubC9zZWNyZXQuumFtZSI6Im1humFnZW1lunQtYWRtyW4tbG9rZW4tdDBnOTAiLCJrbWJldm5lbGVzLmlvL9NldnZpY2VhY2NvbW50L9NldnZpY2UtYWNju9VubC5uYW1lIjoiuWFuYWbluWVubC1hZG1puiIsImt1YmVyumV0ZXMuyW8vd2VybmljZWFjY291unQvd2VybmljZS1hY2NvbW50LnVpZCI6IjRiZDM2MWQ1LWE1NDAtMTFlNy04YzI5LTUyNTQwMDliMmNkZCIsInN1YiI6InN5d9RluTpzZXJ2yWNlYWNju9VubDptYW5hZ2VtZW50LWluZnJhOm1humFnZW1lunQtYWRtyW4ifQ.B6sZLGD9O4vBu9MHwiG-C_4iEwjBXb7Af8BPw-LNlujDmHhOnQ-Oo4QxQKyj9edynfmDy2yutUyJ2Mm9HfDGWg4C9xhWImHoq6Nl7T5_9djkeGKkK7Ejvg4fA-IkrzEsZeQuluBvXnE6wvP0LCjUo_dx4pPyZJyp46teV9NqKQeDzeysjlMCyqp6AK6-Lj8ILG8YA6d_97HlzL_EgFBLAu0lBSn-uC_9J0gLysqBtK6TI0nExfhv9Bm1_5bdHEbKHPW7xIlYlI9AgmyTyhsQ6SoQWtL2khBjkG9TlPBq9wYJj9bzqgVZlqEfICZxgtXO7sYyuoje4y8lo0YQ0kZmig
-```
+cause errors**. If you do receive an error during the `Ensure the CFME
+App is created` task, we recommend running the
+[uninstall scripts](#uninstall) before running the installer
+again.
-* In the CFME/MIQ web interface, navigate to `Compute` →
- `Containers` → `Providers` and select `⚙ Configuration` → `⊕
- Add a new Containers Provider`
+### Beta
-*See the [upstream documentation](http://manageiq.org/docs/reference/latest/doc-Managing_Providers/miq/index.html#containers-providers) for additional information.*
+Only required for enterprise
+(`openshift_deployment_type=openshift-enterprise`) users:
+* `openshift_management_install_beta` - by setting this value to
+  `true` you acknowledge that this software is currently in BETA and
+  that support may be limited or nonexistent. This is required to
+  begin the installation.
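+
+As a minimal sketch (assumption: you keep inventory variables in a
+YAML `group_vars` file; translate to your own inventory layout as
+needed), the acknowledgement would look like:
+
+```yaml
+# Hypothetical group_vars file for your OSEv3 hosts.
+# Acknowledge BETA status so the installer will proceed on
+# openshift-enterprise deployments.
+openshift_management_install_beta: true
+```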
# Requirements
@@ -140,11 +134,14 @@ used in your Ansible inventory to control the behavior of this
installer.
-| Variable | Required | Default | Description |
-|------------------------------------------------|:--------:|:------------------------------:|-------------------------------------|
-| `openshift_management_project` | **No** | `openshift-management` | Namespace for the installation. |
+| Variable | Required | Default | Description |
+|------------------------------------------------------|:--------:|:------------------------------:|-------------------------------------|
+| `openshift_management_project` | **No** | `openshift-management` | Namespace for the installation. |
| `openshift_management_project_description` | **No** | *CloudForms Management Engine* | Namespace/project description. |
-| `openshift_management_install_management` | **No** | `false` | Boolean, set to `true` to install the application |
+| `openshift_management_install_management` | **No** | `false` | Boolean, set to `true` to install the application |
+| `openshift_management_install_beta` | **No** | `false` | Boolean, by setting this value to `true` you acknowledge that this software is currently in BETA and support may be limited. Only required for *openshift-enterprise* users. |
+| `openshift_management_username`               | **No** | `admin` | Default management username. Changing this value **does not change the username**. Only change this value if you have already changed the username and are running integration scripts (such as the [add container provider](#container-provider) script) |
+| `openshift_management_password`               | **No** | `smartvm` | Default management password. Changing this value **does not change the password**. Only change this value if you have already changed the password and are running integration scripts (such as the [add-container-provider](#container-provider) script) |
| **PRODUCT CHOICE** | | | | |
| `openshift_management_app_template` | **No** | `miq-template` | The project flavor to install. Choices: <ul><li>`miq-template`: ManageIQ using a podified database</li> <li> `miq-template-ext-db`: ManageIQ using an external database</li> <li>`cfme-template`: CloudForms using a podified database<sup>[1]</sup></li> <li> `cfme-template-ext-db`: CloudForms using an external database.<sup>[1]</sup></li></ul> |
| **STORAGE CLASSES** | | | | |
@@ -268,6 +265,9 @@ openshift_management_app_template=cfme-template-ext-db
openshift_management_template_parameters={'DATABASE_USER': 'root', 'DATABASE_PASSWORD': 'r1ck&M0r7y', 'DATABASE_IP': '10.10.10.10', 'DATABASE_PORT': '5432', 'DATABASE_NAME': 'cfme'}
```
+**NOTE:** Ensure you are running PostgreSQL 9.5 or you may not be
+able to deploy the app successfully.
+
# Limitations
This release is the first OpenShift CFME release in the OCP 3.7
@@ -318,7 +318,10 @@ inventory. The following keys are required:
* `DATABASE_PORT` - *note: Most PostgreSQL servers run on port `5432`*
* `DATABASE_NAME`
-Your inventory would contain a line similar to this:
+**NOTE:** Ensure you are running PostgreSQL 9.5 or you may not be
+able to deploy the app successfully.
+
+Your inventory would contain lines similar to these:
```ini
[OSEv3:vars]
@@ -336,7 +339,11 @@ At run time you may run into errors similar to this:
TASK [openshift_management : Ensure the CFME App is created] ***********************************
task path: /home/tbielawa/rhat/os/openshift-ansible/roles/openshift_management/tasks/main.yml:74
Tuesday 03 October 2017 15:30:44 -0400 (0:00:00.056) 0:00:12.278 *******
-{"cmd": "/usr/bin/oc create -f /tmp/postgresql-ZPEWQS -n openshift-management", "kind": "Endpoints", "results": {}, "returncode": 1, "stderr": "Error from server (BadRequest): error when creating \"/tmp/postgresql-ZPEWQS\": Endpoints in version \"v1\" cannot be handled as a Endpoints: [pos 218]: json: decNum: got first char 'f'\n", "stdout": ""}
+{"cmd": "/usr/bin/oc create -f /tmp/postgresql-ZPEWQS -n openshift-management",
+ "kind": "Endpoints", "results": {}, "returncode": 1, "stderr": "Error from server
+ (BadRequest): error when creating \"/tmp/postgresql-ZPEWQS\": Endpoints in version
+ \"v1\" cannot be handled as a Endpoints: [pos 218]: json: decNum: got first char
+ 'f'\n", "stdout": ""}
```
Or like this:
@@ -346,7 +353,10 @@ TASK [openshift_management : Ensure the CFME App is created] *******************
task path: /home/tbielawa/rhat/os/openshift-ansible/roles/openshift_management/tasks/main.yml:74
Tuesday 03 October 2017 16:05:36 -0400 (0:00:00.052) 0:00:18.948 *******
fatal: [m01.example.com]: FAILED! => {"changed": true, "failed": true, "msg":
-{"cmd": "/usr/bin/oc create -f /tmp/postgresql-igS5sx -n openshift-management", "kind": "Endpoints", "results": {}, "returncode": 1, "stderr": "The Endpoints \"postgresql\" is invalid: subsets[0].addresses[0].ip: Invalid value: \"doo\": must be a valid IP address, (e.g. 10.9.8.7)\n", "stdout": ""},
+{"cmd": "/usr/bin/oc create -f /tmp/postgresql-igS5sx -n openshift-management", "kind":
+ "Endpoints", "results": {}, "returncode": 1, "stderr": "The Endpoints \"postgresql\"
+ is invalid: subsets[0].addresses[0].ip: Invalid value: \"doo\": must be a valid IP
+ address, (e.g. 10.9.8.7)\n", "stdout": ""},
```
While intimidating at first, there are useful bits of information in
@@ -453,6 +463,116 @@ hash. This applies to **CloudForms** installations as well:
[cfme-template.yaml](files/templates/cloudforms/cfme-template.yaml),
[cfme-template-ext-db.yaml](files/templates/cloudforms/cfme-template-ext-db.yaml).
+# Container Provider
+
+There are two methods for enabling container provider integration. You
+can manually add OCP/Origin as a container provider, or you can use
+the playbooks included with this role.
+
+## Manually
+
+See the online documentation for steps to manually add your cluster as
+a container provider:
+
+* [Container Providers](http://manageiq.org/docs/reference/latest/doc-Managing_Providers/miq/#containers-providers)
+
+## Automatically
+
+Automated container provider integration can be accomplished using the
+`add_container_provider.yml` playbook included with this role.
+
+This playbook will:
+
+1. Gather the necessary authentication secrets
+1. Find the public routes to the Management app and the cluster API
+1. Make a REST call to add this cluster as a container provider
+
+
+```
+$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/add_container_provider.yml
+```
+
+## Multiple Providers
+
+As well as providing playbooks to integrate your *current* container
+platform into the management service, this role includes a **tech
+preview** script which allows you to add multiple container platforms
+as container providers in any arbitrary MIQ/CFME server.
+
+Using the multiple-provider script requires manual configuration and
+setting an `EXTRA_VARS` parameter on the command-line.
+
+
+1. Copy the
+ [container_providers.yml](files/examples/container_providers.yml)
+ example somewhere, such as `/tmp/cp.yml`
+1. If you changed your CFME/MIQ username or password, update the
+   `hostname`, `user`, and `password` parameters under the
+   `management_server` key in your copy of the `container_providers.yml` file
+1. Fill in an entry under the `container_providers` key for *each* OCP
+   or Origin cluster you want to add as a container provider
+
+**Parameters Which MUST Be Configured:**
+
+* `auth_key` - This is the token of a service account which has admin capabilities on the cluster.
+* `hostname` - This is the hostname that points to the cluster API. Each container provider must have a unique hostname.
+* `name` - This is the name of the cluster as displayed in the management server container providers overview. This must be unique.
+
+*Note*: You can obtain the `auth_key` bearer token from your clusters
+ with this command: `oc serviceaccounts get-token -n management-infra
+ management-admin`
+
+**Parameters Which MAY Be Configured:**
+
+* `port` - Update this key if your OCP/Origin cluster runs the API on a port other than `8443`
+* `endpoint` - You may enable SSL verification (`verify_ssl`) or change the validation setting to `ssl-with-validation`. Support for custom trusted CA certificates is not available at this time.
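+
+For instance, a hedged sketch of an `endpoint` entry with certificate
+validation enabled (assumption: `verify_ssl: 1` is the enabled
+counterpart of the `verify_ssl: 0` shown in the full example below):
+
+```yaml
+# Sketch only: enable SSL validation for one provider entry
+endpoint: {role: default, security_protocol: ssl-with-validation, verify_ssl: 1}
+```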
+
+
+Let's see an example describing the following scenario:
+
+* You copied `files/examples/container_providers.yml` to `/tmp/cp.yml`
+* You're adding two OCP clusters
+* Your management server runs on `mgmt.example.com`
+
+You would customize `/tmp/cp.yml` as such:
+
+```yaml
+---
+container_providers:
+ - connection_configurations:
+ - authentication: {auth_key: "management-token-for-this-cluster", authtype: bearer, type: AuthToken}
+ endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+ hostname: "ocp-prod.example.com"
+ name: OCP Production
+ port: 8443
+ type: "ManageIQ::Providers::Openshift::ContainerManager"
+ - connection_configurations:
+ - authentication: {auth_key: "management-token-for-this-cluster", authtype: bearer, type: AuthToken}
+ endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+ hostname: "ocp-test.example.com"
+ name: OCP Testing
+ port: 8443
+ type: "ManageIQ::Providers::Openshift::ContainerManager"
+management_server:
+ hostname: "mgmt.example.com"
+ user: admin
+ password: b3tt3r_p4SSw0rd
+```
+
+Then you will run the many-container-providers integration script. You
+**must** provide the path to the container providers configuration
+file as an `EXTRA_VARS` parameter to `ansible-playbook`. Use the `-e`
+(or `--extra-vars`) parameter to set `container_providers_config` to
+the config file path.
+
+```
+$ ansible-playbook -v -e container_providers_config=/tmp/cp.yml \
+ playbooks/byo/openshift-management/add_many_container_providers.yml
+```
+
+Afterwards you will find two new container providers in your
+management service. Navigate to `Compute` → `Containers` → `Providers`
+to see an overview.
# Uninstall
@@ -461,6 +581,40 @@ installation:
* `playbooks/byo/openshift-management/uninstall.yml`
+NFS export definitions and data stored on NFS exports are not
+automatically removed. You are urged to manually erase any data from
+old application or database deployments before attempting to
+initialize a new deployment.
+
+Failure to erase old PostgreSQL data can result in cascading
+errors. The postgres pod may enter a `CrashLoopBackOff` state,
+blocking the management pod from ever starting. The
+`CrashLoopBackOff` is caused by incorrect file permissions on the
+database NFS export created during a previous deployment.
+
+To continue, erase all data from the postgres export and delete the
+pod (**not** the deployer pod). For example, if you have pods such
+as these:
+
+```
+# oc get pods
+NAME READY STATUS RESTARTS AGE
+httpd-1-cx7fk 1/1 Running 1 21h
+manageiq-0 0/1 Running 1 21h
+memcached-1-vkc7p 1/1 Running 1 21h
+postgresql-1-deploy 1/1 Running 1 21h
+postgresql-1-6w2t4 0/1 CrashLoopBackOff 1 21h
+```
+
+Then you would:
+
+1. Erase the data from the database NFS export
+2. `oc delete pod postgresql-1-6w2t4`
+
+The postgres deployer pod will try to scale up a new postgres pod to
+replace the one you deleted. Once the new postgres pod is running, the
+manageiq pod will stop blocking and begin application initialization.
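+
+If you script this cleanup, a minimal Ansible sketch might look like
+the following (assumptions: the `/exports/miq-db` export path and the
+pod name; both will differ in your deployment):
+
+```yaml
+- name: Erase old database data from the NFS export (assumed path)
+  file:
+    path: /exports/miq-db
+    state: absent
+
+- name: Delete the crashed postgres pod, not the deployer (assumed pod name)
+  command: oc delete pod postgresql-1-6w2t4 -n openshift-management
+```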
+
# Additional Information
The upstream project,
diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml
index ebb56313f..8ba65b386 100644
--- a/roles/openshift_management/defaults/main.yml
+++ b/roles/openshift_management/defaults/main.yml
@@ -77,6 +77,20 @@ openshift_management_storage_nfs_base_dir: /exports
openshift_management_storage_nfs_local_hostname: false
######################################################################
+# DEFAULT ACCOUNT INFORMATION
+######################################################################
+# These are the default values for the username and password of the
+# management app. Changing these values in your inventory will not
+# change your username or password. You should only need to change
+# these values in your inventory if you already changed the actual
+# username and password AND are trying to use integration scripts.
+#
+# For example, adding this cluster as a container provider,
+# playbooks/byo/openshift-management/add_container_provider.yml
+openshift_management_username: admin
+openshift_management_password: smartvm
+
+######################################################################
# SCAFFOLDING - These are parameters we pre-seed that a user may or
# may not set later
######################################################################
diff --git a/roles/openshift_management/files/examples/container_providers.yml b/roles/openshift_management/files/examples/container_providers.yml
new file mode 100644
index 000000000..661f62e4d
--- /dev/null
+++ b/roles/openshift_management/files/examples/container_providers.yml
@@ -0,0 +1,22 @@
+---
+container_providers:
+ - connection_configurations:
+ - authentication: {auth_key: "management-admin-token-here", authtype: bearer, type: AuthToken}
+ endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+ hostname: "OCP/Origin cluster hostname (providing API access)"
+ name: openshift-management
+ port: 8443
+ type: "ManageIQ::Providers::Openshift::ContainerManager"
+# Copy and update for as many OCP or Origin providers as you want to
+# add to your management service
+ # - connection_configurations:
+ # - authentication: {auth_key: "management-admin-token-here", authtype: bearer, type: AuthToken}
+ # endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+ # hostname: "OCP/Origin cluster hostname (providing API access)"
+ # name: openshift-management
+ # port: 8443
+ # type: "ManageIQ::Providers::Openshift::ContainerManager"
+management_server:
+ hostname: "Management server hostname (providing API access)"
+ user: admin
+ password: smartvm
diff --git a/roles/openshift_management/filter_plugins/oo_management_filters.py b/roles/openshift_management/filter_plugins/oo_management_filters.py
new file mode 100644
index 000000000..3b7013d9a
--- /dev/null
+++ b/roles/openshift_management/filter_plugins/oo_management_filters.py
@@ -0,0 +1,32 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+"""
+Filter methods for the management role
+"""
+
+
+def oo_filter_container_providers(results):
+ """results - the result from posting the API calls for adding new
+providers"""
+ all_results = []
+ for result in results:
+ if 'results' in result['json']:
+ # We got an OK response
+ res = result['json']['results'][0]
+ all_results.append("Provider '{}' - Added successfully".format(res['name']))
+ elif 'error' in result['json']:
+ # This was a problem
+ all_results.append("Provider '{}' - Failed to add. Message: {}".format(
+ result['item']['name'], result['json']['error']['message']))
+ return all_results
+
+
+class FilterModule(object):
+ """ Custom ansible filter mapping """
+
+ # pylint: disable=no-self-use, too-few-public-methods
+ def filters(self):
+ """ returns a mapping of filters to methods """
+ return {
+ "oo_filter_container_providers": oo_filter_container_providers,
+ }
diff --git a/roles/openshift_management/tasks/add_container_provider.yml b/roles/openshift_management/tasks/add_container_provider.yml
new file mode 100644
index 000000000..50a5252cc
--- /dev/null
+++ b/roles/openshift_management/tasks/add_container_provider.yml
@@ -0,0 +1,77 @@
+---
+- name: Ensure lib_openshift modules are available
+ include_role:
+ role: lib_openshift
+
+- name: Ensure OpenShift facts module is available
+ include_role:
+ role: openshift_facts
+
+- name: Ensure OpenShift facts are loaded
+ openshift_facts:
+
+- name: Ensure we use openshift_master_cluster_public_hostname if it is available
+ set_fact:
+ l_cluster_hostname: "{{ openshift.master.cluster_public_hostname }}"
+ when:
+ - openshift.master.cluster_public_hostname is defined
+
+- name: Ensure we default to the first master if openshift_master_cluster_public_hostname is unavailable
+ set_fact:
+ l_cluster_hostname: "{{ openshift.master.cluster_hostname }}"
+ when:
+ - l_cluster_hostname is not defined
+
+- name: Ensure the management SA Secrets are read
+ oc_serviceaccount_secret:
+ state: list
+ service_account: management-admin
+ namespace: management-infra
+ register: sa
+
+- name: Ensure the management SA bearer token is identified
+ set_fact:
+ management_token: "{{ sa.results | oo_filter_sa_secrets }}"
+
+- name: Ensure the SA bearer token value is read
+ oc_secret:
+ state: list
+ name: "{{ management_token }}"
+ namespace: management-infra
+ decode: true
+ no_log: True
+ register: sa_secret
+
+- name: Ensure the SA bearer token value is saved
+ set_fact:
+ management_bearer_token: "{{ sa_secret.results.decoded.token }}"
+
+- name: Ensure we have the public route to the management service
+ oc_route:
+ state: list
+ name: httpd
+ namespace: openshift-management
+ register: route
+
+- name: Ensure the management service route is saved
+ set_fact:
+ management_route: "{{ route.results.0.spec.host }}"
+
+- name: Ensure this cluster is a container provider
+ uri:
+ url: "https://{{ management_route }}/api/providers"
+ body_format: json
+ method: POST
+ user: "{{ openshift_management_username }}"
+ password: "{{ openshift_management_password }}"
+ validate_certs: no
+ # Docs on formatting the BODY of the POST request:
+ # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations
+ body:
+ connection_configurations:
+ - authentication: {auth_key: "{{ management_bearer_token }}", authtype: bearer, type: AuthToken}
+ endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+ hostname: "{{ l_cluster_hostname }}"
+ name: "{{ openshift_management_project }}"
+ port: "{{ openshift.master.api_port }}"
+ type: "ManageIQ::Providers::Openshift::ContainerManager"
diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml
index 86c4d0010..9be923a57 100644
--- a/roles/openshift_management/tasks/main.yml
+++ b/roles/openshift_management/tasks/main.yml
@@ -2,23 +2,33 @@
######################################################################
# Users, projects, and privileges
-- name: Run pre-install CFME validation checks
+- name: Run pre-install Management validation checks
include: validate.yml
-- name: "Ensure the CFME '{{ openshift_management_project }}' namespace exists"
+# This creates a service account allowing Container Provider
+# integration (managing OCP/Origin via MIQ/Management)
+- name: Enable Container Provider Integration
+ include_role:
+ role: openshift_manageiq
+
+- name: "Ensure the Management '{{ openshift_management_project }}' namespace exists"
oc_project:
state: present
name: "{{ openshift_management_project }}"
display_name: "{{ openshift_management_project_description }}"
-- name: Create and Authorize CFME Accounts
+- name: Create and Authorize Management Accounts
include: accounts.yml
######################################################################
# STORAGE - Initialize basic storage class
+- name: Determine the correct NFS host if required
+ include: storage/nfs_server.yml
+ when: openshift_management_storage_class in ['nfs', 'nfs_external']
+
#---------------------------------------------------------------------
# * nfs - set up NFS shares on the first master for a proof of concept
-- name: Create required NFS exports for CFME app storage
+- name: Create required NFS exports for Management app storage
include: storage/nfs.yml
when: openshift_management_storage_class == 'nfs'
@@ -45,7 +55,7 @@
######################################################################
# APPLICATION TEMPLATE
-- name: Install the CFME app and PV templates
+- name: Install the Management app and PV templates
include: template.yml
######################################################################
@@ -71,9 +81,16 @@
when:
- openshift_management_app_template in ['miq-template', 'cfme-template']
-- name: Ensure the CFME App is created
+- name: Ensure the Management App is created
oc_process:
namespace: "{{ openshift_management_project }}"
template_name: "{{ openshift_management_template_name }}"
create: True
params: "{{ openshift_management_template_parameters }}"
+
+- name: Wait for the app to come up. May take several minutes, 30s check intervals, 10m max
+ command: "oc logs {{ openshift_management_flavor }}-0 -n {{ openshift_management_project }}"
+ register: app_seeding_logs
+ until: app_seeding_logs.stdout.find('Server starting complete') != -1
+ delay: 30
+ retries: 20
diff --git a/roles/openshift_management/tasks/noop.yml b/roles/openshift_management/tasks/noop.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/roles/openshift_management/tasks/noop.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml
index 31c845725..d1b9a8d5c 100644
--- a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml
+++ b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml
@@ -26,7 +26,7 @@
when:
- openshift_management_template_parameters.DATABASE_VOLUME_CAPACITY is not defined
-- name: Check if the CFME App PV has been created
+- name: Check if the Management App PV has been created
oc_obj:
namespace: "{{ openshift_management_project }}"
state: list
@@ -34,7 +34,7 @@
name: "{{ openshift_management_flavor_short }}-app"
register: miq_app_pv_check
-- name: Check if the CFME DB PV has been created
+- name: Check if the Management DB PV has been created
oc_obj:
namespace: "{{ openshift_management_project }}"
state: list
@@ -44,7 +44,7 @@
when:
- openshift_management_app_template in ['miq-template', 'cfme-template']
-- name: Ensure the CFME App PV is created
+- name: Ensure the Management App PV is created
oc_process:
namespace: "{{ openshift_management_project }}"
template_name: "{{ openshift_management_flavor }}-app-pv"
@@ -55,7 +55,7 @@
NFS_HOST: "{{ openshift_management_nfs_server }}"
when: miq_app_pv_check.results.results == [{}]
-- name: Ensure the CFME DB PV is created
+- name: Ensure the Management DB PV is created
oc_process:
namespace: "{{ openshift_management_project }}"
template_name: "{{ openshift_management_flavor }}-db-pv"
diff --git a/roles/openshift_management/tasks/storage/nfs.yml b/roles/openshift_management/tasks/storage/nfs.yml
index 696808328..94e11137c 100644
--- a/roles/openshift_management/tasks/storage/nfs.yml
+++ b/roles/openshift_management/tasks/storage/nfs.yml
@@ -2,37 +2,6 @@
# Tasks to statically provision NFS volumes
# Include if not using dynamic volume provisioning
-- name: Ensure we save the local NFS server if one is provided
- set_fact:
- openshift_management_nfs_server: "{{ openshift_management_storage_nfs_local_hostname }}"
- when:
- - openshift_management_storage_nfs_local_hostname is defined
- - openshift_management_storage_nfs_local_hostname != False
- - openshift_management_storage_class == "nfs"
-
-- name: Ensure we save the local NFS server
- set_fact:
- openshift_management_nfs_server: "{{ groups['oo_nfs_to_config'].0 }}"
- when:
- - openshift_management_nfs_server is not defined
- - openshift_management_storage_class == "nfs"
-
-- name: Ensure we save the external NFS server
- set_fact:
- openshift_management_nfs_server: "{{ openshift_management_storage_nfs_external_hostname }}"
- when:
- - openshift_management_storage_class == "nfs_external"
-
-- name: Failed NFS server detection
- assert:
- that:
- - openshift_management_nfs_server is defined
- msg: |
- "Unable to detect an NFS server. The 'nfs_external'
- openshift_management_storage_class option requires that you set
- openshift_management_storage_nfs_external_hostname. NFS hosts detected
- for local nfs services: {{ groups['oo_nfs_to_config'] | join(', ') }}"
-
- name: Setting up NFS storage
block:
- name: Include the NFS Setup role tasks
diff --git a/roles/openshift_management/tasks/storage/nfs_server.yml b/roles/openshift_management/tasks/storage/nfs_server.yml
new file mode 100644
index 000000000..a1b618137
--- /dev/null
+++ b/roles/openshift_management/tasks/storage/nfs_server.yml
@@ -0,0 +1,45 @@
+---
+- name: Ensure we save the local NFS server if one is provided
+ set_fact:
+ openshift_management_nfs_server: "{{ openshift_management_storage_nfs_local_hostname }}"
+ when:
+ - openshift_management_storage_nfs_local_hostname is defined
+ - openshift_management_storage_nfs_local_hostname != False
+ - openshift_management_storage_class == "nfs"
+
+- name: Ensure we save the local NFS server
+ set_fact:
+ openshift_management_nfs_server: "{{ groups['oo_nfs_to_config'].0 }}"
+ when:
+ - openshift_management_nfs_server is not defined
+ - openshift_management_storage_class == "nfs"
+
+- name: Ensure we save the external NFS server
+ set_fact:
+ openshift_management_nfs_server: "{{ openshift_management_storage_nfs_external_hostname }}"
+ when:
+ - openshift_management_storage_class == "nfs_external"
+
+- name: Failed External NFS server detection
+ assert:
+ that:
+ - openshift_management_nfs_server is defined
+ msg: |
+ Unable to detect an NFS server. The 'nfs_external'
+ openshift_management_storage_class option requires that you
+    manually set the openshift_management_storage_nfs_external_hostname
+ parameter.
+ when:
+ - openshift_management_storage_class == 'nfs_external'
+
+- name: Failed Local NFS server detection
+ assert:
+ that:
+ - openshift_management_nfs_server is defined
+ msg: |
+ Unable to detect an NFS server. The 'nfs'
+ openshift_management_storage_class option requires that you have
+ an 'nfs' inventory group or manually set the
+ openshift_management_storage_nfs_local_hostname parameter.
+ when:
+ - openshift_management_storage_class == 'nfs'
diff --git a/roles/openshift_management/tasks/template.yml b/roles/openshift_management/tasks/template.yml
index 299158ac4..9f97cdcb9 100644
--- a/roles/openshift_management/tasks/template.yml
+++ b/roles/openshift_management/tasks/template.yml
@@ -15,7 +15,7 @@
# STANDARD PODIFIED DATABASE TEMPLATE
- when: openshift_management_app_template in ['miq-template', 'cfme-template']
block:
- - name: Check if the CFME Server template has been created already
+ - name: Check if the Management Server template has been created already
oc_obj:
namespace: "{{ openshift_management_project }}"
state: list
@@ -25,12 +25,12 @@
- when: miq_server_check.results.results == [{}]
block:
- - name: Copy over CFME Server template
+ - name: Copy over Management Server template
copy:
src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-template.yaml"
dest: "{{ template_dir }}/"
- - name: Ensure CFME Server Template is created
+ - name: Ensure Management Server Template is created
oc_obj:
namespace: "{{ openshift_management_project }}"
name: "{{ openshift_management_flavor }}"
@@ -41,9 +41,9 @@
######################################################################
# EXTERNAL DATABASE TEMPLATE
-- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template']
+- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db']
block:
- - name: Check if the CFME Ext-DB Server template has been created already
+ - name: Check if the Management Ext-DB Server template has been created already
oc_obj:
namespace: "{{ openshift_management_project }}"
state: list
@@ -53,12 +53,12 @@
- when: miq_ext_db_server_check.results.results == [{}]
block:
- - name: Copy over CFME Ext-DB Server template
+ - name: Copy over Management Ext-DB Server template
copy:
src: "templates/{{ openshift_management_flavor }}/{{openshift_management_flavor_short}}-template-ext-db.yaml"
dest: "{{ template_dir }}/"
- - name: Ensure CFME Ext-DB Server Template is created
+ - name: Ensure Management Ext-DB Server Template is created
oc_obj:
namespace: "{{ openshift_management_project }}"
name: "{{ openshift_management_flavor }}-ext-db"
@@ -74,7 +74,7 @@
# Begin conditional PV template creations
# Required for the application server
-- name: Check if the CFME App PV template has been created already
+- name: Check if the Management App PV template has been created already
oc_obj:
namespace: "{{ openshift_management_project }}"
state: list
@@ -84,12 +84,12 @@
- when: miq_app_pv_check.results.results == [{}]
block:
- - name: Copy over CFME App PV template
+ - name: Copy over Management App PV template
copy:
src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml"
dest: "{{ template_dir }}/"
- - name: Ensure CFME App PV Template is created
+ - name: Ensure Management App PV Template is created
oc_obj:
namespace: "{{ openshift_management_project }}"
name: "{{ openshift_management_flavor }}-app-pv"
@@ -103,7 +103,7 @@
# Required for database if the installation is fully podified
- when: openshift_management_app_template in ['miq-template', 'cfme-template']
block:
- - name: Check if the CFME DB PV template has been created already
+ - name: Check if the Management DB PV template has been created already
oc_obj:
namespace: "{{ openshift_management_project }}"
state: list
@@ -113,12 +113,12 @@
- when: miq_db_pv_check.results.results == [{}]
block:
- - name: Copy over CFME DB PV template
+ - name: Copy over Management DB PV template
copy:
src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml"
dest: "{{ template_dir }}/"
- - name: Ensure CFME DB PV Template is created
+ - name: Ensure Management DB PV Template is created
oc_obj:
namespace: "{{ openshift_management_project }}"
name: "{{ openshift_management_flavor }}-db-pv"
diff --git a/roles/openshift_management/tasks/validate.yml b/roles/openshift_management/tasks/validate.yml
index 8b20bdc5e..b22f36a4f 100644
--- a/roles/openshift_management/tasks/validate.yml
+++ b/roles/openshift_management/tasks/validate.yml
@@ -2,12 +2,25 @@
# Validate configuration parameters passed to the openshift_management role
######################################################################
+# BETA ACKNOWLEDGEMENT
+- name: Ensure BETA software notice has been acknowledged
+ assert:
+ that:
+ - openshift_management_install_beta | default(false) | bool
+ msg: |
+    openshift-management (CFME/MIQ) is currently in BETA status. You
+ must set openshift_management_install_beta to true to
+ acknowledge that you accept this risk and understand that
+ support is limited or nonexistent.
+ when:
+ - openshift_deployment_type == 'openshift-enterprise'
+
+######################################################################
# CORE PARAMETERS
- name: Ensure openshift_management_app_template is valid
assert:
that:
- openshift_management_app_template in __openshift_management_app_templates
-
msg: |
"openshift_management_app_template must be one of {{
__openshift_management_app_templates | join(', ') }}"
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 3da861d03..fe78dea66 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -12,7 +12,7 @@ r_openshift_master_clean_install: false
r_openshift_master_etcd3_storage: false
r_openshift_master_os_firewall_enable: true
r_openshift_master_os_firewall_deny: []
-r_openshift_master_os_firewall_allow:
+default_r_openshift_master_os_firewall_allow:
- service: api server https
port: "{{ openshift.master.api_port }}/tcp"
- service: api controllers https
@@ -24,6 +24,8 @@ r_openshift_master_os_firewall_allow:
- service: etcd embedded
port: 4001/tcp
cond: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+r_openshift_master_os_firewall_allow: "{{ default_r_openshift_master_os_firewall_allow | union(openshift_master_open_ports | default([])) }}"
+
# oreg_url is defined by user input
oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
@@ -62,7 +64,7 @@ openshift_master_config_dir_default: "{{ (openshift.common.config_base | default
openshift_master_config_dir: "{{ openshift_master_config_dir_default }}"
openshift_master_cloud_provider: "{{ openshift_cloudprovider_kind | default('aws') }}"
-openshift_master_node_config_networkconfig_mtu: 1450
+openshift_master_node_config_networkconfig_mtu: "{{ openshift_node_sdn_mtu | default(1450) }}"
openshift_master_node_config_kubeletargs_cpu: 500m
openshift_master_node_config_kubeletargs_mem: 512M
@@ -71,7 +73,7 @@ openshift_master_bootstrap_enabled: False
openshift_master_client_binary: "{{ openshift.common.client_binary if openshift is defined else 'oc' }}"
-openshift_master_config_imageconfig_format: "{{ oreg_url if oreg_url != '' else 'registry.access.redhat.com/openshift3/ose-${component}:${version}' }}"
+openshift_master_config_imageconfig_format: "{{ openshift.node.registry_url }}"
# these are for the default settings in a generated node-config.yaml
openshift_master_node_config_default_edits:
@@ -101,7 +103,7 @@ openshift_master_node_config_default_edits:
value:
- 'true'
- key: networkConfig.mtu
- value: 8951
+ value: "{{ openshift_master_node_config_networkconfig_mtu }}"
- key: networkConfig.networkPluginName
value: "{{ r_openshift_master_sdn_network_plugin_name }}"
- key: networkPluginName
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index d5094c2c9..f88c4a7dc 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -9,10 +9,13 @@
notify:
- Verify API Server
+# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
- state: restarted
+ command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ retries: 3
+ delay: 5
+ register: result
+ until: result.rc == 0
when:
- not (master_controllers_service_status_changed | default(false) | bool)
- openshift.master.cluster_method == 'native'
diff --git a/roles/openshift_master/tasks/bootstrap.yml b/roles/openshift_master/tasks/bootstrap.yml
index eee89743c..f837a8bae 100644
--- a/roles/openshift_master/tasks/bootstrap.yml
+++ b/roles/openshift_master/tasks/bootstrap.yml
@@ -1,26 +1,12 @@
---
-
-- name: ensure the node-bootstrap service account exists
- oc_serviceaccount:
- name: node-bootstrapper
- namespace: openshift-infra
- state: present
- run_once: true
-
-- name: grant node-bootstrapper the correct permissions to bootstrap
- oc_adm_policy_user:
- namespace: openshift-infra
- user: system:serviceaccount:openshift-infra:node-bootstrapper
- resource_kind: cluster-role
- resource_name: system:node-bootstrapper
- state: present
- run_once: true
-
# TODO: create a module for this command.
# oc_serviceaccounts_kubeconfig
- name: create service account kubeconfig with csr rights
command: "oc serviceaccounts create-kubeconfig node-bootstrapper -n openshift-infra"
register: kubeconfig_out
+ until: kubeconfig_out.rc == 0
+ retries: 24
+ delay: 5
- name: put service account kubeconfig into a file on disk for bootstrap
copy:
@@ -42,6 +28,7 @@
--node-dir={{ mktempout.stdout }}/
--node=CONFIGMAP
--hostnames=test
+ --dns-ip=0.0.0.0
--certificate-authority={{ openshift_master_config_dir }}/ca.crt
--signer-cert={{ openshift_master_config_dir }}/ca.crt
--signer-key={{ openshift_master_config_dir }}/ca.key
diff --git a/roles/openshift_master/tasks/clean_systemd_units.yml b/roles/openshift_master/tasks/clean_systemd_units.yml
deleted file mode 100644
index e641f84d4..000000000
--- a/roles/openshift_master/tasks/clean_systemd_units.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-
-- name: Disable master service
- systemd:
- name: "{{ openshift.common.service_type }}-master"
- state: stopped
- enabled: no
- masked: yes
- ignore_errors: true
diff --git a/roles/openshift_master/tasks/journald.yml b/roles/openshift_master/tasks/journald.yml
index f79955e95..a16cbe78e 100644
--- a/roles/openshift_master/tasks/journald.yml
+++ b/roles/openshift_master/tasks/journald.yml
@@ -3,6 +3,11 @@
stat: path=/etc/systemd/journald.conf
register: journald_conf_file
+- name: Create journald persistence directories
+ file:
+ path: /var/log/journal
+ state: directory
+
- name: Update journald setup
replace:
dest: /etc/systemd/journald.conf
@@ -16,7 +21,9 @@
# We need to restart journald immediately, otherwise it gets in the way during
# further steps in ansible
- name: Restart journald
- systemd:
- name: systemd-journald
- state: restarted
+ command: "systemctl restart systemd-journald"
+ retries: 3
+ delay: 5
+ register: result
+ until: result.rc == 0
when: journald_update | changed
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
index 63d483760..cde01c49e 100644
--- a/roles/openshift_master/tasks/registry_auth.yml
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -11,6 +11,9 @@
- oreg_auth_user is defined
- (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
register: master_oreg_auth_credentials_create
+ retries: 3
+ delay: 5
+ until: master_oreg_auth_credentials_create.rc == 0
notify:
- restart master api
- restart master controllers
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index fcc66044b..8420dfb8c 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -14,8 +14,22 @@
- include: registry_auth.yml
+- name: Disable the legacy master service if it exists
+ systemd:
+ name: "{{ openshift.common.service_type }}-master"
+ state: stopped
+ enabled: no
+ masked: yes
+ ignore_errors: true
+
- name: Remove the legacy master service if it exists
- include: clean_systemd_units.yml
+ file:
+ path: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master.service"
+ state: absent
+ ignore_errors: true
+ when:
+ - openshift.master.cluster_method == "native"
+ - not openshift.common.is_master_system_container | bool
# This is the image used for both HA and non-HA clusters:
- name: Pre-pull master image
@@ -44,6 +58,17 @@
- l_create_ha_unit_files | changed
# end workaround for missing systemd unit files
+- name: enable master services
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-{{ item }}"
+ enabled: yes
+ with_items:
+ - api
+ - controllers
+ when:
+ - openshift.master.cluster_method == "native"
+ - not openshift.common.is_master_system_container | bool
+
- name: Preserve Master API Proxy Config options
command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api
register: l_master_api_proxy
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 40775571f..c83fc9fbb 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -58,11 +58,12 @@ controllerConfig:
{% endif %}
controllers: '*'
corsAllowedOrigins:
+  # Anchor with start (\A) and end (\z) of the string, make the check case-insensitive ((?i)), and escape the hostname
{% for origin in ['127.0.0.1', 'localhost', openshift.common.ip, openshift.common.public_ip] | union(openshift.common.all_hostnames) | unique %}
- - {{ origin }}
+ - (?i)\A{{ origin | regex_escape() }}\z
{% endfor %}
{% for custom_origin in openshift.master.custom_cors_origins | default("") %}
- - {{ custom_origin }}
+ - (?i)\A{{ custom_origin | regex_escape() }}\z
{% endfor %}
{% if 'disabled_features' in openshift.master %}
disabledFeatures: {{ openshift.master.disabled_features | to_json }}
@@ -179,6 +180,11 @@ masterPublicURL: {{ openshift.master.public_api_url }}
networkConfig:
clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
+{% if openshift.common.version_gte_3_7 | bool %}
+ clusterNetworks:
+ - cidr: {{ openshift.master.sdn_cluster_network_cidr }}
+ hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
+{% endif %}
{% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_use_kuryr or r_openshift_master_sdn_network_plugin_name == 'cni' %}
networkPluginName: {{ r_openshift_master_sdn_network_plugin_name_default }}
{% endif %}
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index a4f410296..97a5179e0 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -326,10 +326,8 @@ class IdentityProviderOauthBase(IdentityProviderBase):
self._required += [['clientID', 'client_id'], ['clientSecret', 'client_secret']]
def validate(self):
- ''' validate this idp instance '''
- if self.challenge:
- raise errors.AnsibleFilterError("|failed provider {0} does not "
- "allow challenge authentication".format(self.__class__.__name__))
+ ''' validate an instance of this idp class '''
+ pass
class OpenIDIdentityProvider(IdentityProviderOauthBase):
@@ -428,6 +426,12 @@ class GoogleIdentityProvider(IdentityProviderOauthBase):
IdentityProviderOauthBase.__init__(self, api_version, idp)
self._optional += [['hostedDomain', 'hosted_domain']]
+ def validate(self):
+ ''' validate this idp instance '''
+ if self.challenge:
+ raise errors.AnsibleFilterError("|failed provider {0} does not "
+ "allow challenge authentication".format(self.__class__.__name__))
+
class GitHubIdentityProvider(IdentityProviderOauthBase):
""" GitHubIdentityProvider
@@ -446,6 +450,12 @@ class GitHubIdentityProvider(IdentityProviderOauthBase):
self._optional += [['organizations'],
['teams']]
+ def validate(self):
+ ''' validate this idp instance '''
+ if self.challenge:
+ raise errors.AnsibleFilterError("|failed provider {0} does not "
+ "allow challenge authentication".format(self.__class__.__name__))
+
class FilterModule(object):
''' Custom ansible filters for use by the openshift_master role'''
@@ -510,7 +520,7 @@ class FilterModule(object):
'master.kubelet-client.crt',
'master.kubelet-client.key']
if bool(include_ca):
- certs += ['ca.crt', 'ca.key', 'ca-bundle.crt']
+ certs += ['ca.crt', 'ca.key', 'ca-bundle.crt', 'client-ca-bundle.crt']
if bool(include_keys):
certs += ['serviceaccounts.private.key',
'serviceaccounts.public.key']
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 501be148e..cf0be3bef 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -88,7 +88,6 @@
controller_args: "{{ osm_controller_args | default(None) }}"
disabled_features: "{{ osm_disabled_features | default(None) }}"
master_count: "{{ openshift_master_count | default(None) }}"
- controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}"
master_image: "{{ osm_image | default(None) }}"
admission_plugin_config: "{{openshift_master_admission_plugin_config }}"
kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" # deprecated, merged with admission_plugin_config
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml
index ce7688581..88b893448 100644
--- a/roles/openshift_metrics/handlers/main.yml
+++ b/roles/openshift_metrics/handlers/main.yml
@@ -4,8 +4,13 @@
when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
notify: Verify API Server
+# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ retries: 3
+ delay: 5
+ register: result
+ until: result.rc == 0
when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
- name: Verify API Server
diff --git a/roles/openshift_nfs/tasks/create_export.yml b/roles/openshift_nfs/tasks/create_export.yml
index 39323904f..b0b888d56 100644
--- a/roles/openshift_nfs/tasks/create_export.yml
+++ b/roles/openshift_nfs/tasks/create_export.yml
@@ -12,7 +12,7 @@
# l_nfs_export_name: Name of sub-directory of the export
# l_nfs_options: Mount Options
-- name: Ensure CFME App NFS export directory exists
+- name: "Ensure {{ l_nfs_export_name }} NFS export directory exists"
file:
path: "{{ l_nfs_base_dir }}/{{ l_nfs_export_name }}"
state: directory
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index b9f16dfd4..0c6d8db38 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -9,7 +9,7 @@ openshift_service_type: "{{ 'origin' if openshift_deployment_type == 'origin' el
openshift_image_tag: ''
-openshift_node_ami_prep_packages:
+default_r_openshift_node_image_prep_packages:
- "{{ openshift_service_type }}-master"
- "{{ openshift_service_type }}-node"
- "{{ openshift_service_type }}-docker-excluder"
@@ -33,7 +33,6 @@ openshift_node_ami_prep_packages:
- python-dbus
- PyYAML
- yum-utils
-- cloud-utils-growpart
# gluster
- glusterfs-fuse
# nfs
@@ -54,6 +53,7 @@ openshift_node_ami_prep_packages:
# - container-selinux
# - atomic
#
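+# Final image prep package list: the defaults above merged with any
+# user-supplied openshift_node_image_prep_packages entries.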
+r_openshift_node_image_prep_packages: "{{ default_r_openshift_node_image_prep_packages | union(openshift_node_image_prep_packages | default([])) }}"
openshift_node_bootstrap: False
@@ -110,5 +110,8 @@ openshift_node_use_kuryr: "{{ openshift_node_use_kuryr_default }}"
openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
+openshift_node_config_dir_default: "/etc/origin/node"
+openshift_node_config_dir: "{{ openshift_node_config_dir_default }}"
+
openshift_node_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}"
openshift_node_image_config_latest: "{{ openshift_node_image_config_latest_default }}"
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
index 8c03f6c41..8cf41ab4c 100644
--- a/roles/openshift_node/tasks/bootstrap.yml
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -3,7 +3,7 @@
package:
name: "{{ item }}"
state: present
- with_items: "{{ openshift_node_ami_prep_packages }}"
+ with_items: "{{ r_openshift_node_image_prep_packages }}"
- name: create the directory for node
file:
@@ -25,11 +25,11 @@
state: "{{ item.state | default('present') }}"
with_items:
# add the kubeconfig
- - line: "KUBECONFIG=/etc/origin/node/csr_kubeconfig"
+ - line: "KUBECONFIG={{ openshift_node_config_dir }}/bootstrap.kubeconfig"
regexp: "^KUBECONFIG=.*"
# remove the config file. This comes from openshift_facts
- - regexp: "^CONFIG_FILE=.*"
- state: absent
+ - line: "CONFIG_FILE={{ openshift_node_config_dir }}/node-config.yaml"
+ regexp: "^CONFIG_FILE=.*"
- name: include aws sysconfig credentials
include: aws.yml
@@ -76,7 +76,7 @@
state: link
force: yes
with_items:
- - /var/lib/origin/openshift.local.config/node/node-client-ca.crt
+ - "{{ openshift_node_config_dir }}/node-client-ca.crt"
- when: rpmgenerated_config.stat.exists
block:
diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml
index de396fb4b..5e5e4f94a 100644
--- a/roles/openshift_node/tasks/registry_auth.yml
+++ b/roles/openshift_node/tasks/registry_auth.yml
@@ -11,6 +11,9 @@
- oreg_auth_user is defined
- (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
register: node_oreg_auth_credentials_create
+ retries: 3
+ delay: 5
+ until: node_oreg_auth_credentials_create.rc == 0
notify:
- restart node
diff --git a/roles/openshift_node_dnsmasq/defaults/main.yml b/roles/openshift_node_dnsmasq/defaults/main.yml
index eae832fcf..ebcff46b5 100644
--- a/roles/openshift_node_dnsmasq/defaults/main.yml
+++ b/roles/openshift_node_dnsmasq/defaults/main.yml
@@ -1,2 +1,7 @@
---
openshift_node_dnsmasq_install_network_manager_hook: true
+
+# lo must always be present in this list or dnsmasq will conflict with
+# the node's dns service.
+openshift_node_dnsmasq_except_interfaces:
+- lo
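+# For example (hypothetical interface name), to also exclude a second
+# NIC, set in your inventory:
+#   openshift_node_dnsmasq_except_interfaces: [lo, eth1]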
diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
index 230f0a28c..f4e48b5b7 100755
--- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
+++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
@@ -54,6 +54,8 @@ domain-needed
server=/cluster.local/172.30.0.1
server=/30.172.in-addr.arpa/172.30.0.1
enable-dbus
+dns-forward-max=5000
+cache-size=5000
EOF
# New config file, must restart
NEEDS_RESTART=1
diff --git a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
index ef3ba2880..6543c7c3e 100644
--- a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
+++ b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
@@ -3,5 +3,10 @@ domain-needed
no-negcache
max-cache-ttl=1
enable-dbus
-bind-interfaces
-listen-address={{ openshift.node.dns_ip }}
+dns-forward-max=5000
+cache-size=5000
+bind-dynamic
+{% for interface in openshift_node_dnsmasq_except_interfaces %}
+except-interface={{ interface }}
+{% endfor %}
+# End of config
diff --git a/roles/openshift_node_upgrade/tasks/registry_auth.yml b/roles/openshift_node_upgrade/tasks/registry_auth.yml
index de396fb4b..5e5e4f94a 100644
--- a/roles/openshift_node_upgrade/tasks/registry_auth.yml
+++ b/roles/openshift_node_upgrade/tasks/registry_auth.yml
@@ -11,6 +11,9 @@
- oreg_auth_user is defined
- (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
register: node_oreg_auth_credentials_create
+ retries: 3
+ delay: 5
+ until: node_oreg_auth_credentials_create.rc == 0
notify:
- restart node
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
index 00995eee6..d217b90fb 100644
--- a/roles/openshift_prometheus/defaults/main.yaml
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -6,16 +6,6 @@ openshift_prometheus_namespace: prometheus
openshift_prometheus_node_selector: {"region":"infra"}
-# image defaults
-openshift_prometheus_image_prefix: "openshift/"
-openshift_prometheus_image_version: "v2.0.0-dev.3"
-openshift_prometheus_proxy_image_prefix: "openshift/"
-openshift_prometheus_proxy_image_version: "v1.0.0"
-openshift_prometheus_alertmanager_image_prefix: "openshift/"
-openshift_prometheus_alertmanager_image_version: "v0.9.1"
-openshift_prometheus_alertbuffer_image_prefix: "openshift/"
-openshift_prometheus_alertbuffer_image_version: "v0.0.2"
-
# additional prometheus rules file
openshift_prometheus_additional_rules_file: null
diff --git a/roles/openshift_prometheus/tasks/main.yaml b/roles/openshift_prometheus/tasks/main.yaml
index 523a64334..5cc9a67eb 100644
--- a/roles/openshift_prometheus/tasks/main.yaml
+++ b/roles/openshift_prometheus/tasks/main.yaml
@@ -1,4 +1,9 @@
---
+- name: Set default image variables based on deployment_type
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ openshift_deployment_type }}.yml"
+ - "default_images.yml"
- name: Create temp directory for doing work in on target
command: mktemp -td openshift-prometheus-ansible-XXXXXX
diff --git a/roles/openshift_prometheus/templates/prometheus.j2 b/roles/openshift_prometheus/templates/prometheus.j2
index 916c57aa2..456db3a57 100644
--- a/roles/openshift_prometheus/templates/prometheus.j2
+++ b/roles/openshift_prometheus/templates/prometheus.j2
@@ -23,28 +23,28 @@ spec:
{% if openshift_prometheus_node_selector is iterable and openshift_prometheus_node_selector | length > 0 %}
nodeSelector:
{% for key, value in openshift_prometheus_node_selector.iteritems() %}
- {{key}}: "{{value}}"
+ {{ key }}: "{{ value }}"
{% endfor %}
{% endif %}
containers:
# Deploy Prometheus behind an oauth proxy
- name: prom-proxy
- image: "{{openshift_prometheus_proxy_image_prefix}}oauth-proxy:{{openshift_prometheus_proxy_image_version}}"
+ image: "{{ l_openshift_prometheus_proxy_image_prefix }}oauth-proxy:{{ l_openshift_prometheus_proxy_image_version }}"
imagePullPolicy: IfNotPresent
resources:
requests:
{% if openshift_prometheus_oauth_proxy_memory_requests is defined and openshift_prometheus_oauth_proxy_memory_requests is not none %}
- memory: "{{openshift_prometheus_oauth_proxy_memory_requests}}"
+ memory: "{{ openshift_prometheus_oauth_proxy_memory_requests }}"
{% endif %}
{% if openshift_prometheus_oauth_proxy_cpu_requests is defined and openshift_prometheus_oauth_proxy_cpu_requests is not none %}
- cpu: "{{openshift_prometheus_oauth_proxy_cpu_requests}}"
+ cpu: "{{ openshift_prometheus_oauth_proxy_cpu_requests }}"
{% endif %}
limits:
{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %}
- memory: "{{openshift_prometheus_oauth_proxy_memory_limit}}"
+ memory: "{{ openshift_prometheus_oauth_proxy_memory_limit }}"
{% endif %}
{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %}
- cpu: "{{openshift_prometheus_oauth_proxy_cpu_limit}}"
+ cpu: "{{ openshift_prometheus_oauth_proxy_cpu_limit }}"
{% endif %}
ports:
- containerPort: 8443
@@ -79,22 +79,22 @@ spec:
- --storage.tsdb.min-block-duration=2m
- --config.file=/etc/prometheus/prometheus.yml
- --web.listen-address=localhost:9090
- image: "{{openshift_prometheus_image_prefix}}prometheus:{{openshift_prometheus_image_version}}"
+ image: "{{ l_openshift_prometheus_image_prefix }}prometheus:{{ l_openshift_prometheus_image_version }}"
imagePullPolicy: IfNotPresent
resources:
requests:
{% if openshift_prometheus_memory_requests is defined and openshift_prometheus_memory_requests is not none %}
- memory: "{{openshift_prometheus_memory_requests}}"
+ memory: "{{ openshift_prometheus_memory_requests }}"
{% endif %}
{% if openshift_prometheus_cpu_requests is defined and openshift_prometheus_cpu_requests is not none %}
- cpu: "{{openshift_prometheus_cpu_requests}}"
+ cpu: "{{ openshift_prometheus_cpu_requests }}"
{% endif %}
limits:
{% if openshift_prometheus_memory_limit is defined and openshift_prometheus_memory_limit is not none %}
memory: "{{ openshift_prometheus_memory_limit }}"
{% endif %}
{% if openshift_prometheus_cpu_limit is defined and openshift_prometheus_cpu_limit is not none %}
- cpu: "{{openshift_prometheus_cpu_limit}}"
+ cpu: "{{ openshift_prometheus_cpu_limit }}"
{% endif %}
volumeMounts:
@@ -105,22 +105,22 @@ spec:
# Deploy alertmanager behind prometheus-alert-buffer behind an oauth proxy
- name: alerts-proxy
- image: "{{openshift_prometheus_proxy_image_prefix}}oauth-proxy:{{openshift_prometheus_proxy_image_version}}"
+ image: "{{ l_openshift_prometheus_proxy_image_prefix }}oauth-proxy:{{ l_openshift_prometheus_proxy_image_version }}"
imagePullPolicy: IfNotPresent
resources:
requests:
{% if openshift_prometheus_oauth_proxy_memory_requests is defined and openshift_prometheus_oauth_proxy_memory_requests is not none %}
- memory: "{{openshift_prometheus_oauth_proxy_memory_requests}}"
+ memory: "{{ openshift_prometheus_oauth_proxy_memory_requests }}"
{% endif %}
{% if openshift_prometheus_oauth_proxy_cpu_requests is defined and openshift_prometheus_oauth_proxy_cpu_requests is not none %}
- cpu: "{{openshift_prometheus_oauth_proxy_cpu_requests}}"
+ cpu: "{{ openshift_prometheus_oauth_proxy_cpu_requests }}"
{% endif %}
limits:
{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %}
- memory: "{{openshift_prometheus_oauth_proxy_memory_limit}}"
+ memory: "{{ openshift_prometheus_oauth_proxy_memory_limit }}"
{% endif %}
{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %}
- cpu: "{{openshift_prometheus_oauth_proxy_cpu_limit}}"
+ cpu: "{{ openshift_prometheus_oauth_proxy_cpu_limit }}"
{% endif %}
ports:
- containerPort: 9443
@@ -149,22 +149,22 @@ spec:
- name: alert-buffer
args:
- --storage-path=/alert-buffer/messages.db
- image: "{{openshift_prometheus_alertbuffer_image_prefix}}prometheus-alert-buffer:{{openshift_prometheus_alertbuffer_image_version}}"
+ image: "{{ l_openshift_prometheus_alertbuffer_image_prefix }}prometheus-alert-buffer:{{ l_openshift_prometheus_alertbuffer_image_version }}"
imagePullPolicy: IfNotPresent
resources:
requests:
{% if openshift_prometheus_alertbuffer_memory_requests is defined and openshift_prometheus_alertbuffer_memory_requests is not none %}
- memory: "{{openshift_prometheus_alertbuffer_memory_requests}}"
+ memory: "{{ openshift_prometheus_alertbuffer_memory_requests }}"
{% endif %}
{% if openshift_prometheus_alertbuffer_cpu_requests is defined and openshift_prometheus_alertbuffer_cpu_requests is not none %}
- cpu: "{{openshift_prometheus_alertbuffer_cpu_requests}}"
+ cpu: "{{ openshift_prometheus_alertbuffer_cpu_requests }}"
{% endif %}
limits:
{% if openshift_prometheus_alertbuffer_memory_limit is defined and openshift_prometheus_alertbuffer_memory_limit is not none %}
- memory: "{{openshift_prometheus_alertbuffer_memory_limit}}"
+ memory: "{{ openshift_prometheus_alertbuffer_memory_limit }}"
{% endif %}
{% if openshift_prometheus_alertbuffer_cpu_limit is defined and openshift_prometheus_alertbuffer_cpu_limit is not none %}
- cpu: "{{openshift_prometheus_alertbuffer_cpu_limit}}"
+ cpu: "{{ openshift_prometheus_alertbuffer_cpu_limit }}"
{% endif %}
volumeMounts:
- mountPath: /alert-buffer
@@ -176,22 +176,22 @@ spec:
- name: alertmanager
args:
- -config.file=/etc/alertmanager/alertmanager.yml
- image: "{{openshift_prometheus_alertmanager_image_prefix}}prometheus-alertmanager:{{openshift_prometheus_alertmanager_image_version}}"
+ image: "{{ l_openshift_prometheus_alertmanager_image_prefix }}prometheus-alertmanager:{{ l_openshift_prometheus_alertmanager_image_version }}"
imagePullPolicy: IfNotPresent
resources:
requests:
{% if openshift_prometheus_alertmanager_memory_requests is defined and openshift_prometheus_alertmanager_memory_requests is not none %}
- memory: "{{openshift_prometheus_alertmanager_memory_requests}}"
+ memory: "{{ openshift_prometheus_alertmanager_memory_requests }}"
{% endif %}
{% if openshift_prometheus_alertmanager_cpu_requests is defined and openshift_prometheus_alertmanager_cpu_requests is not none %}
- cpu: "{{openshift_prometheus_alertmanager_cpu_requests}}"
+ cpu: "{{ openshift_prometheus_alertmanager_cpu_requests }}"
{% endif %}
limits:
{% if openshift_prometheus_alertmanager_memory_limit is defined and openshift_prometheus_alertmanager_memory_limit is not none %}
- memory: "{{openshift_prometheus_alertmanager_memory_limit}}"
+ memory: "{{ openshift_prometheus_alertmanager_memory_limit }}"
{% endif %}
{% if openshift_prometheus_alertmanager_cpu_limit is defined and openshift_prometheus_alertmanager_cpu_limit is not none %}
- cpu: "{{openshift_prometheus_alertmanager_cpu_limit}}"
+ cpu: "{{ openshift_prometheus_alertmanager_cpu_limit }}"
{% endif %}
ports:
- containerPort: 9093
diff --git a/roles/openshift_prometheus/vars/default_images.yml b/roles/openshift_prometheus/vars/default_images.yml
new file mode 100644
index 000000000..ad52a3125
--- /dev/null
+++ b/roles/openshift_prometheus/vars/default_images.yml
@@ -0,0 +1,12 @@
+---
+# image prefix defaults
+l_openshift_prometheus_image_prefix: "{{ openshift_prometheus_image_prefix | default('openshift/') }}"
+l_openshift_prometheus_proxy_image_prefix: "{{ openshift_prometheus_proxy_image_prefix | default(l_openshift_prometheus_image_prefix) }}"
+l_openshift_prometheus_alertmanager_image_prefix: "{{ openshift_prometheus_alertmanager_image_prefix | default(l_openshift_prometheus_image_prefix) }}"
+l_openshift_prometheus_alertbuffer_image_prefix: "{{ openshift_prometheus_alertbuffer_image_prefix | default(l_openshift_prometheus_image_prefix) }}"
+
+# image version defaults
+l_openshift_prometheus_image_version: "{{ openshift_prometheus_image_version | default('v2.0.0-dev.3') }}"
+l_openshift_prometheus_proxy_image_version: "{{ openshift_prometheus_proxy_image_version | default('v1.0.0') }}"
+l_openshift_prometheus_alertmanager_image_version: "{{ openshift_prometheus_alertmanager_image_version | default('v0.9.1') }}"
+l_openshift_prometheus_alertbuffer_image_version: "{{ openshift_prometheus_alertbuffer_image_version | default('v0.0.2') }}"
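With these locals in place, deployers only set the public openshift_prometheus_* variables and the l_-prefixed values follow. A minimal sketch of inventory overrides (hypothetical registry host; variable names as defined above):

    openshift_prometheus_image_prefix: registry.example.com/openshift/
    openshift_prometheus_image_version: v2.0.0-dev.3
    # proxy/alertmanager/alertbuffer prefixes fall back to l_openshift_prometheus_image_prefix unless set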
diff --git a/roles/openshift_prometheus/vars/openshift-enterprise.yml b/roles/openshift_prometheus/vars/openshift-enterprise.yml
new file mode 100644
index 000000000..9bb4c99bb
--- /dev/null
+++ b/roles/openshift_prometheus/vars/openshift-enterprise.yml
@@ -0,0 +1,12 @@
+---
+# image prefix defaults
+l_openshift_prometheus_image_prefix: "{{ openshift_prometheus_image_prefix | default('registry.access.redhat.com/openshift3/') }}"
+l_openshift_prometheus_proxy_image_prefix: "{{ openshift_prometheus_proxy_image_prefix | default(l_openshift_prometheus_image_prefix) }}"
+l_openshift_prometheus_alertmanager_image_prefix: "{{ openshift_prometheus_alertmanager_image_prefix | default(l_openshift_prometheus_image_prefix) }}"
+l_openshift_prometheus_alertbuffer_image_prefix: "{{ openshift_prometheus_alertbuffer_image_prefix | default(l_openshift_prometheus_image_prefix) }}"
+
+# image version defaults
+l_openshift_prometheus_image_version: "{{ openshift_prometheus_image_version | default('v3.7') }}"
+l_openshift_prometheus_proxy_image_version: "{{ openshift_prometheus_proxy_image_version | default('v3.7') }}"
+l_openshift_prometheus_alertmanager_image_version: "{{ openshift_prometheus_alertmanager_image_version | default('v3.7') }}"
+l_openshift_prometheus_alertbuffer_image_version: "{{ openshift_prometheus_alertbuffer_image_version | default('v3.7') }}"
diff --git a/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml b/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml
index ac21a5e37..1e6aafd00 100644
--- a/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml
+++ b/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml
@@ -1,6 +1,8 @@
---
- name: Generate ClusterRoleBindings
- template: src=clusterrolebinding.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-clusterrolebinding.yaml
+ template:
+ src: clusterrolebinding.j2
+ dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-clusterrolebinding.yaml"
vars:
acct_name: provisioners-{{item}}
obj_name: run-provisioners-{{item}}
diff --git a/roles/openshift_provisioners/tasks/generate_secrets.yaml b/roles/openshift_provisioners/tasks/generate_secrets.yaml
index e6cbb1bbf..fe5ff9f18 100644
--- a/roles/openshift_provisioners/tasks/generate_secrets.yaml
+++ b/roles/openshift_provisioners/tasks/generate_secrets.yaml
@@ -1,6 +1,8 @@
---
- name: Generate secret for efs
- template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-secret.yaml
+ template:
+ src: secret.j2
+ dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-secret.yaml"
vars:
name: efs
obj_name: "provisioners-efs"
diff --git a/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml b/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml
index 4fe0583ee..000f19994 100644
--- a/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml
+++ b/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml
@@ -1,6 +1,8 @@
---
- name: Generating serviceaccounts
- template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-sa.yaml
+ template:
+ src: serviceaccount.j2
+ dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-sa.yaml"
vars:
obj_name: provisioners-{{item}}
labels:
diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml
index 4a6e00513..6e8792446 100644
--- a/roles/openshift_provisioners/tasks/install_efs.yaml
+++ b/roles/openshift_provisioners/tasks/install_efs.yaml
@@ -9,7 +9,9 @@
changed_when: no
- name: Generate efs PersistentVolumeClaim
- template: src=pvc.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-pvc.yaml
+ template:
+ src: pvc.j2
+ dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-pvc.yaml"
vars:
obj_name: "provisioners-efs"
size: "1Mi"
@@ -21,7 +23,9 @@
changed_when: no
- name: Generate efs PersistentVolume
- template: src=pv.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-pv.yaml
+ template:
+ src: pv.j2
+ dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-pv.yaml"
vars:
obj_name: "provisioners-efs"
size: "1Mi"
diff --git a/roles/openshift_provisioners/tasks/install_support.yaml b/roles/openshift_provisioners/tasks/install_support.yaml
index ba472f1c9..d6db81ab9 100644
--- a/roles/openshift_provisioners/tasks/install_support.yaml
+++ b/roles/openshift_provisioners/tasks/install_support.yaml
@@ -1,16 +1,9 @@
---
-- name: Check for provisioners project already exists
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_provisioners_project}} --no-headers
- register: provisioners_project_result
- ignore_errors: yes
- when: not ansible_check_mode
- changed_when: no
-
-- name: Create provisioners project
- command: >
- {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_provisioners_project}}
- when: not ansible_check_mode and "not found" in provisioners_project_result.stderr
+- name: Set provisioners project
+ oc_project:
+ state: present
+ kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+ name: "{{ openshift_provisioners_project }}"
- name: Create temp directory for all our templates
file: path={{mktemp.stdout}}/templates state=directory mode=0755
diff --git a/roles/openshift_provisioners/templates/pv.j2 b/roles/openshift_provisioners/templates/pv.j2
index f4128f9f0..f81b1617a 100644
--- a/roles/openshift_provisioners/templates/pv.j2
+++ b/roles/openshift_provisioners/templates/pv.j2
@@ -30,3 +30,4 @@ spec:
name: {{claim_name}}
namespace: {{openshift_provisioners_project}}
{% endif %}
+ storageClassName: ""
diff --git a/roles/openshift_provisioners/templates/pvc.j2 b/roles/openshift_provisioners/templates/pvc.j2
index 83d503056..0dd8772eb 100644
--- a/roles/openshift_provisioners/templates/pvc.j2
+++ b/roles/openshift_provisioners/templates/pvc.j2
@@ -23,4 +23,5 @@ spec:
resources:
requests:
storage: {{size}}
+ storageClassName: ""
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index d41245093..95ba9fe4c 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -10,6 +10,11 @@
- name: Ensure libselinux-python is installed
package: name=libselinux-python state=present
+ - name: Remove openshift_additional.repo file
+ file:
+ dest: /etc/yum.repos.d/openshift_additional.repo
+ state: absent
+
- name: Create any additional repos that are defined
yum_repository:
description: "{{ item.description | default(item.name | default(item.id)) }}"
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index 5dccc9faf..70b236033 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -47,7 +47,7 @@
- name: Abort when openshift_release is invalid
when:
- openshift_release is defined
- - not openshift_release | match('\d+(\.\d+){1,3}$')
+ - not openshift_release | match('^\d+(\.\d+){1,3}$')
fail:
msg: |-
openshift_release is "{{ openshift_release }}" which is not a valid version string.
@@ -69,3 +69,21 @@
- openshift_clusterid is not defined
- openshift_cloudprovider_kind is defined
- openshift_cloudprovider_kind == 'aws'
+
+- name: Ensure ansible_service_broker_remove and ansible_service_broker_install are mutually exclusive
+ fail:
+ msg: >
+ ansible_service_broker_remove and ansible_service_broker_install are mutually exclusive;
+ do not set both to true. ansible_service_broker_install defaults to true.
+ when:
+ - ansible_service_broker_remove | default(false) | bool
+ - ansible_service_broker_install | default(true) | bool
+
+- name: Ensure template_service_broker_remove and template_service_broker_install are mutually exclusive
+ fail:
+ msg: >
+ template_service_broker_remove and template_service_broker_install are mutually exclusive;
+ do not set both to true. template_service_broker_install defaults to true.
+ when:
+ - template_service_broker_remove | default(false) | bool
+ - template_service_broker_install | default(true) | bool
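To illustrate the new guards (hypothetical inventory values, not part of this patch): removing a broker requires disabling the install flag as well, since it defaults to true:

    ansible_service_broker_remove: true
    ansible_service_broker_install: false   # defaults to true; leaving both true now fails early

The template_service_broker check behaves the same way with its corresponding variables.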
diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml
index 9d55185c8..cd7bda2c6 100644
--- a/roles/openshift_service_catalog/tasks/generate_certs.yml
+++ b/roles/openshift_service_catalog/tasks/generate_certs.yml
@@ -60,7 +60,7 @@
register: apiserver_ca
- shell: >
- oc get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
+ {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
register: get_apiservices
changed_when: no
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index aa3ec5724..3507330e3 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -38,7 +38,7 @@
- name: Make kube-service-catalog project network global
command: >
- oc adm pod-network make-projects-global kube-service-catalog
+ {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog
- include: generate_certs.yml
@@ -83,19 +83,19 @@
# only do this if we don't already have the updated role info
- name: Generate apply template for clusterrole/edit
template:
- src: sc_role_patching.j2
+ src: sc_admin_edit_role_patching.j2
dest: "{{ mktemp.stdout }}/edit_sc_patch.yml"
vars:
original_content: "{{ edit_yaml.results.results[0] | to_yaml }}"
when:
- - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
# only do this if we don't already have the updated role info
- name: update edit role for service catalog and pod preset access
command: >
- oc replace -f {{ mktemp.stdout }}/edit_sc_patch.yml
+ {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/edit_sc_patch.yml
when:
- - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
- oc_obj:
name: admin
@@ -106,19 +106,42 @@
# only do this if we don't already have the updated role info
- name: Generate apply template for clusterrole/admin
template:
- src: sc_role_patching.j2
+ src: sc_admin_edit_role_patching.j2
dest: "{{ mktemp.stdout }}/admin_sc_patch.yml"
vars:
original_content: "{{ admin_yaml.results.results[0] | to_yaml }}"
when:
- - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
# only do this if we don't already have the updated role info
- name: update admin role for service catalog and pod preset access
command: >
- oc replace -f {{ mktemp.stdout }}/admin_sc_patch.yml
+ {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/admin_sc_patch.yml
when:
- - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+
+- oc_obj:
+ name: view
+ kind: clusterrole
+ state: list
+ register: view_yaml
+
+# only do this if we don't already have the updated role info
+- name: Generate apply template for clusterrole/view
+ template:
+ src: sc_view_role_patching.j2
+ dest: "{{ mktemp.stdout }}/view_sc_patch.yml"
+ vars:
+ original_content: "{{ view_yaml.results.results[0] | to_yaml }}"
+ when:
+ - not view_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch'])
+
+# only do this if we don't already have the updated role info
+- name: update view role for service catalog access
+ command: >
+ {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/view_sc_patch.yml
+ when:
+ - not view_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch'])
- oc_adm_policy_user:
namespace: kube-service-catalog
diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml
index ca9844e79..a832e1f85 100644
--- a/roles/openshift_service_catalog/tasks/remove.yml
+++ b/roles/openshift_service_catalog/tasks/remove.yml
@@ -1,7 +1,7 @@
---
- name: Remove Service Catalog APIServer
command: >
- oc delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
+ {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
# TODO: this module doesn't currently remove this
#- name: Remove service catalog api service
@@ -48,7 +48,7 @@
- name: Remove Service Catalog kube-system Role Bindings
shell: >
- oc process kube-system-service-catalog-role-bindings -n kube-system | oc delete --ignore-not-found -f -
+ {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process kube-system-service-catalog-role-bindings -n kube-system | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f -
- oc_obj:
kind: template
@@ -58,7 +58,7 @@
- name: Remove Service Catalog kube-service-catalog Role Bindings
shell: >
- oc process service-catalog-role-bindings -n kube-service-catalog | oc delete --ignore-not-found -f -
+ {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process service-catalog-role-bindings -n kube-service-catalog | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f -
- oc_obj:
kind: template
diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2
index 5d5352c1c..0e5bb7230 100644
--- a/roles/openshift_service_catalog/templates/api_server.j2
+++ b/roles/openshift_service_catalog/templates/api_server.j2
@@ -24,6 +24,7 @@ spec:
{% endfor %}
containers:
- args:
+ - apiserver
- --storage-type
- etcd
- --secure-port
@@ -45,7 +46,7 @@ spec:
- --feature-gates
- OriginatingIdentity=true
image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
- command: ["/usr/bin/apiserver"]
+ command: ["/usr/bin/service-catalog"]
imagePullPolicy: Always
name: apiserver
ports:
diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2
index 2272cbb44..e5e5f6b50 100644
--- a/roles/openshift_service_catalog/templates/controller_manager.j2
+++ b/roles/openshift_service_catalog/templates/controller_manager.j2
@@ -29,6 +29,7 @@ spec:
fieldRef:
fieldPath: metadata.namespace
args:
+ - controller-manager
- -v
- "5"
- --leader-election-namespace
@@ -38,7 +39,7 @@ spec:
- --feature-gates
- OriginatingIdentity=true
image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
- command: ["/usr/bin/controller-manager"]
+ command: ["/usr/bin/service-catalog"]
imagePullPolicy: Always
name: controller-manager
ports:
diff --git a/roles/openshift_service_catalog/templates/sc_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2
index 4629d5bb3..59cceafcf 100644
--- a/roles/openshift_service_catalog/templates/sc_role_patching.j2
+++ b/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2
@@ -12,6 +12,7 @@
- get
- list
- watch
+ - patch
- apiGroups:
- "settings.k8s.io"
attributeRestrictions: null
diff --git a/roles/openshift_service_catalog/templates/sc_view_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_view_role_patching.j2
new file mode 100644
index 000000000..838993854
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/sc_view_role_patching.j2
@@ -0,0 +1,11 @@
+{{ original_content }}
+- apiGroups:
+ - "servicecatalog.k8s.io"
+ attributeRestrictions: null
+ resources:
+ - serviceinstances
+ - servicebindings
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index d0bc0e028..abe411f67 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -119,13 +119,13 @@ are an exception:
Additionally, this role's behavior responds to the following registry-specific
variables:
-| Name | Default value | Description |
-|-----------------------------------------------|------------------------------|-----------------------------------------|
-| openshift_hosted_registry_glusterfs_endpoints | glusterfs-registry-endpoints | The name for the Endpoints resource that will point the registry to the GlusterFS nodes
-| openshift_hosted_registry_glusterfs_path | glusterfs-registry-volume | The name for the GlusterFS volume that will provide registry storage
-| openshift_hosted_registry_glusterfs_readonly | False | Whether the GlusterFS volume should be read-only
-| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume
-| openshift_hosted_registry_glusterfs_swapcopy | True | If swapping, copy the contents of the pre-existing registry storage to the new GlusterFS volume
+| Name | Default value | Description |
+|-------------------------------------------------------|------------------------------|-----------------------------------------|
+| openshift_hosted_registry_storage_glusterfs_endpoints | glusterfs-registry-endpoints | The name for the Endpoints resource that will point the registry to the GlusterFS nodes
+| openshift_hosted_registry_storage_glusterfs_path | glusterfs-registry-volume | The name for the GlusterFS volume that will provide registry storage
+| openshift_hosted_registry_storage_glusterfs_readonly | False | Whether the GlusterFS volume should be read-only
+| openshift_hosted_registry_storage_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume
+| openshift_hosted_registry_storage_glusterfs_swapcopy | True | If swapping, copy the contents of the pre-existing registry storage to the new GlusterFS volume
Dependencies
------------
diff --git a/roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml
new file mode 100644
index 000000000..7b705c2d4
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml
@@ -0,0 +1,135 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: heketi-template
+ deploy-heketi: support
+ annotations:
+ description: Bootstrap Heketi installation
+ tags: glusterfs,heketi,installation
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-service
+ deploy-heketi: support
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: deploy-heketi-${CLUSTER_NAME}
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-route
+ deploy-heketi: support
+ spec:
+ to:
+ kind: Service
+ name: deploy-heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
+ deploy-heketi: support
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ deploy-heketi: support
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml
new file mode 100644
index 000000000..8c5e1ded3
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml
@@ -0,0 +1,136 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterfs
+ labels:
+ glusterfs: template
+ annotations:
+ description: GlusterFS DaemonSet template
+ tags: glusterfs
+objects:
+- kind: DaemonSet
+ apiVersion: extensions/v1beta1
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-daemonset
+ annotations:
+ description: GlusterFS DaemonSet
+ tags: glusterfs
+ spec:
+ selector:
+ matchLabels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ template:
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ glusterfs-node: pod
+ spec:
+ nodeSelector: "${{NODE_LABELS}}"
+ hostNetwork: true
+ containers:
+ - name: glusterfs
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: glusterfs-heketi
+ mountPath: "/var/lib/heketi"
+ - name: glusterfs-run
+ mountPath: "/run"
+ - name: glusterfs-lvm
+ mountPath: "/run/lvm"
+ - name: glusterfs-etc
+ mountPath: "/etc/glusterfs"
+ - name: glusterfs-logs
+ mountPath: "/var/log/glusterfs"
+ - name: glusterfs-config
+ mountPath: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ mountPath: "/dev"
+ - name: glusterfs-misc
+ mountPath: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ mountPath: "/sys/fs/cgroup"
+ readOnly: true
+ - name: glusterfs-ssl
+ mountPath: "/etc/ssl"
+ readOnly: true
+ securityContext:
+ capabilities: {}
+ privileged: true
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ resources: {}
+ terminationMessagePath: "/dev/termination-log"
+ volumes:
+ - name: glusterfs-heketi
+ hostPath:
+ path: "/var/lib/heketi"
+ - name: glusterfs-run
+ emptyDir: {}
+ - name: glusterfs-lvm
+ hostPath:
+ path: "/run/lvm"
+ - name: glusterfs-etc
+ hostPath:
+ path: "/etc/glusterfs"
+ - name: glusterfs-logs
+ hostPath:
+ path: "/var/log/glusterfs"
+ - name: glusterfs-config
+ hostPath:
+ path: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ hostPath:
+ path: "/dev"
+ - name: glusterfs-misc
+ hostPath:
+ path: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: glusterfs-ssl
+ hostPath:
+ path: "/etc/ssl"
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+parameters:
+- name: NODE_LABELS
+ displayName: Daemonset Node Labels
+ description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\'
+ value: '{ "glusterfs": "storage-host" }'
+- name: IMAGE_NAME
+ displayName: GlusterFS container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: GlusterFS container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml
new file mode 100644
index 000000000..61b6a8c13
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml
@@ -0,0 +1,134 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-template
+ annotations:
+ description: Heketi service deployment template
+ tags: glusterfs,heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-service
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-route
+ spec:
+ to:
+ kind: Service
+ name: heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-dc
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ glusterfs:
+ endpoints: heketi-db-${CLUSTER_NAME}-endpoints
+ path: heketidbstorage
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index 074904bec..54a6dd7c3 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -1,6 +1,6 @@
---
- name: Create heketi DB volume
- command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --image {{ glusterfs_heketi_image}}:{{ glusterfs_heketi_version }} --listfile /tmp/heketi-storage.json"
+ command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --listfile /tmp/heketi-storage.json"
register: setup_storage
- name: Copy heketi-storage list
diff --git a/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml b/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml
new file mode 100644
index 000000000..030fa81c9
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml
@@ -0,0 +1,12 @@
+---
+- name: Ensure device mapper modules loaded
+ template:
+ src: glusterfs.conf
+ dest: /etc/modules-load.d/glusterfs.conf
+ register: km
+
+- name: load kernel modules
+ systemd:
+ name: systemd-modules-load.service
+ state: restarted
+ when: km | changed
diff --git a/roles/openshift_storage_glusterfs/templates/glusterfs.conf b/roles/openshift_storage_glusterfs/templates/glusterfs.conf
new file mode 100644
index 000000000..dd4d6e6f7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/glusterfs.conf
@@ -0,0 +1,4 @@
+#{{ ansible_managed }}
+dm_thin_pool
+dm_snapshot
+dm_mirror
\ No newline at end of file
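A minimal sketch of how the restart could be verified (hypothetical follow-up task, assuming the module names listed in glusterfs.conf above):

    - name: Check device mapper modules are loaded
      command: lsmod
      register: lsmod_out
      changed_when: false
      failed_when: "'dm_thin_pool' not in lsmod_out.stdout"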
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..11c9195bb
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..3f869d2b7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..454e84aaf
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: storage.k8s.io/v1beta1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}
+provisioner: kubernetes.io/glusterfs
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+{% if glusterfs_heketi_admin_key is defined %}
+ secretNamespace: "{{ glusterfs_namespace }}"
+ secretName: "heketi-{{ glusterfs_name }}-admin-secret"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2
new file mode 100644
index 000000000..579b11bb7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2
@@ -0,0 +1,36 @@
+{
+ "_port_comment": "Heketi Server Port Number",
+ "port" : "8080",
+
+ "_use_auth": "Enable JWT authorization. Please enable for deployment",
+ "use_auth" : false,
+
+ "_jwt" : "Private keys for access",
+ "jwt" : {
+ "_admin" : "Admin has access to all APIs",
+ "admin" : {
+ "key" : "My Secret"
+ },
+ "_user" : "User only has access to /volumes endpoint",
+ "user" : {
+ "key" : "My Secret"
+ }
+ },
+
+ "_glusterfs_comment": "GlusterFS Configuration",
+ "glusterfs" : {
+
+ "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
+ "executor" : "{{ glusterfs_heketi_executor }}",
+
+ "_db_comment": "Database file name",
+ "db" : "/var/lib/heketi/heketi.db",
+
+ "sshexec" : {
+ "keyfile" : "/etc/heketi/private_key",
+ "port" : "{{ glusterfs_heketi_ssh_port }}",
+ "user" : "{{ glusterfs_heketi_ssh_user }}",
+ "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
+ }
+ }
+}
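One detail worth noting: glusterfs_heketi_ssh_sudo is a boolean, and Jinja2 would otherwise render it as True/False, which is invalid JSON; the | lower filter yields true/false. A sketch of the rendered sshexec block, assuming illustrative values (port 22, user root, sudo disabled):

    "sshexec" : {
      "keyfile" : "/etc/heketi/private_key",
      "port" : "22",
      "user" : "root",
      "sudo" : false
    }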
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2
new file mode 100644
index 000000000..d6c28f6dd
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2
@@ -0,0 +1,49 @@
+{
+ "clusters": [
+{%- set clusters = {} -%}
+{%- for node in glusterfs_nodes -%}
  {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+ {%- if cluster in clusters -%}
+ {%- set _dummy = clusters[cluster].append(node) -%}
+ {%- else -%}
+ {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+ {
+ "nodes": [
+{%- for node in clusters[cluster] -%}
+ {
+ "node": {
+ "hostnames": {
+ "manage": [
+{%- if 'glusterfs_hostname' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_hostname }}"
+{%- elif 'openshift' in hostvars[node] -%}
+ "{{ hostvars[node].openshift.node.nodename }}"
+{%- else -%}
+ "{{ node }}"
+{%- endif -%}
+ ],
+ "storage": [
+{%- if 'glusterfs_ip' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_ip }}"
+{%- else -%}
+ "{{ hostvars[node].openshift.common.ip }}"
+{%- endif -%}
+ ]
+ },
+ "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+ },
+ "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+ "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+}
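For reference, the grouping loop above buckets nodes by their glusterfs_cluster value (defaulting to '1') and emits one clusters[] entry per bucket. A sketch of the rendered output for two nodes in the same cluster, each with one device (hypothetical hostnames, IPs, and devices):

    {
      "clusters": [
        {
          "nodes": [
            { "node": { "hostnames": { "manage": [ "node1.example.com" ], "storage": [ "192.0.2.11" ] }, "zone": 1 }, "devices": [ "/dev/sdb" ] },
            { "node": { "hostnames": { "manage": [ "node2.example.com" ], "storage": [ "192.0.2.12" ] }, "zone": 1 }, "devices": [ "/dev/sdc" ] }
          ]
        }
      ]
    }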
diff --git a/roles/template_service_broker/files/openshift-ansible-catalog-console.js b/roles/template_service_broker/files/openshift-ansible-catalog-console.js
index b3a3d3428..622afb6bd 100644
--- a/roles/template_service_broker/files/openshift-ansible-catalog-console.js
+++ b/roles/template_service_broker/files/openshift-ansible-catalog-console.js
@@ -1 +1 @@
-window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.template_service_broker = true;
+window.OPENSHIFT_CONSTANTS.TEMPLATE_SERVICE_BROKER_ENABLED = true;
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
index 6a532a206..a78e4825b 100644
--- a/roles/template_service_broker/tasks/install.yml
+++ b/roles/template_service_broker/tasks/install.yml
@@ -45,7 +45,7 @@
oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
--param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"
--param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}"
- | kubectl apply -f -
+ | {{ openshift.common.client_binary }} apply -f -
# reconcile with rbac
- name: Reconcile with RBAC file
diff --git a/roles/template_service_broker/tasks/main.yml b/roles/template_service_broker/tasks/main.yml
index d7ca970c7..da8aa291b 100644
--- a/roles/template_service_broker/tasks/main.yml
+++ b/roles/template_service_broker/tasks/main.yml
@@ -2,7 +2,7 @@
# do any asserts here
- include: install.yml
- when: template_service_broker_install | default(false) | bool
+ when: template_service_broker_install | default(true) | bool
- include: remove.yml
when: template_service_broker_remove | default(false) | bool