author      Russell Teague <rteague@redhat.com>    2016-12-09 14:40:43 -0500
committer   Russell Teague <rteague@redhat.com>    2016-12-12 13:54:24 -0500
commit      be97433dd559a3bdae4baedda20a7f17bd47450b (patch)
tree        6f89b1ec06c19a9de87a5d1108d8ba9b6f069f34 /roles
parent      bf3fa6162880e2dff9c23d42ceb2197e071ba570 (diff)
YAML Linting
* Added checks to 'make ci' for YAML linting
* Modified y(a)ml files to pass lint checks
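The 'make ci' changes themselves are not part of this view (it is limited to roles/), so the following is only a rough sketch of the kind of yamllint setup such a check could invoke. The rule choices, the .yamllint file name, and the command shown in the comments are illustrative assumptions, not taken from this commit:

    # Illustrative .yamllint config (assumption; not the repository's actual file).
    # A CI target would then run something like:
    #   yamllint -c .yamllint roles/
    ---
    extends: default
    rules:
      indentation:
        indent-sequences: consistent   # sequences indented the same way throughout a file
      document-start:
        present: true                  # every file must begin with '---'
      empty-lines:
        max-end: 0                     # no blank lines at the end of a file
      line-length: disable             # long Jinja2 expressions are common in these roles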
Diffstat (limited to 'roles')
-rw-r--r--  roles/docker/meta/main.yml  |   4
-rw-r--r--  roles/docker/tasks/main.yml  |  18
-rw-r--r--  roles/flannel_register/defaults/main.yaml  |   1
-rw-r--r--  roles/kube_nfs_volumes/meta/main.yml  |   2
-rw-r--r--  roles/nuage_ca/meta/main.yml  |   2
-rw-r--r--  roles/nuage_common/defaults/main.yaml  |   1
-rw-r--r--  roles/nuage_master/defaults/main.yaml  |   2
-rw-r--r--  roles/nuage_master/meta/main.yml  |  14
-rw-r--r--  roles/nuage_master/tasks/certificates.yml  |   8
-rw-r--r--  roles/nuage_master/tasks/main.yaml  |  14
-rw-r--r--  roles/nuage_master/vars/main.yaml  |  17
-rw-r--r--  roles/nuage_node/meta/main.yml  |  16
-rw-r--r--  roles/nuage_node/tasks/certificates.yml  |   6
-rw-r--r--  roles/nuage_node/tasks/iptables.yml  |   2
-rw-r--r--  roles/nuage_node/tasks/main.yaml  |  22
-rw-r--r--  roles/nuage_node/vars/main.yaml  |   4
-rw-r--r--  roles/openshift_builddefaults/tasks/main.yml  |   3
-rw-r--r--  roles/openshift_cloud_provider/tasks/aws.yml  |   1
-rw-r--r--  roles/openshift_cloud_provider/tasks/gce.yml  |   1
-rw-r--r--  roles/openshift_common/tasks/main.yml  |   5
-rw-r--r--  roles/openshift_docker_facts/tasks/main.yml  |   2
-rw-r--r--  roles/openshift_examples/defaults/main.yml  |   4
-rw-r--r--  roles/openshift_expand_partition/meta/main.yml  |   4
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/object_storage.yml  |   1
-rw-r--r--  roles/openshift_hosted_logging/tasks/cleanup_logging.yaml  |  86
-rw-r--r--  roles/openshift_hosted_logging/tasks/deploy_logging.yaml  | 348
-rw-r--r--  roles/openshift_hosted_logging/vars/main.yaml  |   1
-rw-r--r--  roles/openshift_manageiq/vars/main.yml  |  53
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml  |   1
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml  |   4
-rw-r--r--  roles/openshift_master_facts/vars/main.yml  |   1
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml  |   6
-rw-r--r--  roles/openshift_metrics/vars/main.yaml  |   7
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml  |  25
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/no-network-manager.yml  |   2
-rw-r--r--  roles/openshift_repos/vars/main.yml  |   2
-rw-r--r--  roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml  |   3
-rw-r--r--  roles/openshift_serviceaccounts/tasks/main.yml  |   1
-rw-r--r--  roles/openshift_storage_nfs_lvm/meta/main.yml  |   2
-rw-r--r--  roles/rhel_subscribe/meta/main.yml  |   3
40 files changed, 354 insertions, 345 deletions
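Most of the hunks below apply the same few mechanical fixes: adding a '---' document start, removing trailing blank lines, dropping spaces before colons, and making sequence indentation consistent. A condensed, hypothetical before/after (role name and values invented for illustration, not a real file from this repository) shows the recurring patterns:

    # Before (patterns shown as comments):
    #   galaxy_info:
    #     author : Example Author        <- space before the colon
    #   dependencies:
    #       - role: os_firewall          <- sequence indented inconsistently
    #   (no '---' document start; trailing blank line at end of file)
    #
    # After, in the style the hunks below converge on:
    ---
    galaxy_info:
      author: Example Author
    dependencies:
    - role: os_firewall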
diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml
index c5c95c0d2..dadd62c93 100644
--- a/roles/docker/meta/main.yml
+++ b/roles/docker/meta/main.yml
@@ -10,5 +10,5 @@ galaxy_info:
versions:
- 7
dependencies:
- - role: os_firewall
- os_firewall_use_firewalld: False
+- role: os_firewall
+ os_firewall_use_firewalld: False
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index a2b18baa1..a93bdc2ad 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -86,16 +86,16 @@
line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val }}'"
state: "{{ 'present' if item.reg_fact_val != '' else 'absent'}}"
with_items:
- - reg_conf_var: HTTP_PROXY
- reg_fact_val: "{{ docker_http_proxy | default('') }}"
- - reg_conf_var: HTTPS_PROXY
- reg_fact_val: "{{ docker_https_proxy | default('') }}"
- - reg_conf_var: NO_PROXY
- reg_fact_val: "{{ docker_no_proxy | default('') | join(',') }}"
+ - reg_conf_var: HTTP_PROXY
+ reg_fact_val: "{{ docker_http_proxy | default('') }}"
+ - reg_conf_var: HTTPS_PROXY
+ reg_fact_val: "{{ docker_https_proxy | default('') }}"
+ - reg_conf_var: NO_PROXY
+ reg_fact_val: "{{ docker_no_proxy | default('') | join(',') }}"
notify:
- - restart docker
+ - restart docker
when:
- - docker_check.stat.isreg is defined and docker_check.stat.isreg and '"http_proxy" in openshift.common or "https_proxy" in openshift.common'
+ - docker_check.stat.isreg is defined and docker_check.stat.isreg and '"http_proxy" in openshift.common or "https_proxy" in openshift.common'
- name: Set various Docker options
lineinfile:
@@ -109,7 +109,7 @@
{% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %}'"
when: docker_check.stat.isreg is defined and docker_check.stat.isreg
notify:
- - restart docker
+ - restart docker
- name: Start the Docker service
systemd:
diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml
index b1279aa88..ddf8230ec 100644
--- a/roles/flannel_register/defaults/main.yaml
+++ b/roles/flannel_register/defaults/main.yaml
@@ -8,4 +8,3 @@ etcd_conf_dir: "{{ openshift.common.config_base }}/master"
etcd_peer_ca_file: "{{ etcd_conf_dir + '/ca.crt' if (openshift.master.embedded_etcd | bool) else etcd_conf_dir + '/master.etcd-ca.crt' }}"
etcd_peer_cert_file: "{{ etcd_conf_dir }}/master.etcd-client.crt"
etcd_peer_key_file: "{{ etcd_conf_dir }}/master.etcd-client.key"
-
diff --git a/roles/kube_nfs_volumes/meta/main.yml b/roles/kube_nfs_volumes/meta/main.yml
index be6ca6b88..7ed028138 100644
--- a/roles/kube_nfs_volumes/meta/main.yml
+++ b/roles/kube_nfs_volumes/meta/main.yml
@@ -13,5 +13,5 @@ galaxy_info:
versions:
- all
categories:
- - cloud
+ - cloud
dependencies: []
diff --git a/roles/nuage_ca/meta/main.yml b/roles/nuage_ca/meta/main.yml
index 2b06613f3..36838debc 100644
--- a/roles/nuage_ca/meta/main.yml
+++ b/roles/nuage_ca/meta/main.yml
@@ -1,6 +1,6 @@
---
galaxy_info:
- author: Vishal Patil
+ author: Vishal Patil
description:
company: Nuage Networks
license: Apache License, Version 2.0
diff --git a/roles/nuage_common/defaults/main.yaml b/roles/nuage_common/defaults/main.yaml
index 16dac8720..a7803c0ee 100644
--- a/roles/nuage_common/defaults/main.yaml
+++ b/roles/nuage_common/defaults/main.yaml
@@ -1,3 +1,4 @@
+---
nuage_ca_master: "{{ groups.oo_first_master.0 }}"
nuage_ca_master_crt_dir: /usr/share/nuage-openshift-certificates
diff --git a/roles/nuage_master/defaults/main.yaml b/roles/nuage_master/defaults/main.yaml
index cf670a9e1..c90f4f443 100644
--- a/roles/nuage_master/defaults/main.yaml
+++ b/roles/nuage_master/defaults/main.yaml
@@ -1,4 +1,4 @@
---
nuage_master_cspadminpasswd: ""
nuage_master_adminusername: admin
-nuage_master_adminuserpasswd: admin
+nuage_master_adminuserpasswd: admin
diff --git a/roles/nuage_master/meta/main.yml b/roles/nuage_master/meta/main.yml
index b2a47ef71..a8a9bd3b4 100644
--- a/roles/nuage_master/meta/main.yml
+++ b/roles/nuage_master/meta/main.yml
@@ -13,10 +13,10 @@ galaxy_info:
- cloud
- system
dependencies:
- - role: nuage_ca
- - role: nuage_common
- - role: openshift_etcd_client_certificates
- - role: os_firewall
- os_firewall_allow:
- - service: openshift-monitor
- port: "{{ nuage_mon_rest_server_port }}/tcp"
+- role: nuage_ca
+- role: nuage_common
+- role: openshift_etcd_client_certificates
+- role: os_firewall
+ os_firewall_allow:
+ - service: openshift-monitor
+ port: "{{ nuage_mon_rest_server_port }}/tcp"
diff --git a/roles/nuage_master/tasks/certificates.yml b/roles/nuage_master/tasks/certificates.yml
index 0a2f375cd..c16616e1c 100644
--- a/roles/nuage_master/tasks/certificates.yml
+++ b/roles/nuage_master/tasks/certificates.yml
@@ -1,11 +1,11 @@
---
- name: Create a directory to hold the certificates
file: path="{{ nuage_mon_rest_server_crt_dir }}" state=directory
- delegate_to: "{{ nuage_ca_master }}"
+ delegate_to: "{{ nuage_ca_master }}"
- name: Create the key
command: >
- openssl genrsa -out "{{ nuage_ca_master_rest_server_key }}" 4096
+ openssl genrsa -out "{{ nuage_ca_master_rest_server_key }}" 4096
delegate_to: "{{ nuage_ca_master }}"
- name: Create the req file
@@ -30,7 +30,7 @@
shell: "cd {{ nuage_mon_rest_server_crt_dir }} && tar -czvf /tmp/{{ ansible_nodename }}.tgz *"
delegate_to: "{{ nuage_ca_master }}"
-- name: Create a temp directory for the certificates
+- name: Create a temp directory for the certificates
local_action: command mktemp -d "/tmp/openshift-{{ ansible_nodename }}-XXXXXXX"
register: mktemp
@@ -42,7 +42,7 @@
unarchive: src="{{ mktemp.stdout }}/{{ ansible_nodename }}.tgz" dest={{ nuage_master_crt_dir }}
- name: Delete the certificates after copy
- file: path="{{ nuage_mon_rest_server_crt_dir }}" state=absent
+ file: path="{{ nuage_mon_rest_server_crt_dir }}" state=absent
delegate_to: "{{ nuage_ca_master }}"
- name: Delete the temp directory
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index b8eaede3b..d211d30e8 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -1,13 +1,13 @@
---
- name: Create directory /usr/share/nuage-openshift-monitor
become: yes
- file: path=/usr/share/nuage-openshift-monitor state=directory
+ file: path=/usr/share/nuage-openshift-monitor state=directory
- name: Create the log directory
become: yes
file: path={{ nuage_mon_rest_server_logdir }} state=directory
-- name: Install Nuage Openshift Monitor
+- name: Install Nuage Openshift Monitor
become: yes
yum: name={{ nuage_openshift_rpm }} state=present
@@ -17,12 +17,12 @@
become: yes
fetch: src={{ cert_output_dir }}/{{ item }} dest=/tmp/{{ item }} flat=yes
with_items:
- - ca.crt
- - nuage.crt
- - nuage.key
- - nuage.kubeconfig
+ - ca.crt
+ - nuage.crt
+ - nuage.key
+ - nuage.kubeconfig
-- include: certificates.yml
+- include: certificates.yml
- name: Create nuage-openshift-monitor.yaml
become: yes
diff --git a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml
index b395eba99..dba399a03 100644
--- a/roles/nuage_master/vars/main.yaml
+++ b/roles/nuage_master/vars/main.yaml
@@ -1,3 +1,4 @@
+---
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key"
@@ -6,7 +7,7 @@ ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
admin_config: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
cert_output_dir: /usr/share/nuage-openshift-monitor
kube_config: /usr/share/nuage-openshift-monitor/nuage.kubeconfig
-kubemon_yaml: /usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml
+kubemon_yaml: /usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml
master_config_yaml: "{{ openshift_master_config_dir }}/master-config.yaml"
nuage_mon_rest_server_url: "0.0.0.0:{{ nuage_mon_rest_server_port }}"
nuage_mon_rest_server_logdir: "{{ nuage_openshift_monitor_log_dir | default('/var/log/nuage-openshift-monitor') }}"
@@ -14,18 +15,18 @@ nuage_mon_log_level: "{{ nuage_openshift_monitor_log_level | default('3') }}"
nuage_mon_rest_server_crt_dir: "{{ nuage_ca_master_crt_dir }}/{{ ansible_nodename }}"
nuage_ca_master_rest_server_key: "{{ nuage_mon_rest_server_crt_dir }}/nuageMonServer.key"
-nuage_ca_master_rest_server_crt: "{{ nuage_mon_rest_server_crt_dir }}/nuageMonServer.crt"
+nuage_ca_master_rest_server_crt: "{{ nuage_mon_rest_server_crt_dir }}/nuageMonServer.crt"
nuage_mon_rest_server_host: "{{ openshift.master.cluster_hostname | default(openshift.common.hostname) }}"
-nuage_master_crt_dir : /usr/share/nuage-openshift-monitor
+nuage_master_crt_dir: /usr/share/nuage-openshift-monitor
nuage_service_account: system:serviceaccount:default:nuage
nuage_service_account_config:
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: nuage
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: nuage
nuage_tasks:
- - policy add-cluster-role-to-user cluster-reader {{ nuage_service_account }}
+ - policy add-cluster-role-to-user cluster-reader {{ nuage_service_account }}
diff --git a/roles/nuage_node/meta/main.yml b/roles/nuage_node/meta/main.yml
index f96318611..3e2a5e0c9 100644
--- a/roles/nuage_node/meta/main.yml
+++ b/roles/nuage_node/meta/main.yml
@@ -13,11 +13,11 @@ galaxy_info:
- cloud
- system
dependencies:
- - role: nuage_common
- - role: nuage_ca
- - role: os_firewall
- os_firewall_allow:
- - service: vxlan
- port: 4789/udp
- - service: nuage-monitor
- port: "{{ nuage_mon_rest_server_port }}/tcp"
+- role: nuage_common
+- role: nuage_ca
+- role: os_firewall
+ os_firewall_allow:
+ - service: vxlan
+ port: 4789/udp
+ - service: nuage-monitor
+ port: "{{ nuage_mon_rest_server_port }}/tcp"
diff --git a/roles/nuage_node/tasks/certificates.yml b/roles/nuage_node/tasks/certificates.yml
index 7fcd4274d..d1c8bf59a 100644
--- a/roles/nuage_node/tasks/certificates.yml
+++ b/roles/nuage_node/tasks/certificates.yml
@@ -5,7 +5,7 @@
- name: Create the key
command: >
- openssl genrsa -out "{{ nuage_ca_master_plugin_key }}" 4096
+ openssl genrsa -out "{{ nuage_ca_master_plugin_key }}" 4096
delegate_to: "{{ nuage_ca_master }}"
- name: Create the req file
@@ -30,7 +30,7 @@
shell: "cd {{ nuage_plugin_rest_client_crt_dir }} && tar -czvf /tmp/{{ ansible_nodename }}.tgz *"
delegate_to: "{{ nuage_ca_master }}"
-- name: Create a temp directory for the certificates
+- name: Create a temp directory for the certificates
local_action: command mktemp -d "/tmp/openshift-{{ ansible_nodename }}-XXXXXXX"
register: mktemp
@@ -42,7 +42,7 @@
unarchive: src="{{ mktemp.stdout }}/{{ ansible_nodename }}.tgz" dest={{ nuage_plugin_crt_dir }}
- name: Delete the certificates after copy
- file: path="{{ nuage_plugin_rest_client_crt_dir }}" state=absent
+ file: path="{{ nuage_plugin_rest_client_crt_dir }}" state=absent
delegate_to: "{{ nuage_ca_master }}"
- name: Delete the temp directory
diff --git a/roles/nuage_node/tasks/iptables.yml b/roles/nuage_node/tasks/iptables.yml
index 52935f075..8e2c29620 100644
--- a/roles/nuage_node/tasks/iptables.yml
+++ b/roles/nuage_node/tasks/iptables.yml
@@ -5,7 +5,7 @@
always_run: yes
- name: Allow traffic from overlay to underlay
- command: /sbin/iptables --wait -I FORWARD 1 -s {{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }} -j ACCEPT -m comment --comment "nuage-overlay-underlay"
+ command: /sbin/iptables --wait -I FORWARD 1 -s {{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }} -j ACCEPT -m comment --comment "nuage-overlay-underlay"
when: "'nuage-overlay-underlay' not in iptablesrules.stdout"
notify:
- save iptable rules
diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml
index 2ec4be2c2..d82dd36a4 100644
--- a/roles/nuage_node/tasks/main.yaml
+++ b/roles/nuage_node/tasks/main.yaml
@@ -2,16 +2,16 @@
- name: Install Nuage VRS
become: yes
yum: name={{ vrs_rpm }} state=present
-
-- name: Set the uplink interface
+
+- name: Set the uplink interface
become: yes
lineinfile: dest={{ vrs_config }} regexp=^NETWORK_UPLINK_INTF line='NETWORK_UPLINK_INTF={{ uplink_interface }}'
-- name: Set the Active Controller
+- name: Set the Active Controller
become: yes
lineinfile: dest={{ vrs_config }} regexp=^ACTIVE_CONTROLLER line='ACTIVE_CONTROLLER={{ vsc_active_ip }}'
-- name: Set the Standby Controller
+- name: Set the Standby Controller
become: yes
lineinfile: dest={{ vrs_config }} regexp=^STANDBY_CONTROLLER line='STANDBY_CONTROLLER={{ vsc_standby_ip }}'
when: vsc_standby_ip is defined
@@ -24,18 +24,18 @@
become: yes
copy: src="/tmp/{{ item }}" dest="{{ vsp_openshift_dir }}/{{ item }}"
with_items:
- - ca.crt
- - nuage.crt
- - nuage.key
- - nuage.kubeconfig
+ - ca.crt
+ - nuage.crt
+ - nuage.key
+ - nuage.kubeconfig
- include: certificates.yml
-- name: Set the vsp-openshift.yaml
+- name: Set the vsp-openshift.yaml
become: yes
- template: src=vsp-openshift.j2 dest={{ vsp_openshift_yaml }} owner=root mode=0644
+ template: src=vsp-openshift.j2 dest={{ vsp_openshift_yaml }} owner=root mode=0644
notify:
- restart vrs
- - restart node
+ - restart node
- include: iptables.yml
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
index 86486259f..7b789152f 100644
--- a/roles/nuage_node/vars/main.yaml
+++ b/roles/nuage_node/vars/main.yaml
@@ -17,6 +17,6 @@ plugin_log_level: "{{ nuage_plugin_log_level | default('err') }}"
nuage_plugin_rest_client_crt_dir: "{{ nuage_ca_master_crt_dir }}/{{ ansible_nodename }}"
nuage_ca_master_plugin_key: "{{ nuage_plugin_rest_client_crt_dir }}/nuageMonClient.key"
-nuage_ca_master_plugin_crt: "{{ nuage_plugin_rest_client_crt_dir }}/nuageMonClient.crt"
+nuage_ca_master_plugin_crt: "{{ nuage_plugin_rest_client_crt_dir }}/nuageMonClient.crt"
-nuage_plugin_crt_dir : /usr/share/vsp-openshift
+nuage_plugin_crt_dir: /usr/share/vsp-openshift
diff --git a/roles/openshift_builddefaults/tasks/main.yml b/roles/openshift_builddefaults/tasks/main.yml
index 6a4e919e8..1f44b29b9 100644
--- a/roles/openshift_builddefaults/tasks/main.yml
+++ b/roles/openshift_builddefaults/tasks/main.yml
@@ -15,10 +15,9 @@
no_proxy: "{{ openshift_builddefaults_no_proxy | default(None) }}"
git_http_proxy: "{{ openshift_builddefaults_git_http_proxy | default(None) }}"
git_https_proxy: "{{ openshift_builddefaults_git_https_proxy | default(None) }}"
-
+
- name: Set builddefaults config structure
openshift_facts:
role: builddefaults
local_facts:
config: "{{ openshift_builddefaults_json | default(builddefaults_yaml) }}"
-
diff --git a/roles/openshift_cloud_provider/tasks/aws.yml b/roles/openshift_cloud_provider/tasks/aws.yml
index 127a5b392..5fa8773f5 100644
--- a/roles/openshift_cloud_provider/tasks/aws.yml
+++ b/roles/openshift_cloud_provider/tasks/aws.yml
@@ -1,3 +1,4 @@
+---
# Work around ini_file create option in 2.2 which defaults to no
- name: Create cloud config file
file:
diff --git a/roles/openshift_cloud_provider/tasks/gce.yml b/roles/openshift_cloud_provider/tasks/gce.yml
index 14ad8ba94..ee4048911 100644
--- a/roles/openshift_cloud_provider/tasks/gce.yml
+++ b/roles/openshift_cloud_provider/tasks/gce.yml
@@ -1,3 +1,4 @@
+---
# Work around ini_file create option in 2.2 which defaults to no
- name: Create cloud config file
file:
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index c9a44b3f5..0a476ac26 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -4,11 +4,11 @@
when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
- fail:
- msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
+ msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool
- fail:
- msg: Nuage sdn can not be used with flannel
+ msg: Nuage sdn can not be used with flannel
when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
- fail:
@@ -46,4 +46,3 @@
command: >
hostnamectl set-hostname {{ openshift.common.hostname }}
when: openshift_set_hostname | default(set_hostname_default) | bool
-
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index c690c5243..613c237a3 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -9,7 +9,7 @@
additional_registries: "{{ openshift_docker_additional_registries | default(None) }}"
blocked_registries: "{{ openshift_docker_blocked_registries | default(None) }}"
insecure_registries: "{{ openshift_docker_insecure_registries | default(None) }}"
- log_driver: "{{ openshift_docker_log_driver | default(None) }}"
+ log_driver: "{{ openshift_docker_log_driver | default(None) }}"
log_options: "{{ openshift_docker_log_options | default(None) }}"
options: "{{ openshift_docker_options | default(None) }}"
disable_push_dockerhub: "{{ openshift_disable_push_dockerhub | default(None) }}"
diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml
index e843049f9..fc4b56bbf 100644
--- a/roles/openshift_examples/defaults/main.yml
+++ b/roles/openshift_examples/defaults/main.yml
@@ -12,8 +12,8 @@ examples_base: "{{ openshift.common.config_base if openshift.common.is_container
image_streams_base: "{{ examples_base }}/image-streams"
centos_image_streams: "{{ image_streams_base}}/image-streams-centos7.json"
rhel_image_streams:
- - "{{ image_streams_base}}/image-streams-rhel7.json"
- - "{{ image_streams_base}}/dotnet_imagestreams.json"
+ - "{{ image_streams_base}}/image-streams-rhel7.json"
+ - "{{ image_streams_base}}/dotnet_imagestreams.json"
db_templates_base: "{{ examples_base }}/db-templates"
xpaas_image_streams: "{{ examples_base }}/xpaas-streams/"
xpaas_templates_base: "{{ examples_base }}/xpaas-templates"
diff --git a/roles/openshift_expand_partition/meta/main.yml b/roles/openshift_expand_partition/meta/main.yml
index a596d6c63..dea6b6ee0 100644
--- a/roles/openshift_expand_partition/meta/main.yml
+++ b/roles/openshift_expand_partition/meta/main.yml
@@ -13,6 +13,6 @@ galaxy_info:
versions:
- all
categories:
- - openshift
- - cloud
+ - openshift
+ - cloud
dependencies: []
diff --git a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
index 7b1b3f6ff..e56a68e27 100644
--- a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
@@ -1,3 +1,4 @@
+---
- fail:
msg: >
Object Storage Provider: {{ openshift.hosted.registry.storage.provider }}
diff --git a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml b/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml
index 8754616d9..70b0d67a4 100644
--- a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml
+++ b/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml
@@ -1,59 +1,59 @@
---
- - name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
+- name: Create temp directory for kubeconfig
+ command: mktemp -d /tmp/openshift-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
- - name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
- changed_when: False
+- name: Copy the admin client config(s)
+ command: >
+ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: False
- - name: "Checking for logging project"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging"
- register: logging_project
- failed_when: "'FAILED' in logging_project.stderr"
+- name: "Checking for logging project"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging"
+ register: logging_project
+ failed_when: "'FAILED' in logging_project.stderr"
- - name: "Changing projects"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging"
+- name: "Changing projects"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging"
- - name: "Cleanup any previous logging infrastructure"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all --selector logging-infra={{ item }}"
- with_items:
- - kibana
- - fluentd
- - elasticsearch
- ignore_errors: yes
+- name: "Cleanup any previous logging infrastructure"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all --selector logging-infra={{ item }}"
+ with_items:
+ - kibana
+ - fluentd
+ - elasticsearch
+ ignore_errors: yes
- - name: "Cleanup existing support infrastructure"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all,sa,oauthclient --selector logging-infra=support"
- ignore_errors: yes
+- name: "Cleanup existing support infrastructure"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all,sa,oauthclient --selector logging-infra=support"
+ ignore_errors: yes
- - name: "Cleanup existing secrets"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete secret logging-fluentd logging-elasticsearch logging-es-proxy logging-kibana logging-kibana-proxy logging-kibana-ops-proxy"
- ignore_errors: yes
- register: clean_result
- failed_when: clean_result.rc == 1 and 'not found' not in clean_result.stderr
+- name: "Cleanup existing secrets"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete secret logging-fluentd logging-elasticsearch logging-es-proxy logging-kibana logging-kibana-proxy logging-kibana-ops-proxy"
+ ignore_errors: yes
+ register: clean_result
+ failed_when: clean_result.rc == 1 and 'not found' not in clean_result.stderr
- - name: "Cleanup existing logging deployers"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete pods --all"
+- name: "Cleanup existing logging deployers"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete pods --all"
- - name: "Cleanup logging project"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete project logging"
+- name: "Cleanup logging project"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete project logging"
- - name: "Remove deployer template"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete template logging-deployer-template -n openshift"
- register: delete_output
- failed_when: delete_output.rc == 1 and 'exists' not in delete_output.stderr
+- name: "Remove deployer template"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete template logging-deployer-template -n openshift"
+ register: delete_output
+ failed_when: delete_output.rc == 1 and 'exists' not in delete_output.stderr
- - name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
- - debug: msg="Success!"
+- debug: msg="Success!"
diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
index 625af9acd..513a74c69 100644
--- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
+++ b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
@@ -1,175 +1,175 @@
---
- - debug: msg="WARNING target_registry is deprecated, use openshift_hosted_logging_image_prefix instead"
- when: target_registry is defined and target_registry
-
- - fail: msg="This role requires the following vars to be defined. openshift_hosted_logging_master_public_url, openshift_hosted_logging_hostname, openshift_hosted_logging_elasticsearch_cluster_size"
- when: "openshift_hosted_logging_hostname is not defined or
- openshift_hosted_logging_elasticsearch_cluster_size is not defined or
- openshift_hosted_logging_master_public_url is not defined"
-
- - name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
-
- - name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
- changed_when: False
-
- - name: "Check for logging project already exists"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging -o jsonpath='{.metadata.name}'
- register: logging_project_result
- ignore_errors: True
-
- - name: "Create logging project"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging
- when: logging_project_result.stdout == ""
-
- - name: "Changing projects"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging
-
- - name: "Creating logging deployer secret"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}
- register: secret_output
- failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr"
-
- - name: "Create templates for logging accounts and the deployer"
- command: >
- {{ openshift.common.client_binary }} create --config={{ mktemp.stdout }}/admin.kubeconfig
- -f {{ hosted_base }}/logging-deployer.yaml
- --config={{ mktemp.stdout }}/admin.kubeconfig
- -n logging
- register: logging_import_template
- failed_when: "'already exists' not in logging_import_template.stderr and logging_import_template.rc != 0"
- changed_when: "'created' in logging_import_template.stdout"
-
- - name: "Process the logging accounts template"
- shell: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- process logging-deployer-account-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -
- register: process_deployer_accounts
- failed_when: process_deployer_accounts.rc == 1 and 'already exists' not in process_deployer_accounts.stderr
-
- - name: "Set permissions for logging-deployer service account"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
- policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer
- register: permiss_output
- failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr"
-
- - name: "Set permissions for fluentd"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
- policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
- register: fluentd_output
- failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
-
- - name: "Set additional permissions for fluentd"
- command: >
- {{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig
- add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
- register: fluentd2_output
- failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
-
- - name: "Add rolebinding-reader to aggregated-logging-elasticsearch"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
- policy add-cluster-role-to-user rolebinding-reader \
- system:serviceaccount:logging:aggregated-logging-elasticsearch
- register: rolebinding_reader_output
- failed_when: "rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr"
-
- - name: "Create ConfigMap for deployer parameters"
- command: >
- {{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }}
- register: deployer_configmap_output
- failed_when: "deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr"
-
- - name: "Process the deployer template"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}"
- register: process_deployer
- failed_when: process_deployer.rc == 1 and 'already exists' not in process_deployer.stderr
-
- - name: "Wait for image pull and deployer pod"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods | grep logging-deployer.*Completed"
- register: result
- until: result.rc == 0
- retries: 20
- delay: 15
-
- - name: "Process imagestream template"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-imagestream-template {{ oc_new_app_values }}"
- when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
- register: process_is
- failed_when: process_is.rc == 1 and 'already exists' not in process_is.stderr
-
- - name: "Set insecured registry"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig annotate is --all openshift.io/image.insecureRepository=true --overwrite"
- when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
-
- - name: "Wait for imagestreams to become available"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get is | grep logging-fluentd"
- when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 and 'not found' not in result.stderr
- retries: 20
- delay: 5
-
- - name: "Wait for component pods to be running"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
- with_items:
- - es
- - kibana
- - curator
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 15
-
- - name: "Wait for ops component pods to be running"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
- with_items:
- - es-ops
- - kibana-ops
- - curator-ops
- when: openshift_hosted_logging_enable_ops_cluster is defined and openshift_hosted_logging_enable_ops_cluster
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 15
-
- - name: "Wait for fluentd DaemonSet to exist"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get daemonset logging-fluentd"
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 5
-
- - name: "Deploy fluentd by labeling the node"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node --overwrite=true {{ '-l' ~ openshift_hosted_logging_fluentd_nodeselector if openshift_hosted_logging_fluentd_nodeselector is defined else '--all' }} {{ openshift_hosted_logging_fluentd_nodeselector_label if openshift_hosted_logging_fluentd_nodeselector_label is defined else 'logging-infra-fluentd=true' }}"
-
- - name: "Wait for fluentd to be running"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component=fluentd | grep Running"
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 15
-
- - debug:
- msg: "Logging components deployed. Note persistent volume for elasticsearch must be setup manually"
-
- - name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
+- debug: msg="WARNING target_registry is deprecated, use openshift_hosted_logging_image_prefix instead"
+ when: target_registry is defined and target_registry
+
+- fail: msg="This role requires the following vars to be defined. openshift_hosted_logging_master_public_url, openshift_hosted_logging_hostname, openshift_hosted_logging_elasticsearch_cluster_size"
+ when: "openshift_hosted_logging_hostname is not defined or
+ openshift_hosted_logging_elasticsearch_cluster_size is not defined or
+ openshift_hosted_logging_master_public_url is not defined"
+
+- name: Create temp directory for kubeconfig
+ command: mktemp -d /tmp/openshift-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- name: Copy the admin client config(s)
+ command: >
+ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: False
+
+- name: "Check for logging project already exists"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging -o jsonpath='{.metadata.name}'
+ register: logging_project_result
+ ignore_errors: True
+
+- name: "Create logging project"
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging
+ when: logging_project_result.stdout == ""
+
+- name: "Changing projects"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging
+
+- name: "Creating logging deployer secret"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}
+ register: secret_output
+ failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr"
+
+- name: "Create templates for logging accounts and the deployer"
+ command: >
+ {{ openshift.common.client_binary }} create --config={{ mktemp.stdout }}/admin.kubeconfig
+ -f {{ hosted_base }}/logging-deployer.yaml
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ -n logging
+ register: logging_import_template
+ failed_when: "'already exists' not in logging_import_template.stderr and logging_import_template.rc != 0"
+ changed_when: "'created' in logging_import_template.stdout"
+
+- name: "Process the logging accounts template"
+ shell: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ process logging-deployer-account-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -
+ register: process_deployer_accounts
+ failed_when: process_deployer_accounts.rc == 1 and 'already exists' not in process_deployer_accounts.stderr
+
+- name: "Set permissions for logging-deployer service account"
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
+ policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer
+ register: permiss_output
+ failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr"
+
+- name: "Set permissions for fluentd"
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
+ policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
+ register: fluentd_output
+ failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
+
+- name: "Set additional permissions for fluentd"
+ command: >
+ {{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig
+ add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
+ register: fluentd2_output
+ failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
+
+- name: "Add rolebinding-reader to aggregated-logging-elasticsearch"
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
+ policy add-cluster-role-to-user rolebinding-reader \
+ system:serviceaccount:logging:aggregated-logging-elasticsearch
+ register: rolebinding_reader_output
+ failed_when: "rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr"
+
+- name: "Create ConfigMap for deployer parameters"
+ command: >
+ {{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }}
+ register: deployer_configmap_output
+ failed_when: "deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr"
+
+- name: "Process the deployer template"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}"
+ register: process_deployer
+ failed_when: process_deployer.rc == 1 and 'already exists' not in process_deployer.stderr
+
+- name: "Wait for image pull and deployer pod"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods | grep logging-deployer.*Completed"
+ register: result
+ until: result.rc == 0
+ retries: 20
+ delay: 15
+
+- name: "Process imagestream template"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-imagestream-template {{ oc_new_app_values }}"
+ when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
+ register: process_is
+ failed_when: process_is.rc == 1 and 'already exists' not in process_is.stderr
+
+- name: "Set insecured registry"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig annotate is --all openshift.io/image.insecureRepository=true --overwrite"
+ when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
+
+- name: "Wait for imagestreams to become available"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get is | grep logging-fluentd"
+ when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
+ register: result
+ until: result.rc == 0
+ failed_when: result.rc == 1 and 'not found' not in result.stderr
+ retries: 20
+ delay: 5
+
+- name: "Wait for component pods to be running"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
+ with_items:
+ - es
+ - kibana
+ - curator
+ register: result
+ until: result.rc == 0
+ failed_when: result.rc == 1 or 'Error' in result.stderr
+ retries: 20
+ delay: 15
+
+- name: "Wait for ops component pods to be running"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
+ with_items:
+ - es-ops
+ - kibana-ops
+ - curator-ops
+ when: openshift_hosted_logging_enable_ops_cluster is defined and openshift_hosted_logging_enable_ops_cluster
+ register: result
+ until: result.rc == 0
+ failed_when: result.rc == 1 or 'Error' in result.stderr
+ retries: 20
+ delay: 15
+
+- name: "Wait for fluentd DaemonSet to exist"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get daemonset logging-fluentd"
+ register: result
+ until: result.rc == 0
+ failed_when: result.rc == 1 or 'Error' in result.stderr
+ retries: 20
+ delay: 5
+
+- name: "Deploy fluentd by labeling the node"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node --overwrite=true {{ '-l' ~ openshift_hosted_logging_fluentd_nodeselector if openshift_hosted_logging_fluentd_nodeselector is defined else '--all' }} {{ openshift_hosted_logging_fluentd_nodeselector_label if openshift_hosted_logging_fluentd_nodeselector_label is defined else 'logging-infra-fluentd=true' }}"
+
+- name: "Wait for fluentd to be running"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component=fluentd | grep Running"
+ register: result
+ until: result.rc == 0
+ failed_when: result.rc == 1 or 'Error' in result.stderr
+ retries: 20
+ delay: 15
+
+- debug:
+ msg: "Logging components deployed. Note persistent volume for elasticsearch must be setup manually"
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
diff --git a/roles/openshift_hosted_logging/vars/main.yaml b/roles/openshift_hosted_logging/vars/main.yaml
index 11412733b..33320e9c8 100644
--- a/roles/openshift_hosted_logging/vars/main.yaml
+++ b/roles/openshift_hosted_logging/vars/main.yaml
@@ -1,3 +1,4 @@
+---
tr_or_ohlip: "{{ openshift_hosted_logging_deployer_prefix | default(target_registry) | default(None) }}"
ip_kv: "{{ '-p IMAGE_PREFIX=' ~ tr_or_ohlip | quote if tr_or_ohlip != '' else '' }}"
iv_kv: "{{ '-p IMAGE_VERSION=' ~ openshift_hosted_logging_deployer_version | quote if openshift_hosted_logging_deployer_version | default(none) is not none else '' }}"
diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml
index 37d4679ef..3f24fd6be 100644
--- a/roles/openshift_manageiq/vars/main.yml
+++ b/roles/openshift_manageiq/vars/main.yml
@@ -1,13 +1,14 @@
+---
manageiq_cluster_role:
- apiVersion: v1
- kind: ClusterRole
- metadata:
- name: management-infra-admin
- rules:
- - resources:
- - pods/proxy
- verbs:
- - '*'
+ apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: management-infra-admin
+ rules:
+ - resources:
+ - pods/proxy
+ verbs:
+ - '*'
manageiq_metrics_admin_clusterrole:
apiVersion: v1
@@ -24,28 +25,28 @@ manageiq_metrics_admin_clusterrole:
- '*'
manageiq_service_account:
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: management-admin
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: management-admin
manageiq_image_inspector_service_account:
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: inspector-admin
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: inspector-admin
manage_iq_tmp_conf: /tmp/manageiq_admin.kubeconfig
manage_iq_tasks:
- - policy add-role-to-user -n management-infra admin -z management-admin
- - policy add-role-to-user -n management-infra management-infra-admin -z management-admin
- - policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin
- - policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin
- - policy add-cluster-role-to-user system:image-puller system:serviceaccount:management-infra:inspector-admin
- - policy add-scc-to-user privileged system:serviceaccount:management-infra:inspector-admin
- - policy add-cluster-role-to-user self-provisioner system:serviceaccount:management-infra:management-admin
- - policy add-cluster-role-to-user hawkular-metrics-admin system:serviceaccount:management-infra:management-admin
+- policy add-role-to-user -n management-infra admin -z management-admin
+- policy add-role-to-user -n management-infra management-infra-admin -z management-admin
+- policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin
+- policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin
+- policy add-cluster-role-to-user system:image-puller system:serviceaccount:management-infra:inspector-admin
+- policy add-scc-to-user privileged system:serviceaccount:management-infra:inspector-admin
+- policy add-cluster-role-to-user self-provisioner system:serviceaccount:management-infra:management-admin
+- policy add-cluster-role-to-user hawkular-metrics-admin system:serviceaccount:management-infra:management-admin
manage_iq_openshift_3_2_tasks:
- - policy add-cluster-role-to-user system:image-auditor system:serviceaccount:management-infra:management-admin
+- policy add-cluster-role-to-user system:image-auditor system:serviceaccount:management-infra:management-admin
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index e2b722abd..39ea42ab3 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -1,3 +1,4 @@
+---
# This file is included both in the openshift_master role and in the upgrade
# playbooks. For that reason the ha_svc variables are use set_fact instead of
# the vars directory on the role.
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index f5923ecf8..0dba4b3ba 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -92,8 +92,8 @@
controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}"
master_image: "{{ osm_image | default(None) }}"
admission_plugin_config: "{{openshift_master_admission_plugin_config | default(None) }}"
- kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" # deprecated, merged with admission_plugin_config
- oauth_template: "{{ openshift_master_oauth_template | default(None) }}" # deprecated in origin 1.2 / OSE 3.2
+ kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" # deprecated, merged with admission_plugin_config
+ oauth_template: "{{ openshift_master_oauth_template | default(None) }}" # deprecated in origin 1.2 / OSE 3.2
oauth_templates: "{{ openshift_master_oauth_templates | default(None) }}"
oauth_always_show_provider_selection: "{{ openshift_master_oauth_always_show_provider_selection | default(None) }}"
image_policy_config: "{{ openshift_master_image_policy_config | default(None) }}"
diff --git a/roles/openshift_master_facts/vars/main.yml b/roles/openshift_master_facts/vars/main.yml
index a5ad580e7..fa745eb66 100644
--- a/roles/openshift_master_facts/vars/main.yml
+++ b/roles/openshift_master_facts/vars/main.yml
@@ -23,4 +23,3 @@ builddefaults_yaml:
value: "{{ openshift.master.builddefaults_https_proxy | default(omit, true) }}"
- name: no_proxy
value: "{{ openshift.master.builddefaults_no_proxy | default(omit, true) | join(',') }}"
-
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index be3256f02..68e4a48b9 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -38,9 +38,9 @@
get pods -l {{ item }} | grep -q Running
register: metrics_pods_status
with_items:
- - metrics-infra=hawkular-metrics
- - metrics-infra=heapster
- - metrics-infra=hawkular-cassandra
+ - metrics-infra=hawkular-metrics
+ - metrics-infra=heapster
+ - metrics-infra=hawkular-cassandra
failed_when: false
changed_when: false
diff --git a/roles/openshift_metrics/vars/main.yaml b/roles/openshift_metrics/vars/main.yaml
index 0331bcb89..6c207d6ac 100644
--- a/roles/openshift_metrics/vars/main.yaml
+++ b/roles/openshift_metrics/vars/main.yaml
@@ -1,6 +1,7 @@
+---
hawkular_permission_oc_commands:
- - policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra
- - policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster
+ - policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra
+ - policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster
metrics_deployer_sa:
apiVersion: v1
@@ -8,7 +9,7 @@ metrics_deployer_sa:
metadata:
name: metrics-deployer
secrets:
- - name: metrics-deployer
+ - name: metrics-deployer
hawkular_tmp_conf: /tmp/hawkular_admin.kubeconfig
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 8b669a2c6..626c47387 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -1,3 +1,4 @@
+---
# This file is included both in the openshift_master role and in the upgrade
# playbooks.
@@ -68,12 +69,12 @@
line: "{{ item.line }}"
create: true
with_items:
- - regex: '^OPTIONS='
- line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
- - regex: '^CONFIG_FILE='
- line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
- - regex: '^IMAGE_VERSION='
- line: "IMAGE_VERSION={{ openshift_image_tag }}"
+ - regex: '^OPTIONS='
+ line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
+ - regex: '^CONFIG_FILE='
+ line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
+ - regex: '^IMAGE_VERSION='
+ line: "IMAGE_VERSION={{ openshift_image_tag }}"
notify:
- restart node
@@ -84,12 +85,12 @@
line: "{{ item.line }}"
create: true
with_items:
- - regex: '^HTTP_PROXY='
- line: "HTTP_PROXY={{ openshift.common.http_proxy | default('') }}"
- - regex: '^HTTPS_PROXY='
- line: "HTTPS_PROXY={{ openshift.common.https_proxy | default('') }}"
- - regex: '^NO_PROXY='
- line: "NO_PROXY={{ openshift.common.no_proxy | default([]) | join(',') }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
+ - regex: '^HTTP_PROXY='
+ line: "HTTP_PROXY={{ openshift.common.http_proxy | default('') }}"
+ - regex: '^HTTPS_PROXY='
+ line: "HTTPS_PROXY={{ openshift.common.https_proxy | default('') }}"
+ - regex: '^NO_PROXY='
+ line: "NO_PROXY={{ openshift.common.no_proxy | default([]) | join(',') }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '')
notify:
- restart node
diff --git a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
index 4d1bd3794..d5fda7bd0 100644
--- a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
+++ b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
@@ -1,2 +1,2 @@
---
-- fail: msg="Currently, NetworkManager must be installed and enabled prior to installation." \ No newline at end of file
+- fail: msg="Currently, NetworkManager must be installed and enabled prior to installation."
diff --git a/roles/openshift_repos/vars/main.yml b/roles/openshift_repos/vars/main.yml
index 319611a0b..da48e42c1 100644
--- a/roles/openshift_repos/vars/main.yml
+++ b/roles/openshift_repos/vars/main.yml
@@ -4,4 +4,4 @@
# enterprise is used for OSE 3.0 < 3.1 which uses packages named 'openshift'
# atomic-enterprise uses Red Hat packages named 'atomic-openshift'
# openshift-enterprise uses Red Hat packages named 'atomic-openshift' starting with OSE 3.1
-known_openshift_deployment_types: ['origin', 'online', 'enterprise','atomic-enterprise','openshift-enterprise']
+known_openshift_deployment_types: ['origin', 'online', 'enterprise', 'atomic-enterprise', 'openshift-enterprise']
diff --git a/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml b/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml
index 8715fc64e..b8cbe9a84 100644
--- a/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml
+++ b/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml
@@ -1,3 +1,4 @@
+---
####
#
# OSE 3.0.z did not have 'oadm policy add-scc-to-user'.
@@ -9,7 +10,7 @@
path: /tmp/openshift
state: directory
owner: root
- mode: 700
+ mode: 0700
- name: Create service account configs
template:
diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml
index 1ff9e6dcb..d83ccf7de 100644
--- a/roles/openshift_serviceaccounts/tasks/main.yml
+++ b/roles/openshift_serviceaccounts/tasks/main.yml
@@ -1,3 +1,4 @@
+---
- name: test if service accounts exists
command: >
{{ openshift.common.client_binary }} get sa {{ item }} -n {{ openshift_serviceaccounts_namespace }}
diff --git a/roles/openshift_storage_nfs_lvm/meta/main.yml b/roles/openshift_storage_nfs_lvm/meta/main.yml
index bed1216f8..ea7c9bb45 100644
--- a/roles/openshift_storage_nfs_lvm/meta/main.yml
+++ b/roles/openshift_storage_nfs_lvm/meta/main.yml
@@ -13,5 +13,5 @@ galaxy_info:
versions:
- all
categories:
- - openshift
+ - openshift
dependencies: []
diff --git a/roles/rhel_subscribe/meta/main.yml b/roles/rhel_subscribe/meta/main.yml
index 6204a5aa5..0bbeadd34 100644
--- a/roles/rhel_subscribe/meta/main.yml
+++ b/roles/rhel_subscribe/meta/main.yml
@@ -1,2 +1,3 @@
+---
dependencies:
-- role: openshift_facts
+ - role: openshift_facts