path: root/roles/openshift_logging_elasticsearch/tasks/main.yaml
author     OpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com>    2018-02-14 14:28:33 -0800
committer  GitHub <noreply@github.com>    2018-02-14 14:28:33 -0800
commit     b62c397f0625b9ff3654347a1777ed2277942712 (patch)
tree       950a36359a9ac5e7d4a0b692ccdaf43e6f106463 /roles/openshift_logging_elasticsearch/tasks/main.yaml
parent     deb9a793cbb169b964424720f9c3a6ce6b976b09 (diff)
parent     61df593d2047995f25327e54b32956944f413100 (diff)
Merge pull request #7097 from ewolinetz/logging_fresh_lg_cluster_fix
Automatic merge from submit-queue.

Whenever we create a new ES node, ignore health checks; change the prometheus password generation for increased secret idempotency.

Addresses https://bugzilla.redhat.com/show_bug.cgi?id=1540099

When the cluster is sized > 1, the number of nodes required for recovery is also > 1, so on a fresh install the cluster never starts up because the required node count is not met. Whenever we create a new node we therefore do not wait for its health check, so the logging playbook can complete and all updated nodes can be rolled out.

This also changes the prometheus password generation so that rerunning the playbook no longer changes the secret, which would otherwise trigger a full rollout of the cluster (on the assumption that keys/certs have changed).
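In outline, the idempotent password handling in this tasks file now works as sketched below. This is only a condensed reading of the diff that follows, reusing the role's existing variable names; the comments are added for clarity.

# Generate passwd.yml only once, under generated_certs_dir instead of a fresh
# mktemp directory, so reruns reuse the same secret content.
- name: Checking for passwd.yml
  stat: path="{{ generated_certs_dir }}/passwd.yml"
  register: passwd_file
  check_mode: no

- when: not passwd_file.stat.exists
  template:
    src: passwd.j2
    dest: "{{ generated_certs_dir }}/passwd.yml"
  vars:
    logging_user_name: "{{ openshift_logging_elasticsearch_prometheus_sa }}"
    logging_user_passwd: "{{ 16 | lib_utils_oo_random_word | b64encode }}"

# Read the file back (whether newly generated or pre-existing) so later tasks
# can build the secret and the proxy basic-auth password from it.
- slurp:
    src: "{{ generated_certs_dir }}/passwd.yml"
  register: _logging_metrics_proxy_passwd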
Diffstat (limited to 'roles/openshift_logging_elasticsearch/tasks/main.yaml')
-rw-r--r--    roles/openshift_logging_elasticsearch/tasks/main.yaml    21
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 64e5a3a1f..441460b2d 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -138,15 +138,22 @@
   - "prometheus_out.stderr | length > 0"
   - "'already exists' not in prometheus_out.stderr"

-- set_fact:
-    _logging_metrics_proxy_passwd: "{{ 16 | lib_utils_oo_random_word | b64encode }}"
+- name: Checking for passwd.yml
+  stat: path="{{ generated_certs_dir }}/passwd.yml"
+  register: passwd_file
+  check_mode: no

-- template:
+- when: not passwd_file.stat.exists
+  template:
     src: passwd.j2
-    dest: "{{mktemp.stdout}}/passwd.yml"
+    dest: "{{ generated_certs_dir }}/passwd.yml"
   vars:
     logging_user_name: "{{ openshift_logging_elasticsearch_prometheus_sa }}"
-    logging_user_passwd: "{{ _logging_metrics_proxy_passwd }}"
+    logging_user_passwd: "{{ 16 | lib_utils_oo_random_word | b64encode }}"
+
+- slurp:
+    src: "{{ generated_certs_dir }}/passwd.yml"
+  register: _logging_metrics_proxy_passwd

 # View role and binding
 - name: Generate logging-elasticsearch-view-role
@@ -296,7 +303,7 @@
     - name: admin.jks
       path: "{{ generated_certs_dir }}/system.admin.jks"
     - name: passwd.yml
-      path: "{{mktemp.stdout}}/passwd.yml"
+      path: "{{ generated_certs_dir }}/passwd.yml"

 # services
 - name: Set logging-{{ es_component }}-cluster service
@@ -433,7 +440,7 @@
     es_container_security_context: "{{ _es_containers.elasticsearch.securityContext if _es_containers is defined and 'elasticsearch' in _es_containers and 'securityContext' in _es_containers.elasticsearch else None }}"
     deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}"
     es_replicas: 1
-    basic_auth_passwd: "{{ _logging_metrics_proxy_passwd | b64decode }}"
+    basic_auth_passwd: "{{ ( _logging_metrics_proxy_passwd['content'] | b64decode | from_yaml )[openshift_logging_elasticsearch_prometheus_sa]['passwd'] }}"
     es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
     es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas| default(0) }}"
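For reference, the new basic_auth_passwd expression unpacks the slurped file in stages. A rough equivalent spelled out as separate set_fact tasks (the _passwd_map and _proxy_passwd names below are only illustrative, not part of the role) might look like:

# slurp returns the file contents base64-encoded under the 'content' key.
- set_fact:
    _passwd_map: "{{ _logging_metrics_proxy_passwd['content'] | b64decode | from_yaml }}"

# passwd.yml (rendered from passwd.j2) appears to map each user name to a dict
# with a 'passwd' entry, so the proxy password for the prometheus service
# account is:
- set_fact:
    _proxy_passwd: "{{ _passwd_map[openshift_logging_elasticsearch_prometheus_sa]['passwd'] }}"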