path: root/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
author    OpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com>  2018-02-14 14:28:33 -0800
committer GitHub <noreply@github.com>  2018-02-14 14:28:33 -0800
commit    b62c397f0625b9ff3654347a1777ed2277942712 (patch)
tree      950a36359a9ac5e7d4a0b692ccdaf43e6f106463 /roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
parent    deb9a793cbb169b964424720f9c3a6ce6b976b09 (diff)
parent    61df593d2047995f25327e54b32956944f413100 (diff)
Merge pull request #7097 from ewolinetz/logging_fresh_lg_cluster_fix
Automatic merge from submit-queue.

Whenever we create a new es node, ignore health checks; also change prometheus pw gen for increased secret idempotency.

Addresses https://bugzilla.redhat.com/show_bug.cgi?id=1540099

Whenever the cluster size is > 1, the number of nodes required for recovery is also > 1. On a fresh install we will therefore never see the cluster start up, because the number of required nodes has not yet been met. So whenever we are creating a new node, we skip the health check wait, letting the logging playbook complete and roll out all updated nodes.

This also fixes prometheus password generation so that each rerun of the playbook no longer changes the secret; a changed secret triggers a full rollout of the cluster, since a change is assumed to mean the keys/certs have changed.
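To make the secret-idempotency half concrete, here is a minimal Ansible sketch of the reuse-before-regenerate pattern. The secret name prometheus-credentials and the task wording are illustrative assumptions, not identifiers from this PR; only {{ openshift_client_binary }} and {{ openshift_logging_elasticsearch_namespace }} come from the diff below.

# Sketch only: keep the prometheus password stable across playbook reruns.
# 'prometheus-credentials' is a hypothetical secret name used for illustration.
- name: Read back any existing prometheus password
  command: >
    {{ openshift_client_binary }} get secret prometheus-credentials
    -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.data.password}
  register: _existing_pw
  failed_when: false
  changed_when: false

- name: Reuse the stored password if present, otherwise generate a fresh one
  set_fact:
    _prometheus_pw: >-
      {{ (_existing_pw.stdout | b64decode)
         if (_existing_pw.rc == 0 and _existing_pw.stdout != '')
         else lookup('password', '/dev/null chars=ascii_letters,digits length=16') }}

With this shape, a rerun that finds the secret feeds the same bytes back into the secret task, so the secret object never changes and no cluster rollout is triggered.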
Diffstat (limited to 'roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml')
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml  7
1 file changed, 7 insertions, 0 deletions
diff --git a/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
index 14f2313e1..01247dd5d 100644
--- a/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
+++ b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
@@ -65,6 +65,12 @@
     {{ openshift_client_binary }} get dc -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
   register: _cluster_dcs

+# If we are currently restarting the "es" cluster we want to check if we are scaling up the number of es nodes
+# If we are currently restarting the "es-ops" cluster we want to check if we are scaling up the number of ops nodes
+# If we've created a new node for that cluster then the appropriate variable will be true, otherwise we default to false
+- set_fact:
+    _skip_healthcheck: "{{ __logging_scale_up | default(false) if _cluster_component == 'es' else __logging_ops_scale_up | default(false) }}"
+
 ## restart all dcs for full restart
 - name: "Restart ES node {{ _es_node }}"
   include_tasks: restart_es_node.yml
@@ -94,6 +100,7 @@
     {{ openshift_client_binary }} exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "all" } }'
   register: _enable_output
   changed_when: "'\"acknowledged\":true' in _enable_output.stdout"
+  when: _cluster_pods.stdout != ""

 # Reenable external communication for {{ _cluster_component }}
 - name: Reenable external communication for logging-{{ _cluster_component }}
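For context, restart_es_node.yml (not shown in this diff) is where the per-node health wait lives; the new _skip_healthcheck fact is what lets a fresh scale-up bypass that wait. A sketch of how such a guard could look, assuming a hypothetical task body rather than the verbatim repo task:

# Sketch only: the task names and retry numbers are illustrative assumptions.
- name: "Waiting for ES node {{ _es_node }} to report a healthy cluster"
  command: >
    {{ openshift_client_binary }} exec {{ _cluster_pods.stdout.split(' ')[0] }}
    -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} --
    {{ __es_local_curl }} https://localhost:9200/_cluster/health?pretty
  register: _cluster_status
  until: (_cluster_status.stdout | from_json)['status'] in ['green', 'yellow']
  retries: 60
  delay: 30
  # On a fresh scale-up the cluster cannot yet meet its recovery quorum,
  # so waiting here would hang the playbook; _skip_healthcheck bypasses it.
  when: not _skip_healthcheck | bool

The second hunk's when: _cluster_pods.stdout != "" guard is a separate fix: on a brand-new cluster the pod list can be empty, and exec-ing into _cluster_pods.stdout.split(' ')[0] would fail, so the call that re-enables shard allocation is simply skipped.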