-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml | 22
-rw-r--r--  playbooks/init/evaluate_groups.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/ca.yml | 1
-rw-r--r--  playbooks/openshift-etcd/private/certificates-backup.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/config.yml | 1
-rw-r--r--  playbooks/openshift-etcd/private/master_etcd_certificates.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/redeploy-ca.yml | 1
-rw-r--r--  playbooks/openshift-etcd/private/scaleup.yml | 5
-rw-r--r--  playbooks/openshift-etcd/private/server_certificates.yml | 1
-rw-r--r--  playbooks/openshift-master/private/scaleup.yml | 2
-rw-r--r--  playbooks/openshift-node/private/etcd_client_config.yml | 1
-rw-r--r--  roles/lib_utils/filter_plugins/oo_filters.py | 2
-rw-r--r--  roles/openshift_etcd_facts/defaults/main.yml | 2
-rw-r--r--  roles/openshift_etcd_facts/tasks/main.yml | 1
-rw-r--r--  roles/openshift_etcd_facts/tasks/set_etcd_ca_host.yml | 44
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/cfme-templates/jboss-middleware-manager-template.yaml | 2
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 4
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/get_es_version.yml | 8
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml | 14
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml | 6
-rw-r--r--  roles/openshift_master/tasks/system_container.yml | 4
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 2
23 files changed, 74 insertions(+), 57 deletions(-)
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
index 463a05688..4902b9ecd 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -94,25 +94,3 @@
state: started
enabled: yes
with_items: "{{ master_services }}"
-
-# Until openshift-ansible is determining which host is the CA host we
-# must (unfortunately) ensure that the first host in the etcd group is
-# the etcd CA host.
-# https://bugzilla.redhat.com/show_bug.cgi?id=1469358
-- name: Verify we can proceed on first etcd
- hosts: oo_first_etcd
- gather_facts: no
- tasks:
- - name: Ensure CA exists on first etcd
- stat:
- path: /etc/etcd/generated_certs
- register: __etcd_ca_stat
-
- - fail:
- msg: >
- In order to correct an etcd certificate signing problem
- upgrading may require re-generating etcd certificates. Please
- ensure that the /etc/etcd/generated_certs directory exists on
- the first host defined in your [etcd] group.
- when:
- - not __etcd_ca_stat.stat.exists | bool
diff --git a/playbooks/init/evaluate_groups.yml b/playbooks/init/evaluate_groups.yml
index e8bf1892c..81d7d63ca 100644
--- a/playbooks/init/evaluate_groups.yml
+++ b/playbooks/init/evaluate_groups.yml
@@ -51,7 +51,7 @@
upgrade please see https://docs.openshift.com/container-platform/latest/install_config/upgrading/migrating_embedded_etcd.html
for documentation on how to migrate from embedded to external etcd.
when:
-  - g_etcd_hosts | default([]) | length not in [5,3,1]
+  - g_etcd_hosts | default([]) | length == 0
- not (openshift_node_bootstrap | default(False))
- name: Evaluate oo_all_hosts
diff --git a/playbooks/openshift-etcd/private/ca.yml b/playbooks/openshift-etcd/private/ca.yml
index 72c39d546..77e7b0ed0 100644
--- a/playbooks/openshift-etcd/private/ca.yml
+++ b/playbooks/openshift-etcd/private/ca.yml
@@ -10,7 +10,6 @@
tasks_from: ca.yml
vars:
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
when:
- etcd_ca_setup | default(True) | bool
diff --git a/playbooks/openshift-etcd/private/certificates-backup.yml b/playbooks/openshift-etcd/private/certificates-backup.yml
index 2f9bef799..e1354de67 100644
--- a/playbooks/openshift-etcd/private/certificates-backup.yml
+++ b/playbooks/openshift-etcd/private/certificates-backup.yml
@@ -1,6 +1,6 @@
---
- name: Backup and remove generated etcd certificates
- hosts: oo_first_etcd
+ hosts: oo_etcd_to_config
any_errors_fatal: true
tasks:
- import_role:
diff --git a/playbooks/openshift-etcd/private/config.yml b/playbooks/openshift-etcd/private/config.yml
index 35407969e..bbc952d8e 100644
--- a/playbooks/openshift-etcd/private/config.yml
+++ b/playbooks/openshift-etcd/private/config.yml
@@ -22,7 +22,6 @@
- role: openshift_clock
- role: openshift_etcd
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
- role: nickhammond.logrotate
diff --git a/playbooks/openshift-etcd/private/master_etcd_certificates.yml b/playbooks/openshift-etcd/private/master_etcd_certificates.yml
index d98470db2..4e4972dba 100644
--- a/playbooks/openshift-etcd/private/master_etcd_certificates.yml
+++ b/playbooks/openshift-etcd/private/master_etcd_certificates.yml
@@ -5,9 +5,7 @@
roles:
- role: openshift_etcd_facts
- role: openshift_etcd_client_certificates
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
etcd_cert_prefix: "master.etcd-"
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
diff --git a/playbooks/openshift-etcd/private/redeploy-ca.yml b/playbooks/openshift-etcd/private/redeploy-ca.yml
index a3acf6945..55409e503 100644
--- a/playbooks/openshift-etcd/private/redeploy-ca.yml
+++ b/playbooks/openshift-etcd/private/redeploy-ca.yml
@@ -45,7 +45,6 @@
tasks_from: distribute_ca.yml
vars:
etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- import_playbook: restart.yml
# Do not restart etcd when etcd certificates were previously expired.
diff --git a/playbooks/openshift-etcd/private/scaleup.yml b/playbooks/openshift-etcd/private/scaleup.yml
index 8a9811a25..162a5eba7 100644
--- a/playbooks/openshift-etcd/private/scaleup.yml
+++ b/playbooks/openshift-etcd/private/scaleup.yml
@@ -12,8 +12,6 @@
hosts: oo_new_etcd_to_config
serial: 1
any_errors_fatal: true
- vars:
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
pre_tasks:
- name: Add new etcd members to cluster
command: >
@@ -42,7 +40,6 @@
- role: openshift_etcd
when: etcd_add_check.rc == 0
etcd_peers: "{{ groups.oo_etcd_to_config | union(groups.oo_new_etcd_to_config)| default([], true) }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_initial_cluster_state: "existing"
etcd_initial_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
@@ -66,8 +63,6 @@
hosts: oo_masters_to_config
serial: 1
vars:
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
| lib_utils_oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'] | default([]) ))
| lib_utils_oo_collect('openshift.common.hostname')
diff --git a/playbooks/openshift-etcd/private/server_certificates.yml b/playbooks/openshift-etcd/private/server_certificates.yml
index ebcf4a5ff..0abfe1650 100644
--- a/playbooks/openshift-etcd/private/server_certificates.yml
+++ b/playbooks/openshift-etcd/private/server_certificates.yml
@@ -9,6 +9,5 @@
name: etcd
tasks_from: server_certificates.yml
vars:
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
diff --git a/playbooks/openshift-master/private/scaleup.yml b/playbooks/openshift-master/private/scaleup.yml
index 20ebf70d3..5aaa0b156 100644
--- a/playbooks/openshift-master/private/scaleup.yml
+++ b/playbooks/openshift-master/private/scaleup.yml
@@ -45,7 +45,7 @@
- import_playbook: set_network_facts.yml
-- import_playbook: ../../openshift-etcd/private/certificates.yml
+- import_playbook: ../../openshift-etcd/private/master_etcd_certificates.yml
- import_playbook: config.yml
diff --git a/playbooks/openshift-node/private/etcd_client_config.yml b/playbooks/openshift-node/private/etcd_client_config.yml
index c3fa38a81..148bdc769 100644
--- a/playbooks/openshift-node/private/etcd_client_config.yml
+++ b/playbooks/openshift-node/private/etcd_client_config.yml
@@ -6,6 +6,5 @@
- role: openshift_etcd_facts
- role: openshift_etcd_client_certificates
etcd_cert_prefix: flannel.etcd-
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
diff --git a/roles/lib_utils/filter_plugins/oo_filters.py b/roles/lib_utils/filter_plugins/oo_filters.py
index 574743ff1..c355115b5 100644
--- a/roles/lib_utils/filter_plugins/oo_filters.py
+++ b/roles/lib_utils/filter_plugins/oo_filters.py
@@ -126,7 +126,7 @@ def lib_utils_oo_collect(data_list, attribute=None, filters=None):
raise errors.AnsibleFilterError(
"lib_utils_oo_collect expects filter to be a dict")
retval.extend([get_attr(d, attribute) for d in data if (
- all([d.get(key, None) == filters[key] for key in filters]))])
+ all([get_attr(d, key) == filters[key] for key in filters]))])
else:
retval.extend([get_attr(d, attribute) for d in data])
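
The oo_filters.py change matters because the new set_etcd_ca_host.yml tasks pass dotted filter keys such as 'stat.path' and 'stat.exists', which the old flat d.get(key) lookup could never match; routing the filter keys through get_attr() lets them walk nested result dicts. A simplified, self-contained sketch of that behaviour (not the shipped lib_utils code; the host names are invented):

    def get_attr(data, attribute):
        """Walk a dotted path such as 'stat.exists' through nested dicts."""
        for key in attribute.split('.'):
            data = data[key]
        return data

    def collect(results, attribute, filters):
        """Collect `attribute` from every result whose filter keys all match."""
        return [get_attr(d, attribute) for d in results
                if all(get_attr(d, key) == value for key, value in filters.items())]

    results = [
        {'_ansible_delegated_vars': {'ansible_host': 'etcd1.example.com'},
         'stat': {'path': '/etc/etcd/ca', 'exists': True}},
        {'_ansible_delegated_vars': {'ansible_host': 'etcd2.example.com'},
         'stat': {'path': '/etc/etcd/ca', 'exists': False}},
    ]

    print(collect(results, '_ansible_delegated_vars.ansible_host',
                  {'stat.path': '/etc/etcd/ca', 'stat.exists': True}))
    # ['etcd1.example.com'] -- only the host where the directory exists
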
diff --git a/roles/openshift_etcd_facts/defaults/main.yml b/roles/openshift_etcd_facts/defaults/main.yml
new file mode 100644
index 000000000..d13e7c912
--- /dev/null
+++ b/roles/openshift_etcd_facts/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+etcd_ca_host_group: "oo_etcd_to_config"
diff --git a/roles/openshift_etcd_facts/tasks/main.yml b/roles/openshift_etcd_facts/tasks/main.yml
index ed97d539c..86546f4e3 100644
--- a/roles/openshift_etcd_facts/tasks/main.yml
+++ b/roles/openshift_etcd_facts/tasks/main.yml
@@ -1 +1,2 @@
---
+- import_tasks: set_etcd_ca_host.yml
diff --git a/roles/openshift_etcd_facts/tasks/set_etcd_ca_host.yml b/roles/openshift_etcd_facts/tasks/set_etcd_ca_host.yml
new file mode 100644
index 000000000..bf8d28a9b
--- /dev/null
+++ b/roles/openshift_etcd_facts/tasks/set_etcd_ca_host.yml
@@ -0,0 +1,44 @@
+---
+- name: Check for CA indicator files
+ stat:
+ path: "{{ item.0 }}"
+ delegate_to: "{{ item.1 }}"
+ with_nested:
+ - - /etc/etcd/ca
+ - /etc/etcd/generated_certs
+ - "{{ groups[etcd_ca_host_group] }}"
+ register: __etcd_ca_host_stat
+ run_once: true
+
+# Collect ansible_host (inventory hostname) of hosts with /etc/etcd/ca
+# and /etc/etcd/generated_certs directories.
+- set_fact:
+ __etcd_ca_dir_hosts: "{{ __etcd_ca_host_stat.results
+ | lib_utils_oo_collect('_ansible_delegated_vars.ansible_host',
+ filters={'stat.path':'/etc/etcd/ca','stat.exists':True}) }}"
+ __etcd_generated_certs_dir_hosts: "{{ __etcd_ca_host_stat.results
+ | lib_utils_oo_collect('_ansible_delegated_vars.ansible_host',
+ filters={'stat.path':'/etc/etcd/generated_certs','stat.exists':True}) }}"
+ run_once: true
+
+# __etcd_ca_hosts is the intersection of hosts which have /etc/etcd/ca
+# and /etc/etcd/generated_certs directories.
+- set_fact:
+ __etcd_ca_hosts: "{{ __etcd_ca_dir_hosts | intersect(__etcd_generated_certs_dir_hosts) }}"
+ run_once: true
+
+# __etcd_ca_hosts should only contain one host. If more than one host
+# is able to be an etcd CA host then we will use the first.
+- set_fact:
+ etcd_ca_host: "{{ __etcd_ca_hosts[0] }}"
+ when:
+ - __etcd_ca_hosts | length > 0
+ - etcd_ca_host is not defined
+
+# No etcd_ca_host was found in __etcd_ca_hosts. This is probably a
+# fresh installation so we will default to the first member of the
+# etcd host group.
+- set_fact:
+ etcd_ca_host: "{{ groups[etcd_ca_host_group].0 }}"
+ when:
+ - etcd_ca_host is not defined
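
The net effect of set_etcd_ca_host.yml: a host qualifies as the etcd CA host only if it has both /etc/etcd/ca and /etc/etcd/generated_certs; the first qualifying host wins, and when none qualifies (a fresh install) the first member of the etcd group is used, matching the previous hard-coded behaviour. A rough standalone sketch of that selection, with made-up host names:

    def pick_etcd_ca_host(ca_dir_hosts, generated_certs_hosts, etcd_group):
        """Return the host that should act as the etcd CA."""
        # A candidate needs both /etc/etcd/ca and /etc/etcd/generated_certs.
        candidates = [h for h in ca_dir_hosts if h in generated_certs_hosts]
        if candidates:
            # Several candidates are possible; like the role, take the first.
            return candidates[0]
        # Fresh installation: fall back to the first member of the etcd group.
        return etcd_group[0]

    print(pick_etcd_ca_host(['etcd2', 'etcd3'], ['etcd2'], ['etcd1', 'etcd2', 'etcd3']))
    # etcd2 -- the only host with both directories, even though it is not first
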
diff --git a/roles/openshift_examples/files/examples/v3.9/cfme-templates/jboss-middleware-manager-template.yaml b/roles/openshift_examples/files/examples/v3.9/cfme-templates/jboss-middleware-manager-template.yaml
index bbc0c7044..6ec1c0fed 100644
--- a/roles/openshift_examples/files/examples/v3.9/cfme-templates/jboss-middleware-manager-template.yaml
+++ b/roles/openshift_examples/files/examples/v3.9/cfme-templates/jboss-middleware-manager-template.yaml
@@ -197,7 +197,7 @@ objects:
spec:
containers:
- image: ${CASSANDRA_IMAGE}
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
name: hawkular-cassandra
env:
- name: DATA_VOLUME
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 11a51b6bb..8d0f615b5 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -18,7 +18,7 @@ import socket
import ipaddress
from distutils.util import strtobool
from distutils.version import LooseVersion
-from ansible.module_utils.six import u
+from ansible.module_utils.six import text_type
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves import configparser
@@ -1153,7 +1153,7 @@ def set_proxy_facts(facts):
if 'no_proxy_internal_hostnames' in common:
common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
# We always add local dns domain and ourselves no matter what
- kube_svc_ip = str(ipaddress.ip_network(u(common['portal_net']))[1])
+ kube_svc_ip = str(ipaddress.ip_network(text_type(common['portal_net']))[1])
common['no_proxy'].append(kube_svc_ip)
common['no_proxy'].append('.' + common['dns_domain'])
common['no_proxy'].append('.svc')
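
The six.u() to text_type() swap matters because ipaddress.ip_network() requires text (unicode) input on Python 2, and six.u() is intended for string literals only: on Python 2 it decodes its argument through unicode_escape, which can corrupt runtime values, whereas text_type() is simply unicode on Python 2 and str on Python 3. A small sketch of what the fixed line computes, using an example service subnet:

    import ipaddress
    from six import text_type  # unicode on Python 2, str on Python 3

    portal_net = '172.30.0.0/16'  # example value; the real one comes from the inventory
    # ip_network() wants text input, hence text_type() rather than six.u()
    kube_svc_ip = str(ipaddress.ip_network(text_type(portal_net))[1])
    print(kube_svc_ip)  # 172.30.0.1 -- the first usable address in the service network
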
diff --git a/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml b/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml
index 16de6f252..7c4ecaa5b 100644
--- a/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml
+++ b/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml
@@ -1,21 +1,21 @@
---
- command: >
- oc get pod -l component=es,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
+ {{ openshift_client_binary }} get pod -l component=es,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
register: _cluster_pods
- name: "Getting ES version for logging-es cluster"
command: >
- oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XGET 'https://localhost:9200/'
+ {{ openshift_client_binary }} exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XGET 'https://localhost:9200/'
register: _curl_output
when: _cluster_pods.stdout_lines | count > 0
- command: >
- oc get pod -l component=es-ops,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
+ {{ openshift_client_binary }} get pod -l component=es-ops,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
register: _ops_cluster_pods
- name: "Getting ES version for logging-es-ops cluster"
command: >
- oc exec {{ _ops_cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XGET 'https://localhost:9200/'
+ {{ openshift_client_binary }} exec {{ _ops_cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XGET 'https://localhost:9200/'
register: _ops_curl_output
when: _ops_cluster_pods.stdout_lines | count > 0
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 8a174f0d5..9db67ea9b 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -258,7 +258,7 @@
_restart_logging_components: "{{ _restart_logging_components | default([]) + [es_component] | unique }}"
- shell: >
- oc get dc -l component="{{ es_component }}" -n "{{ openshift_logging_elasticsearch_namespace }}" -o name | cut -d'/' -f2
+ {{ openshift_client_binary }} get dc -l component="{{ es_component }}" -n "{{ openshift_logging_elasticsearch_namespace }}" -o name | cut -d'/' -f2
register: _es_dcs
- set_fact:
diff --git a/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
index 879459cf6..14f2313e1 100644
--- a/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
+++ b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
@@ -1,13 +1,13 @@
---
## get all pods for the cluster
- command: >
- oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
+ {{ openshift_client_binary }} get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
register: _cluster_pods
### Check for cluster state before making changes -- if its red then we don't want to continue
- name: "Checking current health for {{ _es_node }} cluster"
shell: >
- oc exec "{{ _cluster_pods.stdout.split(' ')[0] }}" -c elasticsearch -n "{{ openshift_logging_elasticsearch_namespace }}" -- es_cluster_health
+ {{ openshift_client_binary }} exec "{{ _cluster_pods.stdout.split(' ')[0] }}" -c elasticsearch -n "{{ openshift_logging_elasticsearch_namespace }}" -- es_cluster_health
register: _pod_status
when: _cluster_pods.stdout_lines | count > 0
@@ -46,7 +46,7 @@
- name: "Disable shard balancing for logging-{{ _cluster_component }} cluster"
command: >
- oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "none" } }'
+ {{ openshift_client_binary }} exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "none" } }'
register: _disable_output
changed_when: "'\"acknowledged\":true' in _disable_output.stdout"
when: _cluster_pods.stdout_lines | count > 0
@@ -54,7 +54,7 @@
# Flush ES
- name: "Flushing for logging-{{ _cluster_component }} cluster"
command: >
- oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_flush/synced'
+ {{ openshift_client_binary }} exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_flush/synced'
register: _flush_output
changed_when: "'\"acknowledged\":true' in _flush_output.stdout"
when:
@@ -62,7 +62,7 @@
- full_restart_cluster | bool
- command: >
- oc get dc -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+ {{ openshift_client_binary }} get dc -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
register: _cluster_dcs
## restart all dcs for full restart
@@ -86,12 +86,12 @@
## we may need a new first pod to run against -- fetch them all again
- command: >
- oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
+ {{ openshift_client_binary }} get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
register: _cluster_pods
- name: "Enable shard balancing for logging-{{ _cluster_component }} cluster"
command: >
- oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "all" } }'
+ {{ openshift_client_binary }} exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "all" } }'
register: _enable_output
changed_when: "'\"acknowledged\":true' in _enable_output.stdout"
diff --git a/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml b/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml
index fe15e40fd..a1e172168 100644
--- a/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml
+++ b/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml
@@ -1,7 +1,7 @@
---
- name: "Rolling out new pod(s) for {{ _es_node }}"
command: >
- oc rollout latest {{ _es_node }} -n {{ openshift_logging_elasticsearch_namespace }}
+ {{ openshift_client_binary }} rollout latest {{ _es_node }} -n {{ openshift_logging_elasticsearch_namespace }}
- name: "Waiting for {{ _es_node }} to finish scaling up"
oc_obj:
@@ -21,12 +21,12 @@
- name: Gettings name(s) of replica pod(s)
command: >
- oc get pods -l deploymentconfig={{ _es_node }} -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+ {{ openshift_client_binary }} get pods -l deploymentconfig={{ _es_node }} -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
register: _pods
- name: "Waiting for ES to be ready for {{ _es_node }}"
shell: >
- oc exec "{{ _pod }}" -c elasticsearch -n "{{ openshift_logging_elasticsearch_namespace }}" -- es_cluster_health
+ {{ openshift_client_binary }} exec "{{ _pod }}" -c elasticsearch -n "{{ openshift_logging_elasticsearch_namespace }}" -- es_cluster_health
with_items: "{{ _pods.stdout.split(' ') }}"
loop_control:
loop_var: _pod
diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml
index dcbf7fd9f..21a29a204 100644
--- a/roles/openshift_master/tasks/system_container.yml
+++ b/roles/openshift_master/tasks/system_container.yml
@@ -18,6 +18,8 @@
state: latest
values:
- COMMAND=api
+ - "NODE_SERVICE={{ openshift_service_type }}-node.service"
+ - "DOCKER_SERVICE={{ openshift_docker_service_name }}.service"
- name: Install or Update HA controller master system container
oc_atomic_container:
@@ -26,3 +28,5 @@
state: latest
values:
- COMMAND=controllers
+ - "NODE_SERVICE={{ openshift_service_type }}-node.service"
+ - "DOCKER_SERVICE={{ openshift_docker_service_name }}.service"
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index 008f209d7..e1a533e8e 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -13,7 +13,7 @@
values:
- "DNS_DOMAIN={{ openshift.common.dns_domain }}"
- "DOCKER_SERVICE={{ openshift_docker_service_name }}.service"
- - "MASTER_SERVICE={{ openshift_service_type }}.service"
+ - "MASTER_SERVICE={{ openshift_service_type }}-master-controllers.service"
- 'ADDTL_MOUNTS={{ l_node_syscon_add_mounts2 }}'
state: latest
vars: