 playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml |  25
 playbooks/common/openshift-cluster/upgrades/post_control_plane.yml          |  87
 roles/openshift_logging/tasks/delete_logging.yaml                           |   1
 roles/openshift_logging/tasks/start_cluster.yaml                            | 104
 roles/openshift_logging/tasks/stop_cluster.yaml                             |  97
 roles/openshift_logging/tasks/upgrade_logging.yaml                          |  25
 6 files changed, 191 insertions(+), 148 deletions(-)
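
This commit replaces raw oc CLI invocations with the lib_openshift Ansible modules (oc_obj, oc_scale), so task logic reads structured module results instead of re-parsing CLI stdout. The recurring before/after pattern, as a minimal sketch (task bodies are illustrative, not copied from the diff):

    # Before: shell out to oc and parse text output
    - command: >
        {{ openshift.common.client_binary }} get dc -l component=es -o name
      register: es_dc

    # After: query through lib_openshift and read structured results
    - oc_obj:
        state: list
        kind: dc
        selector: "component=es"
      register: es_dc
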
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index 37c89374c..046535680 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -1,20 +1,17 @@
---
- name: Filter list of nodes to be upgraded if necessary
hosts: oo_first_master
+
+ roles:
+ - lib_openshift
+
tasks:
- name: Retrieve list of openshift nodes matching upgrade label
- command: >
- {{ openshift.common.client_binary }}
- get nodes
- --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- --selector={{ openshift_upgrade_nodes_label }}
- -o jsonpath='{.items[*].metadata.name}'
- register: matching_nodes
- changed_when: false
- when: openshift_upgrade_nodes_label is defined
-
- - set_fact:
- nodes_to_upgrade: "{{ matching_nodes.stdout.split(' ') }}"
+ oc_obj:
+ state: list
+ kind: node
+ selector: "{{ openshift_upgrade_nodes_label }}"
+ register: nodes_to_upgrade
when: openshift_upgrade_nodes_label is defined
# We got a list of nodes with the label, now we need to match these with inventory hosts
@@ -26,7 +23,9 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: " {{ groups['oo_nodes_to_config'] }}"
- when: openshift_upgrade_nodes_label is defined and hostvars[item].openshift.common.hostname in nodes_to_upgrade
+ when:
+ - openshift_upgrade_nodes_label is defined
+ - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
changed_when: false
# Build up the oo_nodes_to_upgrade group, use the list filtered by label if
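
The new when clause walks the structure that oc_obj registers for state: list. Assuming the usual shape of that result, the registered variable looks roughly like the sketch below (hostnames illustrative), which is why node names are reached via results.results[0]['items'] and then map(attribute='metadata.name'):

    # Approximate shape of the registered nodes_to_upgrade variable
    nodes_to_upgrade:
      results:
        returncode: 0
        results:
        - items:
          - metadata:
              name: node1.example.com
          - metadata:
              name: node2.example.com
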
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 4135f7e94..b79e2f7fa 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -9,43 +9,37 @@
registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', openshift_image_tag ) }}"
router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}"
oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
- roles:
- - openshift_manageiq
- # Create the new templates shipped in 3.2, existing templates are left
- # unmodified. This prevents the subsequent role definition for
- # openshift_examples from failing when trying to replace templates that do
- # not already exist. We could have potentially done a replace --force to
- # create and update in one step.
- - openshift_examples
- - openshift_hosted_templates
- # Update the existing templates
- - role: openshift_examples
- registry_url: "{{ openshift.master.registry_url }}"
- openshift_examples_import_command: replace
- - role: openshift_hosted_templates
- registry_url: "{{ openshift.master.registry_url }}"
- openshift_hosted_templates_import_command: replace
- pre_tasks:
+ pre_tasks:
+ - name: Load lib_openshift modules
+ include_role:
+ name: lib_openshift
# TODO: remove temp_skip_router_registry_upgrade variable. This is a short term hack
# to allow ops to use this control plane upgrade, without triggering router/registry
# upgrade which has not yet been synced with their process.
- name: Collect all routers
- command: >
- {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
+ oc_obj:
+ state: list
+ kind: pods
+ all_namespaces: True
+ selector: 'router'
register: all_routers
- failed_when: false
- changed_when: false
when: temp_skip_router_registry_upgrade is not defined
- - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
- when: all_routers.rc == 0 and temp_skip_router_registry_upgrade is not defined
+  - set_fact: haproxy_routers="{{ all_routers.results.results[0]['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
+ when:
+ - all_routers.results.returncode == 0
+ - temp_skip_router_registry_upgrade is not defined
- set_fact: haproxy_routers=[]
- when: all_routers.rc != 0 and temp_skip_router_registry_upgrade is not defined
+ when:
+ - all_routers.results.returncode != 0
+ - temp_skip_router_registry_upgrade is not defined
- name: Update router image to current version
- when: all_routers.rc == 0 and temp_skip_router_registry_upgrade is not defined
+ when:
+ - all_routers.results.returncode == 0
+ - temp_skip_router_registry_upgrade is not defined
command: >
{{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p
'{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
@@ -53,33 +47,56 @@
with_items: "{{ haproxy_routers }}"
- name: Check for default registry
- command: >
- {{ oc_cmd }} get -n default dc/docker-registry
+ oc_obj:
+ state: list
+ kind: dc
+      name: docker-registry
+      namespace: default
register: _default_registry
- failed_when: false
- changed_when: false
when: temp_skip_router_registry_upgrade is not defined
- name: Update registry image to current version
- when: _default_registry.rc == 0 and temp_skip_router_registry_upgrade is not defined
+ when:
+    - _default_registry.results.returncode == 0
+ - temp_skip_router_registry_upgrade is not defined
command: >
{{ oc_cmd }} patch dc/docker-registry -n default -p
'{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
--api-version=v1
+ roles:
+ - openshift_manageiq
+ # Create the new templates shipped in 3.2, existing templates are left
+ # unmodified. This prevents the subsequent role definition for
+ # openshift_examples from failing when trying to replace templates that do
+ # not already exist. We could have potentially done a replace --force to
+ # create and update in one step.
+ - openshift_examples
+ - openshift_hosted_templates
+ # Update the existing templates
+ - role: openshift_examples
+ registry_url: "{{ openshift.master.registry_url }}"
+ openshift_examples_import_command: replace
+ - role: openshift_hosted_templates
+ registry_url: "{{ openshift.master.registry_url }}"
+ openshift_hosted_templates_import_command: replace
+
# Check for warnings to be printed at the end of the upgrade:
- name: Check for warnings
hosts: oo_masters_to_config
tasks:
# Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond:
- - command: >
- grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml
+ - name: grep pluginOrderOverride
+ command: grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml
register: grep_plugin_order_override
when: openshift.common.version_gte_3_3_or_1_3 | bool
  failed_when: false
+  changed_when: false
+
- name: Warn if pluginOrderOverride is in use in master-config.yaml
- debug: msg="WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information."
- when: not grep_plugin_order_override | skipped and grep_plugin_order_override.rc == 0
+ debug:
+ msg: "WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information."
+ when:
+ - not grep_plugin_order_override | skipped
+ - grep_plugin_order_override.rc == 0
- include: ../reset_excluder.yml
tags:
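
Moving the roles list after pre_tasks does not change execution order (Ansible always runs pre_tasks before roles); what the tasks rely on is the include_role call, since the oc_* modules ship in the lib_openshift role's library/ directory and become resolvable once the role is pulled in. A minimal standalone sketch of that pattern (host group and task names illustrative):

    - hosts: masters
      pre_tasks:
      - name: Load lib_openshift modules
        include_role:
          name: lib_openshift
      # From here on, oc_obj resolves as a module
      - oc_obj:
          state: list
          kind: node
        register: all_nodes
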
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index 9621d0d1a..188ea246c 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -81,7 +81,6 @@
# delete our service accounts
- name: delete service accounts
oc_serviceaccount:
- kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
name: "{{ item }}"
namespace: "{{ openshift_logging_namespace }}"
state: absent
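
Dropping the explicit kubeconfig: argument leans on the lib_openshift modules' built-in default (/etc/origin/master/admin.kubeconfig), so the task no longer depends on the mktemp-staged copy. It can still be overridden per task when needed; a sketch with an illustrative service account name:

    - oc_serviceaccount:
        name: aggregated-logging-fluentd
        namespace: logging
        state: absent
        # kubeconfig is optional; pass it only for a non-default location
        kubeconfig: /path/to/admin.kubeconfig
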
diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml
index 69d2b2b6b..3f1bd7ad2 100644
--- a/roles/openshift_logging/tasks/start_cluster.yaml
+++ b/roles/openshift_logging/tasks/start_cluster.yaml
@@ -1,12 +1,14 @@
---
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o name
- register: fluentd_hosts
+- name: Retrieve list of fluentd hosts
+ oc_obj:
+ state: list
+ kind: node
when: "'--all' in openshift_logging_fluentd_hosts"
- check_mode: no
- changed_when: no
+ register: fluentd_hosts
-- set_fact: openshift_logging_fluentd_hosts={{ fluentd_hosts.stdout_lines | regex_replace('node/', '') }}
+- name: Set fact openshift_logging_fluentd_hosts
+ set_fact:
+ openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
when: "'--all' in openshift_logging_fluentd_hosts"
- name: start fluentd
@@ -19,107 +21,113 @@
loop_control:
loop_var: fluentd_host
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+- name: Retrieve elasticsearch
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=es"
+ namespace: "{{openshift_logging_namespace}}"
register: es_dc
- check_mode: no
- changed_when: no
- name: start elasticsearch
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 1
- with_items: "{{es_dc.stdout_lines}}"
+ with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}}
+- name: Retrieve kibana
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=kibana"
+ namespace: "{{openshift_logging_namespace}}"
register: kibana_dc
- check_mode: no
- changed_when: no
- name: start kibana
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: "{{ openshift_logging_kibana_replica_count | default (1) }}"
- with_items: "{{kibana_dc.stdout_lines}}"
+ with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}}
+- name: Retrieve curator
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=curator"
+ namespace: "{{openshift_logging_namespace}}"
register: curator_dc
- check_mode: no
- changed_when: no
- name: start curator
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 1
- with_items: "{{curator_dc.stdout_lines}}"
+ with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve elasticsearch-ops
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=es-ops"
+ namespace: "{{openshift_logging_namespace}}"
register: es_dc
- check_mode: no
- changed_when: no
- name: start elasticsearch-ops
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 1
- with_items: "{{es_dc.stdout_lines}}"
+ with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
when: openshift_logging_use_ops | bool
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve kibana-ops
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=kibana-ops"
+ namespace: "{{openshift_logging_namespace}}"
register: kibana_dc
- check_mode: no
- changed_when: no
- name: start kibana-ops
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: "{{ openshift_logging_kibana_ops_replica_count | default (1) }}"
- with_items: "{{kibana_dc.stdout_lines}}"
+ with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
when: openshift_logging_use_ops | bool
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve curator-ops
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=curator-ops"
+ namespace: "{{openshift_logging_namespace}}"
register: curator_dc
- check_mode: no
- changed_when: no
- name: start curator-ops
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 1
- with_items: "{{curator_dc.stdout_lines}}"
+ with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
when: openshift_logging_use_ops | bool
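
Every component here follows the same retrieve-then-scale pair, and each pair repeats the name-extraction pipeline results.results[0]['items'] | map(attribute='metadata.name') | list. One way to keep the scale tasks short is to extract the names into a fact first; a sketch for the kibana pair (the fact name is illustrative):

    - name: Retrieve kibana
      oc_obj:
        state: list
        kind: dc
        selector: "component=kibana"
        namespace: "{{ openshift_logging_namespace }}"
      register: kibana_dc

    - name: Extract dc names once
      set_fact:
        kibana_dc_names: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"

    - name: start kibana
      oc_scale:
        kind: dc
        name: "{{ object }}"
        namespace: "{{ openshift_logging_namespace }}"
        replicas: "{{ openshift_logging_kibana_replica_count | default(1) }}"
      with_items: "{{ kibana_dc_names }}"
      loop_control:
        loop_var: object
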
diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml
index 7826efabe..ee6c800f2 100644
--- a/roles/openshift_logging/tasks/stop_cluster.yaml
+++ b/roles/openshift_logging/tasks/stop_cluster.yaml
@@ -1,11 +1,14 @@
---
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o name
- register: fluentd_hosts
+- name: Retrieve list of fluentd hosts
+ oc_obj:
+ state: list
+ kind: node
when: "'--all' in openshift_logging_fluentd_hosts"
- changed_when: no
+ register: fluentd_hosts
-- set_fact: openshift_logging_fluentd_hosts={{ fluentd_hosts.stdout_lines | regex_replace('node/', '') }}
+- name: Set fact openshift_logging_fluentd_hosts
+ set_fact:
+ openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
when: "'--all' in openshift_logging_fluentd_hosts"
- name: stop fluentd
@@ -18,101 +21,113 @@
loop_control:
loop_var: fluentd_host
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+- name: Retrieve elasticsearch
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=es"
+ namespace: "{{openshift_logging_namespace}}"
register: es_dc
- changed_when: no
- name: stop elasticsearch
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 0
- with_items: "{{es_dc.stdout_lines}}"
+ with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}}
+- name: Retrieve kibana
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=kibana"
+ namespace: "{{openshift_logging_namespace}}"
register: kibana_dc
- changed_when: no
- name: stop kibana
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 0
- with_items: "{{kibana_dc.stdout_lines}}"
+ with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}}
+- name: Retrieve curator
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=curator"
+ namespace: "{{openshift_logging_namespace}}"
register: curator_dc
- changed_when: no
- name: stop curator
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 0
- with_items: "{{curator_dc.stdout_lines}}"
+ with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve elasticsearch-ops
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=es-ops"
+ namespace: "{{openshift_logging_namespace}}"
register: es_dc
- changed_when: no
- name: stop elasticsearch-ops
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 0
- with_items: "{{es_dc.stdout_lines}}"
+ with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
when: openshift_logging_use_ops | bool
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve kibana-ops
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=kibana-ops"
+ namespace: "{{openshift_logging_namespace}}"
register: kibana_dc
- changed_when: no
- name: stop kibana-ops
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 0
- with_items: "{{kibana_dc.stdout_lines}}"
+ with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
when: openshift_logging_use_ops | bool
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve curator-ops
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=curator-ops"
+ namespace: "{{openshift_logging_namespace}}"
register: curator_dc
- changed_when: no
- name: stop curator-ops
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 0
- with_items: "{{curator_dc.stdout_lines}}"
+ with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
when: openshift_logging_use_ops | bool
diff --git a/roles/openshift_logging/tasks/upgrade_logging.yaml b/roles/openshift_logging/tasks/upgrade_logging.yaml
index 0dc31932c..0421cdf58 100644
--- a/roles/openshift_logging/tasks/upgrade_logging.yaml
+++ b/roles/openshift_logging/tasks/upgrade_logging.yaml
@@ -8,29 +8,34 @@
start_cluster: False
# start ES so that we can run migrate script
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+- name: Retrieve elasticsearch
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=es"
+ namespace: "{{openshift_logging_namespace}}"
register: es_dc
- check_mode: no
- name: start elasticsearch
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
replicas: 1
- with_items: "{{es_dc.stdout_lines}}"
+ with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
-- command: >
- {{ openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get pods -n {{openshift_logging_namespace}} -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}'
+- name: Wait for elasticsearch pods to start
+ oc_obj:
+ state: list
+    kind: pods
+ selector: "component=es"
+ namespace: "{{openshift_logging_namespace}}"
register: running_pod
- until: running_pod.stdout != ''
+  until: running_pod.results.results[0]['items'] | selectattr('status.phase', 'equalto', 'Running') | list | length > 0
retries: 30
delay: 10
- changed_when: no
- check_mode: no
- name: Run upgrade script
script: es_migration.sh {{openshift.common.config_base}}/logging/ca.crt {{openshift.common.config_base}}/logging/system.admin.key {{openshift.common.config_base}}/logging/system.admin.crt {{openshift_logging_es_host}} {{openshift_logging_es_port}} {{openshift_logging_namespace}}
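
The until condition has to be plain Jinja: JSONPath such as items[?(@.status.phase == "Running")] only works inside oc get -o jsonpath=..., not in an Ansible conditional. Filtering the structured oc_obj result for running pods looks like this (a sketch of the wait idiom, assuming the result shape sketched earlier):

    - name: Wait for a running es pod
      oc_obj:
        state: list
        kind: pods
        selector: "component=es"
        namespace: "{{ openshift_logging_namespace }}"
      register: running_pod
      until: running_pod.results.results[0]['items'] | selectattr('status.phase', 'equalto', 'Running') | list | length > 0
      retries: 30
      delay: 10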