 filter_plugins/oo_filters.py | 23
 playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml | 31
 playbooks/common/openshift-cluster/upgrades/etcd/backup.yml | 6
 playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml | 15
 playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml | 25
 playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 93
 playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 45
 playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 45
 roles/cockpit-ui/meta/main.yml | 2
 roles/cockpit-ui/tasks/main.yml | 142
 roles/lib_openshift/library/oadm_manage_node.py | 2
 roles/lib_openshift/library/oc_edit.py | 5
 roles/lib_openshift/library/oc_env.py | 5
 roles/lib_openshift/library/oc_label.py | 4
 roles/lib_openshift/library/oc_obj.py | 5
 roles/lib_openshift/library/oc_process.py | 5
 roles/lib_openshift/library/oc_route.py | 3
 roles/lib_openshift/library/oc_scale.py | 5
 roles/lib_openshift/library/oc_secret.py | 5
 roles/lib_openshift/library/oc_serviceaccount.py | 3
 roles/lib_openshift/library/oc_serviceaccount_secret.py | 2
 roles/lib_openshift/src/class/oadm_manage_node.py | 2
 roles/lib_openshift/src/class/oc_edit.py | 5
 roles/lib_openshift/src/class/oc_env.py | 5
 roles/lib_openshift/src/class/oc_label.py | 4
 roles/lib_openshift/src/class/oc_obj.py | 5
 roles/lib_openshift/src/class/oc_process.py | 5
 roles/lib_openshift/src/class/oc_route.py | 3
 roles/lib_openshift/src/class/oc_scale.py | 5
 roles/lib_openshift/src/class/oc_secret.py | 5
 roles/lib_openshift/src/class/oc_serviceaccount.py | 3
 roles/lib_openshift/src/class/oc_serviceaccount_secret.py | 2
 roles/openshift_facts/tasks/main.yml | 104
 roles/openshift_hosted/tasks/registry/secure.yml | 16
 roles/openshift_hosted_templates/tasks/main.yml | 2
 roles/openshift_logging/defaults/main.yml | 2
 roles/openshift_logging/tasks/delete_logging.yaml | 1
 roles/openshift_logging/tasks/label_node.yaml | 52
 roles/openshift_logging/tasks/start_cluster.yaml | 114
 roles/openshift_logging/tasks/stop_cluster.yaml | 107
 roles/openshift_logging/tasks/upgrade_logging.yaml | 25
 roles/openshift_manage_node/meta/main.yml | 15
 roles/openshift_manage_node/tasks/main.yml | 66
 roles/openshift_manageiq/tasks/main.yaml | 7
 roles/openshift_metrics/defaults/main.yaml | 2
 roles/openshift_node_upgrade/tasks/main.yml | 6
 46 files changed, 517 insertions(+), 517 deletions(-)
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index c9390efe6..a833a5d93 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -242,6 +242,28 @@ def oo_combine_dict(data, in_joiner='=', out_joiner=' '):
return out_joiner.join([in_joiner.join([k, str(v)]) for k, v in data.items()])
+def oo_dict_to_list_of_dict(data, key_title='key', value_title='value'):
+ """Take a dict and arrange it as a list of dicts
+
+ Input data:
+ {'region': 'infra', 'test_k': 'test_v'}
+
+ Return data:
+ [{'key': 'region', 'value': 'infra'}, {'key': 'test_k', 'value': 'test_v'}]
+
+ Written for use with the oc_label module
+ """
+ if not isinstance(data, dict):
+ # pylint: disable=line-too-long
+ raise errors.AnsibleFilterError("|failed expects first param is a dict. Got %s. Type: %s" % (str(data), str(type(data))))
+
+ rval = []
+ for label in data.items():
+ rval.append({key_title: label[0], value_title: label[1]})
+
+ return rval
+
+
def oo_ami_selector(data, image_name):
""" This takes a list of amis and an image name and attempts to return
the latest ami.
@@ -951,6 +973,7 @@ class FilterModule(object):
"oo_ec2_volume_definition": oo_ec2_volume_definition,
"oo_combine_key_value": oo_combine_key_value,
"oo_combine_dict": oo_combine_dict,
+ "oo_dict_to_list_of_dict": oo_dict_to_list_of_dict,
"oo_split": oo_split,
"oo_filter_list": oo_filter_list,
"oo_parse_heat_stack_outputs": oo_parse_heat_stack_outputs,
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 13e1da961..5d3280328 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -22,12 +22,24 @@
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
serial: 1
any_errors_fatal: true
+
+ roles:
+ - lib_openshift
+
tasks:
- - name: Prepare for Node draining
- command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=false
+ - name: Mark node unschedulable
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: False
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
+ retries: 10
+ delay: 5
+ register: node_unschedulable
+ until: node_unschedulable|succeeded
+ when:
+ - l_docker_upgrade is defined
+ - l_docker_upgrade | bool
+ - inventory_hostname in groups.oo_nodes_to_upgrade
- name: Drain Node for Kubelet upgrade
command: >
@@ -39,7 +51,12 @@
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
- command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=true
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: True
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade and openshift.node.schedulable | bool
+ retries: 10
+ delay: 5
+ register: node_schedulable
+ until: node_schedulable|succeeded
+ when: node_unschedulable|changed
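
The two oadm_manage_node tasks above form one idiom that this diff repeats in upgrade_control_plane.yml and upgrade_nodes.yml below: mark the node unschedulable with retries, do the disruptive work, then restore schedulability only when this run actually changed it. A condensed sketch, assuming lib_openshift is loaded and groups.oo_first_master is populated:

- name: Mark node unschedulable
  oadm_manage_node:
    node: "{{ openshift.node.nodename | lower }}"
    schedulable: False
  delegate_to: "{{ groups.oo_first_master.0 }}"
  register: node_unschedulable
  until: node_unschedulable|succeeded
  retries: 10
  delay: 5

# ... drain the node and perform the upgrade here ...

- name: Restore node schedulability
  oadm_manage_node:
    node: "{{ openshift.node.nodename | lower }}"
    schedulable: True
  delegate_to: "{{ groups.oo_first_master.0 }}"
  register: node_schedulable
  until: node_schedulable|succeeded
  retries: 10
  delay: 5
  # Only restore if this run was the one that flipped it off
  when: node_unschedulable|changed
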
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index 45aabf3e4..7ef79afa9 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -29,12 +29,18 @@
- name: Check available disk space for etcd backup
shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
register: avail_disk
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
# TODO: replace shell module with command and update later checks
- name: Check current embedded etcd disk usage
shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
register: etcd_disk_usage
when: embedded_etcd | bool
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
- name: Abort if insufficient disk space for etcd backup
fail:
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
index 690858c53..a9b5b94e6 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -9,21 +9,36 @@
register: etcd_rpm_version
failed_when: false
when: not openshift.common.is_containerized | bool
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+
- name: Record containerized etcd version
command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
register: etcd_container_version
failed_when: false
when: openshift.common.is_containerized | bool
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+
- name: Record containerized etcd version
command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
register: etcd_container_version
failed_when: false
when: openshift.common.is_containerized | bool and not openshift.common.is_etcd_system_container | bool
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+
- name: Record containerized etcd version
command: runc exec etcd_container rpm -qa --qf '%{version}' etcd\*
register: etcd_container_version
failed_when: false
when: openshift.common.is_containerized | bool and openshift.common.is_etcd_system_container | bool
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
# I really dislike this copy/pasta but I wasn't able to find a way to get it to loop
# through hosts, then loop through tasks only when appropriate
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index 37c89374c..046535680 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -1,20 +1,17 @@
---
- name: Filter list of nodes to be upgraded if necessary
hosts: oo_first_master
+
+ roles:
+ - lib_openshift
+
tasks:
- name: Retrieve list of openshift nodes matching upgrade label
- command: >
- {{ openshift.common.client_binary }}
- get nodes
- --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- --selector={{ openshift_upgrade_nodes_label }}
- -o jsonpath='{.items[*].metadata.name}'
- register: matching_nodes
- changed_when: false
- when: openshift_upgrade_nodes_label is defined
-
- - set_fact:
- nodes_to_upgrade: "{{ matching_nodes.stdout.split(' ') }}"
+ oc_obj:
+ state: list
+ kind: node
+ selector: "{{ openshift_upgrade_nodes_label }}"
+ register: nodes_to_upgrade
when: openshift_upgrade_nodes_label is defined
# We got a list of nodes with the label, now we need to match these with inventory hosts
@@ -26,7 +23,9 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: " {{ groups['oo_nodes_to_config'] }}"
- when: openshift_upgrade_nodes_label is defined and hostvars[item].openshift.common.hostname in nodes_to_upgrade
+ when:
+ - openshift_upgrade_nodes_label is defined
+ - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
changed_when: false
# Build up the oo_nodes_to_upgrade group, use the list filtered by label if
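
The oc_obj register used above is nested two levels deep, which is why the when clause maps over results.results[0]['items']. A minimal sketch of unpacking that structure, reusing the nodes_to_upgrade register from this hunk:

# oc_obj with state=list registers a structure roughly like:
#   nodes_to_upgrade.results.results[0]['items']  -> list of node objects
- name: Show node names matching the upgrade label
  debug:
    msg: "{{ nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
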
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 4135f7e94..f0191e380 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -9,77 +9,100 @@
registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', openshift_image_tag ) }}"
router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}"
oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
- roles:
- - openshift_manageiq
- # Create the new templates shipped in 3.2, existing templates are left
- # unmodified. This prevents the subsequent role definition for
- # openshift_examples from failing when trying to replace templates that do
- # not already exist. We could have potentially done a replace --force to
- # create and update in one step.
- - openshift_examples
- - openshift_hosted_templates
- # Update the existing templates
- - role: openshift_examples
- registry_url: "{{ openshift.master.registry_url }}"
- openshift_examples_import_command: replace
- - role: openshift_hosted_templates
- registry_url: "{{ openshift.master.registry_url }}"
- openshift_hosted_templates_import_command: replace
- pre_tasks:
+ pre_tasks:
+ - name: Load lib_openshift modules
+ include_role:
+ name: lib_openshift
# TODO: remove temp_skip_router_registry_upgrade variable. This is a short term hack
# to allow ops to use this control plane upgrade, without triggering router/registry
# upgrade which has not yet been synced with their process.
- name: Collect all routers
- command: >
- {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
+ oc_obj:
+ state: list
+ kind: pods
+ all_namespaces: True
+ selector: 'router'
register: all_routers
- failed_when: false
- changed_when: false
when: temp_skip_router_registry_upgrade is not defined
- - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
- when: all_routers.rc == 0 and temp_skip_router_registry_upgrade is not defined
+ - set_fact: haproxy_routers="{{ all_routers.results.results[0]['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
+ when:
+ - all_routers.results.returncode == 0
+ - temp_skip_router_registry_upgrade is not defined
- set_fact: haproxy_routers=[]
- when: all_routers.rc != 0 and temp_skip_router_registry_upgrade is not defined
+ when:
+ - all_routers.results.returncode != 0
+ - temp_skip_router_registry_upgrade is not defined
- name: Update router image to current version
- when: all_routers.rc == 0 and temp_skip_router_registry_upgrade is not defined
+ when:
+ - all_routers.results.returncode == 0
+ - temp_skip_router_registry_upgrade is not defined
command: >
{{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p
'{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
--api-version=v1
with_items: "{{ haproxy_routers }}"
+ # AUDIT:changed_when_note: `false` not being set here. What we
+ # need to do is check the current router image version and see if
+ # this task needs to be run.
- name: Check for default registry
- command: >
- {{ oc_cmd }} get -n default dc/docker-registry
+ oc_obj:
+ state: list
+ kind: dc
+ name: docker-registry
register: _default_registry
- failed_when: false
- changed_when: false
when: temp_skip_router_registry_upgrade is not defined
- name: Update registry image to current version
- when: _default_registry.rc == 0 and temp_skip_router_registry_upgrade is not defined
+ when:
+ - _default_registry.results.results[0] != {}
+ - temp_skip_router_registry_upgrade is not defined
command: >
{{ oc_cmd }} patch dc/docker-registry -n default -p
'{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
--api-version=v1
+ # AUDIT:changed_when_note: `false` not being set here. What we
+ # need to do is check the current registry image version and see
+ # if this task needs to be run.
+
+ roles:
+ - openshift_manageiq
+ # Create the new templates shipped in 3.2, existing templates are left
+ # unmodified. This prevents the subsequent role definition for
+ # openshift_examples from failing when trying to replace templates that do
+ # not already exist. We could have potentially done a replace --force to
+ # create and update in one step.
+ - openshift_examples
+ - openshift_hosted_templates
+ # Update the existing templates
+ - role: openshift_examples
+ registry_url: "{{ openshift.master.registry_url }}"
+ openshift_examples_import_command: replace
+ - role: openshift_hosted_templates
+ registry_url: "{{ openshift.master.registry_url }}"
+ openshift_hosted_templates_import_command: replace
# Check for warnings to be printed at the end of the upgrade:
- name: Check for warnings
hosts: oo_masters_to_config
tasks:
# Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond:
- - command: >
- grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml
+ - name: grep pluginOrderOverride
+ command: grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml
register: grep_plugin_order_override
when: openshift.common.version_gte_3_3_or_1_3 | bool
- failed_when: false
+ changed_when: false
+
- name: Warn if pluginOrderOverride is in use in master-config.yaml
- debug: msg="WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information."
- when: not grep_plugin_order_override | skipped and grep_plugin_order_override.rc == 0
+ debug:
+ msg: "WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information."
+ when:
+ - not grep_plugin_order_override | skipped
+ - grep_plugin_order_override.rc == 0
- include: ../reset_excluder.yml
tags:
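
One subtlety in the registry handling earlier in this file: oc_obj reports a missing object as an empty first result rather than a non-zero rc, so existence is tested against {}. A minimal sketch of that check:

- name: Check for default registry
  oc_obj:
    state: list
    kind: dc
    name: docker-registry
  register: _default_registry

- name: Act only when the registry exists
  debug:
    msg: "docker-registry deploymentconfig found"
  when: _default_registry.results.results[0] != {}
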
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index db2c27919..a4aefcdac 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -238,29 +238,22 @@
any_errors_fatal: true
pre_tasks:
+ - name: Load lib_openshift modules
+ include_role:
+ name: lib_openshift
+
# TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
# or docker actually needs an upgrade before proceeding. Perhaps best to save this until
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- - name: Determine if node is currently scheduleable
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
- register: node_output
- delegate_to: "{{ groups.oo_first_master.0 }}"
- changed_when: false
-
- - set_fact:
- was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
-
- name: Mark node unschedulable
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: False
delegate_to: "{{ groups.oo_first_master.0 }}"
- # NOTE: There is a transient "object has been modified" error here, allow a couple
- # retries for a more reliable upgrade.
- register: node_unsched
- until: node_unsched.rc == 0
- retries: 3
- delay: 1
+ retries: 10
+ delay: 5
+ register: node_unschedulable
+ until: node_unschedulable|succeeded
- name: Drain Node for Kubelet upgrade
command: >
@@ -268,17 +261,19 @@
delegate_to: "{{ groups.oo_first_master.0 }}"
roles:
+ - lib_openshift
- openshift_facts
- docker
- openshift_node_upgrade
post_tasks:
- name: Set node schedulability
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: True
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: was_schedulable | bool
- register: node_sched
- until: node_sched.rc == 0
- retries: 3
- delay: 1
+ retries: 10
+ delay: 5
+ register: node_schedulable
+ until: node_schedulable|succeeded
+ when: node_unschedulable|changed
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index e45b635f7..e3a98fd9b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -7,29 +7,22 @@
any_errors_fatal: true
pre_tasks:
+ - name: Load lib_openshift modules
+ include_role:
+ name: lib_openshift
+
# TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
# or docker actually needs an upgrade before proceeding. Perhaps best to save this until
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- - name: Determine if node is currently scheduleable
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
- register: node_output
- delegate_to: "{{ groups.oo_first_master.0 }}"
- changed_when: false
-
- - set_fact:
- was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
-
- name: Mark node unschedulable
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: False
delegate_to: "{{ groups.oo_first_master.0 }}"
- # NOTE: There is a transient "object has been modified" error here, allow a couple
- # retries for a more reliable upgrade.
- register: node_unsched
- until: node_unsched.rc == 0
- retries: 3
- delay: 1
+ retries: 10
+ delay: 5
+ register: node_unschedulable
+ until: node_unschedulable|succeeded
- name: Drain Node for Kubelet upgrade
command: >
@@ -37,20 +30,22 @@
delegate_to: "{{ groups.oo_first_master.0 }}"
roles:
+ - lib_openshift
- openshift_facts
- docker
- openshift_node_upgrade
post_tasks:
- name: Set node schedulability
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: True
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: was_schedulable | bool
- register: node_sched
- until: node_sched.rc == 0
- retries: 3
- delay: 1
+ retries: 10
+ delay: 5
+ register: node_schedulable
+ until: node_schedulable|succeeded
+ when: node_unschedulable|changed
- include: ../reset_excluder.yml
tags:
diff --git a/roles/cockpit-ui/meta/main.yml b/roles/cockpit-ui/meta/main.yml
index 6ad2e324a..4d619fff6 100644
--- a/roles/cockpit-ui/meta/main.yml
+++ b/roles/cockpit-ui/meta/main.yml
@@ -11,3 +11,5 @@ galaxy_info:
- 7
categories:
- cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml
index f2ef4f161..8bd68787a 100644
--- a/roles/cockpit-ui/tasks/main.yml
+++ b/roles/cockpit-ui/tasks/main.yml
@@ -1,86 +1,58 @@
---
-- name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
-
-- set_fact:
- openshift_hosted_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
-
-- name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_hosted_kubeconfig }}
- changed_when: False
-
-- name: Determine if docker-registry service exists
- command: >
- {{ openshift.common.client_binary }} get svc/docker-registry
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: check_docker_registry_exists
- failed_when: false
- changed_when: false
-
-- name: Create passthrough route for docker-registry
- command: >
- {{ openshift.common.client_binary }} create route passthrough
- --service docker-registry
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: create_docker_registry_route
- changed_when: "'already exists' not in create_docker_registry_route.stderr"
- failed_when: "'already exists' not in create_docker_registry_route.stderr and create_docker_registry_route.rc != 0"
- when: check_docker_registry_exists.rc == 0
-
-- name: Create passthrough route for registry-console
- command: >
- {{ openshift.common.client_binary }} create route passthrough
- --service registry-console
- --port registry-console
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: create_registry_console_route
- changed_when: "'already exists' not in create_registry_console_route.stderr"
- failed_when: "'already exists' not in create_registry_console_route.stderr and create_registry_console_route.rc != 0"
- when: check_docker_registry_exists.rc == 0
-
-- name: Retrieve docker-registry route
- command: >
- {{ openshift.common.client_binary }} get route docker-registry
- -o jsonpath='{.spec.host}'
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: docker_registry_route
- changed_when: false
- when: check_docker_registry_exists.rc == 0
-
-- name: Retrieve cockpit kube url
- command: >
- {{ openshift.common.client_binary }} get route registry-console
- -o jsonpath='https://{.spec.host}'
- -n default
- register: registry_console_cockpit_kube_url
- changed_when: false
- when: check_docker_registry_exists.rc == 0
-
-# TODO: Need to fix the origin and enterprise templates so that they both respect IMAGE_PREFIX
-- name: Deploy registry-console
- command: >
- {{ openshift.common.client_binary }} new-app --template=registry-console
- {% if openshift_cockpit_deployer_prefix is defined %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %}
- {% if openshift_cockpit_deployer_version is defined %}-p IMAGE_VERSION="{{ openshift_cockpit_deployer_version }}"{% endif %}
- -p OPENSHIFT_OAUTH_PROVIDER_URL="{{ openshift.master.public_api_url }}"
- -p REGISTRY_HOST="{{ docker_registry_route.stdout }}"
- -p COCKPIT_KUBE_URL="{{ registry_console_cockpit_kube_url.stdout }}"
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: deploy_registry_console
- changed_when: "'already exists' not in deploy_registry_console.stderr"
- failed_when: "'already exists' not in deploy_registry_console.stderr and deploy_registry_console.rc != 0"
- when: check_docker_registry_exists.rc == 0
-
-- name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
+- block:
+ - name: Create passthrough route for docker-registry
+ oc_route:
+ kubeconfig: "{{ openshift_master_config_dir }}/admin.kubeconfig"
+ name: docker-registry
+ namespace: default
+ service_name: docker-registry
+ state: present
+ tls_termination: passthrough
+ register: docker_registry_route
+
+ - name: Create passthrough route for registry-console
+ oc_route:
+ kubeconfig: "{{ openshift_master_config_dir }}/admin.kubeconfig"
+ name: registry-console
+ namespace: default
+ service_name: registry-console
+ state: present
+ tls_termination: passthrough
+ register: registry_console_cockpit_kube
+
+ # XXX: Required for items still using command
+ - name: Create temp directory for kubeconfig
+ command: mktemp -d /tmp/openshift-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+ - set_fact:
+ openshift_hosted_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+
+ - name: Copy the admin client config(s)
+ command: >
+ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_hosted_kubeconfig }}
+ changed_when: False
+
+ # TODO: Need to fix the origin and enterprise templates so that they both respect IMAGE_PREFIX
+ - name: Deploy registry-console
+ command: >
+ {{ openshift.common.client_binary }} new-app --template=registry-console
+ {% if openshift_cockpit_deployer_prefix is defined %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %}
+ {% if openshift_cockpit_deployer_version is defined %}-p IMAGE_VERSION="{{ openshift_cockpit_deployer_version }}"{% endif %}
+ -p OPENSHIFT_OAUTH_PROVIDER_URL="{{ openshift.master.public_api_url }}"
+ -p REGISTRY_HOST="{{ docker_registry_route.results.results[0].spec.host }}"
+ -p COCKPIT_KUBE_URL="https://{{ registry_console_cockpit_kube.results.results[0].spec.host }}"
+ --config={{ openshift_hosted_kubeconfig }}
+ -n default
+ register: deploy_registry_console
+ changed_when: "'already exists' not in deploy_registry_console.stderr"
+ failed_when: "'already exists' not in deploy_registry_console.stderr and deploy_registry_console.rc != 0"
+
+ - name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
+ # XXX: End required for items still using command
+ run_once: true
diff --git a/roles/lib_openshift/library/oadm_manage_node.py b/roles/lib_openshift/library/oadm_manage_node.py
index 8e217ac28..6c0ff9b13 100644
--- a/roles/lib_openshift/library/oadm_manage_node.py
+++ b/roles/lib_openshift/library/oadm_manage_node.py
@@ -1296,7 +1296,7 @@ class ManageNode(OpenShiftCLI):
config,
verbose=False):
''' Constructor for ManageNode '''
- super(ManageNode, self).__init__(None, config.kubeconfig)
+ super(ManageNode, self).__init__(None, kubeconfig=config.kubeconfig, verbose=verbose)
self.config = config
def evacuate(self):
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index 11b87a015..a565b32f2 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -1314,13 +1314,10 @@ class Edit(OpenShiftCLI):
separator='.',
verbose=False):
''' Constructor for OpenshiftOC '''
- super(Edit, self).__init__(namespace, kubeconfig)
- self.namespace = namespace
+ super(Edit, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.kind = kind
self.name = resource_name
- self.kubeconfig = kubeconfig
self.separator = separator
- self.verbose = verbose
def get(self):
'''return a secret by name '''
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index 06c242db6..e00f5cdcc 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -1630,13 +1630,10 @@ class OCEnv(OpenShiftCLI):
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OpenshiftOC '''
- super(OCEnv, self).__init__(namespace, kubeconfig)
+ super(OCEnv, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.kind = kind
self.name = resource_name
- self.namespace = namespace
self.env_vars = env_vars
- self.kubeconfig = kubeconfig
- self.verbose = verbose
self._resource = None
@property
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index f67eb2552..e168614bd 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -1294,11 +1294,9 @@ class OCLabel(OpenShiftCLI):
selector=None,
verbose=False):
''' Constructor for OCLabel '''
- super(OCLabel, self).__init__(namespace, kubeconfig)
+ super(OCLabel, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.name = name
- self.namespace = namespace
self.kind = kind
- self.kubeconfig = kubeconfig
self.labels = labels
self._curr_labels = None
self.selector = selector
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index e4b8ac26c..d73d05472 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -1296,14 +1296,11 @@ class OCObject(OpenShiftCLI):
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftOC '''
- super(OCObject, self).__init__(namespace, kubeconfig,
+ super(OCObject, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose,
all_namespaces=all_namespaces)
self.kind = kind
- self.namespace = namespace
self.name = rname
self.selector = selector
- self.kubeconfig = kubeconfig
- self.verbose = verbose
def get(self):
'''return a kind by name '''
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 702cb02d4..bcb4d2289 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -1286,14 +1286,11 @@ class OCProcess(OpenShiftCLI):
tdata=None,
verbose=False):
''' Constructor for OpenshiftOC '''
- super(OCProcess, self).__init__(namespace, kubeconfig)
- self.namespace = namespace
+ super(OCProcess, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.name = tname
self.data = tdata
self.params = params
self.create = create
- self.kubeconfig = kubeconfig
- self.verbose = verbose
self._template = None
@property
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index 982a43ba3..d5dc84116 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -1461,9 +1461,8 @@ class OCRoute(OpenShiftCLI):
config,
verbose=False):
''' Constructor for OCVolume '''
- super(OCRoute, self).__init__(config.namespace, config.kubeconfig)
+ super(OCRoute, self).__init__(config.namespace, kubeconfig=config.kubeconfig, verbose=verbose)
self.config = config
- self.namespace = config.namespace
self._route = None
@property
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index 48a629b5e..be3b7f837 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -1629,13 +1629,10 @@ class OCScale(OpenShiftCLI):
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OCScale '''
- super(OCScale, self).__init__(namespace, kubeconfig)
+ super(OCScale, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.kind = kind
self.replicas = replicas
self.name = resource_name
- self.namespace = namespace
- self.kubeconfig = kubeconfig
- self.verbose = verbose
self._resource = None
@property
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index 526474f17..8598cb0ec 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -1418,12 +1418,9 @@ class OCSecret(OpenShiftCLI):
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OpenshiftOC '''
- super(OCSecret, self).__init__(namespace, kubeconfig)
- self.namespace = namespace
+ super(OCSecret, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.name = secret_name
- self.kubeconfig = kubeconfig
self.decode = decode
- self.verbose = verbose
def get(self):
'''return a secret by name '''
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index cd0847963..fcc5bbfa7 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -1396,9 +1396,8 @@ class OCServiceAccount(OpenShiftCLI):
config,
verbose=False):
''' Constructor for OCVolume '''
- super(OCServiceAccount, self).__init__(config.namespace, config.kubeconfig)
+ super(OCServiceAccount, self).__init__(config.namespace, kubeconfig=config.kubeconfig, verbose=verbose)
self.config = config
- self.namespace = config.namespace
self.service_account = None
def exists(self):
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index e22ccbfc2..ef10162c2 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -1391,7 +1391,7 @@ class OCServiceAccountSecret(OpenShiftCLI):
kind = 'sa'
def __init__(self, config, verbose=False):
''' Constructor for OpenshiftOC '''
- super(OCServiceAccountSecret, self).__init__(config.namespace, config.kubeconfig)
+ super(OCServiceAccountSecret, self).__init__(config.namespace, kubeconfig=config.kubeconfig, verbose=verbose)
self.config = config
self.verbose = verbose
self._service_account = None
diff --git a/roles/lib_openshift/src/class/oadm_manage_node.py b/roles/lib_openshift/src/class/oadm_manage_node.py
index 61b6a5ebe..c07320477 100644
--- a/roles/lib_openshift/src/class/oadm_manage_node.py
+++ b/roles/lib_openshift/src/class/oadm_manage_node.py
@@ -23,7 +23,7 @@ class ManageNode(OpenShiftCLI):
config,
verbose=False):
''' Constructor for ManageNode '''
- super(ManageNode, self).__init__(None, config.kubeconfig)
+ super(ManageNode, self).__init__(None, kubeconfig=config.kubeconfig, verbose=verbose)
self.config = config
def evacuate(self):
diff --git a/roles/lib_openshift/src/class/oc_edit.py b/roles/lib_openshift/src/class/oc_edit.py
index 0734e2085..629e5a007 100644
--- a/roles/lib_openshift/src/class/oc_edit.py
+++ b/roles/lib_openshift/src/class/oc_edit.py
@@ -13,13 +13,10 @@ class Edit(OpenShiftCLI):
separator='.',
verbose=False):
''' Constructor for OpenshiftOC '''
- super(Edit, self).__init__(namespace, kubeconfig)
- self.namespace = namespace
+ super(Edit, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.kind = kind
self.name = resource_name
- self.kubeconfig = kubeconfig
self.separator = separator
- self.verbose = verbose
def get(self):
'''return a secret by name '''
diff --git a/roles/lib_openshift/src/class/oc_env.py b/roles/lib_openshift/src/class/oc_env.py
index d34c8234e..748b46cb5 100644
--- a/roles/lib_openshift/src/class/oc_env.py
+++ b/roles/lib_openshift/src/class/oc_env.py
@@ -21,13 +21,10 @@ class OCEnv(OpenShiftCLI):
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OpenshiftOC '''
- super(OCEnv, self).__init__(namespace, kubeconfig)
+ super(OCEnv, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.kind = kind
self.name = resource_name
- self.namespace = namespace
self.env_vars = env_vars
- self.kubeconfig = kubeconfig
- self.verbose = verbose
self._resource = None
@property
diff --git a/roles/lib_openshift/src/class/oc_label.py b/roles/lib_openshift/src/class/oc_label.py
index 8e1ba9ceb..bd312c170 100644
--- a/roles/lib_openshift/src/class/oc_label.py
+++ b/roles/lib_openshift/src/class/oc_label.py
@@ -17,11 +17,9 @@ class OCLabel(OpenShiftCLI):
selector=None,
verbose=False):
''' Constructor for OCLabel '''
- super(OCLabel, self).__init__(namespace, kubeconfig)
+ super(OCLabel, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.name = name
- self.namespace = namespace
self.kind = kind
- self.kubeconfig = kubeconfig
self.labels = labels
self._curr_labels = None
self.selector = selector
diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py
index 2ec20e72c..21129a50c 100644
--- a/roles/lib_openshift/src/class/oc_obj.py
+++ b/roles/lib_openshift/src/class/oc_obj.py
@@ -16,14 +16,11 @@ class OCObject(OpenShiftCLI):
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftOC '''
- super(OCObject, self).__init__(namespace, kubeconfig,
+ super(OCObject, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose,
all_namespaces=all_namespaces)
self.kind = kind
- self.namespace = namespace
self.name = rname
self.selector = selector
- self.kubeconfig = kubeconfig
- self.verbose = verbose
def get(self):
'''return a kind by name '''
diff --git a/roles/lib_openshift/src/class/oc_process.py b/roles/lib_openshift/src/class/oc_process.py
index 80d81448d..9d29938aa 100644
--- a/roles/lib_openshift/src/class/oc_process.py
+++ b/roles/lib_openshift/src/class/oc_process.py
@@ -17,14 +17,11 @@ class OCProcess(OpenShiftCLI):
tdata=None,
verbose=False):
''' Constructor for OpenshiftOC '''
- super(OCProcess, self).__init__(namespace, kubeconfig)
- self.namespace = namespace
+ super(OCProcess, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.name = tname
self.data = tdata
self.params = params
self.create = create
- self.kubeconfig = kubeconfig
- self.verbose = verbose
self._template = None
@property
diff --git a/roles/lib_openshift/src/class/oc_route.py b/roles/lib_openshift/src/class/oc_route.py
index 42388ad0b..cb743e19d 100644
--- a/roles/lib_openshift/src/class/oc_route.py
+++ b/roles/lib_openshift/src/class/oc_route.py
@@ -11,9 +11,8 @@ class OCRoute(OpenShiftCLI):
config,
verbose=False):
''' Constructor for OCVolume '''
- super(OCRoute, self).__init__(config.namespace, config.kubeconfig)
+ super(OCRoute, self).__init__(config.namespace, kubeconfig=config.kubeconfig, verbose=verbose)
self.config = config
- self.namespace = config.namespace
self._route = None
@property
diff --git a/roles/lib_openshift/src/class/oc_scale.py b/roles/lib_openshift/src/class/oc_scale.py
index 16255688b..6c3ceb8cf 100644
--- a/roles/lib_openshift/src/class/oc_scale.py
+++ b/roles/lib_openshift/src/class/oc_scale.py
@@ -15,13 +15,10 @@ class OCScale(OpenShiftCLI):
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OCScale '''
- super(OCScale, self).__init__(namespace, kubeconfig)
+ super(OCScale, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.kind = kind
self.replicas = replicas
self.name = resource_name
- self.namespace = namespace
- self.kubeconfig = kubeconfig
- self.verbose = verbose
self._resource = None
@property
diff --git a/roles/lib_openshift/src/class/oc_secret.py b/roles/lib_openshift/src/class/oc_secret.py
index e99999c37..5eac27572 100644
--- a/roles/lib_openshift/src/class/oc_secret.py
+++ b/roles/lib_openshift/src/class/oc_secret.py
@@ -17,12 +17,9 @@ class OCSecret(OpenShiftCLI):
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OpenshiftOC '''
- super(OCSecret, self).__init__(namespace, kubeconfig)
- self.namespace = namespace
+ super(OCSecret, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.name = secret_name
- self.kubeconfig = kubeconfig
self.decode = decode
- self.verbose = verbose
def get(self):
'''return a secret by name '''
diff --git a/roles/lib_openshift/src/class/oc_serviceaccount.py b/roles/lib_openshift/src/class/oc_serviceaccount.py
index 47c7b5c94..d6777afc1 100644
--- a/roles/lib_openshift/src/class/oc_serviceaccount.py
+++ b/roles/lib_openshift/src/class/oc_serviceaccount.py
@@ -12,9 +12,8 @@ class OCServiceAccount(OpenShiftCLI):
config,
verbose=False):
''' Constructor for OCVolume '''
- super(OCServiceAccount, self).__init__(config.namespace, config.kubeconfig)
+ super(OCServiceAccount, self).__init__(config.namespace, kubeconfig=config.kubeconfig, verbose=verbose)
self.config = config
- self.namespace = config.namespace
self.service_account = None
def exists(self):
diff --git a/roles/lib_openshift/src/class/oc_serviceaccount_secret.py b/roles/lib_openshift/src/class/oc_serviceaccount_secret.py
index 750a74d33..4f1c8c926 100644
--- a/roles/lib_openshift/src/class/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/src/class/oc_serviceaccount_secret.py
@@ -7,7 +7,7 @@ class OCServiceAccountSecret(OpenShiftCLI):
kind = 'sa'
def __init__(self, config, verbose=False):
''' Constructor for OpenshiftOC '''
- super(OCServiceAccountSecret, self).__init__(config.namespace, config.kubeconfig)
+ super(OCServiceAccountSecret, self).__init__(config.namespace, kubeconfig=config.kubeconfig, verbose=verbose)
self.config = config
self.verbose = verbose
self._service_account = None
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 9a1982076..11bd68207 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -1,52 +1,64 @@
---
-- name: Detecting Operating System
- stat:
- path: /run/ostree-booted
- register: ostree_booted
+- block:
+ - name: Detecting Operating System
+ stat:
+ path: /run/ostree-booted
+ register: ostree_booted
-# Locally setup containerized facts for now
-- set_fact:
- l_is_atomic: "{{ ostree_booted.stat.exists }}"
-- set_fact:
- l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
- l_is_openvswitch_system_container: "{{ (use_openvswitch_system_container | default(use_system_containers) | bool) }}"
- l_is_node_system_container: "{{ (use_node_system_container | default(use_system_containers) | bool) }}"
- l_is_master_system_container: "{{ (use_master_system_container | default(use_system_containers) | bool) }}"
- l_is_etcd_system_container: "{{ (use_etcd_system_container | default(use_system_containers) | bool) }}"
+ # Locally setup containerized facts for now
+ - set_fact:
+ l_is_atomic: "{{ ostree_booted.stat.exists }}"
+ - set_fact:
+ l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
+ l_is_openvswitch_system_container: "{{ (use_openvswitch_system_container | default(use_system_containers) | bool) }}"
+ l_is_node_system_container: "{{ (use_node_system_container | default(use_system_containers) | bool) }}"
+ l_is_master_system_container: "{{ (use_master_system_container | default(use_system_containers) | bool) }}"
+ l_is_etcd_system_container: "{{ (use_etcd_system_container | default(use_system_containers) | bool) }}"
-- name: Ensure various deps are installed
- package: name={{ item }} state=present
- with_items: "{{ required_packages }}"
- when: not l_is_atomic | bool
+ - name: Ensure various deps are installed
+ package: name={{ item }} state=present
+ with_items: "{{ required_packages }}"
+ when: not l_is_atomic | bool
-- name: Gather Cluster facts and set is_containerized if needed
- openshift_facts:
- role: common
- local_facts:
- debug_level: "{{ openshift_debug_level | default(2) }}"
- # TODO: Deprecate deployment_type in favor of openshift_deployment_type
- deployment_type: "{{ openshift_deployment_type | default(deployment_type) }}"
- deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
- cluster_id: "{{ openshift_cluster_id | default('default') }}"
- hostname: "{{ openshift_hostname | default(None) }}"
- ip: "{{ openshift_ip | default(None) }}"
- is_containerized: "{{ l_is_containerized | default(None) }}"
- is_openvswitch_system_container: "{{ l_is_openvswitch_system_container | default(false) }}"
- is_node_system_container: "{{ l_is_node_system_container | default(false) }}"
- is_master_system_container: "{{ l_is_master_system_container | default(false) }}"
- is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}"
- system_images_registry: "{{ system_images_registry | default('') }}"
- public_hostname: "{{ openshift_public_hostname | default(None) }}"
- public_ip: "{{ openshift_public_ip | default(None) }}"
- portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
- http_proxy: "{{ openshift_http_proxy | default(None) }}"
- https_proxy: "{{ openshift_https_proxy | default(None) }}"
- no_proxy: "{{ openshift_no_proxy | default(None) }}"
- generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
- no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
- sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
- use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
+ - name: Gather Cluster facts and set is_containerized if needed
+ openshift_facts:
+ role: common
+ local_facts:
+ debug_level: "{{ openshift_debug_level | default(2) }}"
+ # TODO: Deprecate deployment_type in favor of openshift_deployment_type
+ deployment_type: "{{ openshift_deployment_type | default(deployment_type) }}"
+ deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
+ cluster_id: "{{ openshift_cluster_id | default('default') }}"
+ hostname: "{{ openshift_hostname | default(None) }}"
+ ip: "{{ openshift_ip | default(None) }}"
+ is_containerized: "{{ l_is_containerized | default(None) }}"
+ is_openvswitch_system_container: "{{ l_is_openvswitch_system_container | default(false) }}"
+ is_node_system_container: "{{ l_is_node_system_container | default(false) }}"
+ is_master_system_container: "{{ l_is_master_system_container | default(false) }}"
+ is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}"
+ system_images_registry: "{{ system_images_registry | default('') }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ public_ip: "{{ openshift_public_ip | default(None) }}"
+ portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+ http_proxy: "{{ openshift_http_proxy | default(None) }}"
+ https_proxy: "{{ openshift_https_proxy | default(None) }}"
+ no_proxy: "{{ openshift_no_proxy | default(None) }}"
+ generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
+ no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
+ sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
+ use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
-- name: Set repoquery command
+ - name: Set repoquery command
+ set_fact:
+ repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
+
+ # This `when` allows us to skip this expensive block of tasks on
+ # subsequent calls to the `openshift_facts` role. You will notice
+ # speed-ups in proportion to the size of your cluster as this will
+ # skip all tasks on the next calls to the `openshift_facts` role.
+ when:
+ - openshift_facts_init is not defined
+
+- name: Record that openshift_facts has initialized
set_fact:
- repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
+ openshift_facts_init: true
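
The guard added here is a general run-once idiom: condition an expensive block on a fact, then set the fact so later includes of the role skip the block entirely. A minimal sketch, with a placeholder task standing in for the real fact gathering:

- block:
    # Expensive, once-per-run work goes here (placeholder task)
    - name: Gather facts
      setup:
  when: openshift_facts_init is not defined

- name: Record that openshift_facts has initialized
  set_fact:
    openshift_facts_init: true
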
diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/registry/secure.yml
index 556da5304..8b44b94c6 100644
--- a/roles/openshift_hosted/tasks/registry/secure.yml
+++ b/roles/openshift_hosted/tasks/registry/secure.yml
@@ -1,13 +1,13 @@
---
- name: Create passthrough route for docker-registry
- command: >
- {{ openshift.common.client_binary }} create route passthrough
- --service docker-registry
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: create_docker_registry_route
- changed_when: "'already exists' not in create_docker_registry_route.stderr"
- failed_when: "'already exists' not in create_docker_registry_route.stderr and create_docker_registry_route.rc != 0"
+ oc_route:
+ kubeconfig: "{{ openshift_hosted_kubeconfig }}"
+ name: docker-registry
+ namespace: default
+ service_name: docker-registry
+ state: present
+ tls_termination: passthrough
+ run_once: true
- name: Determine if registry certificate must be created
stat:
diff --git a/roles/openshift_hosted_templates/tasks/main.yml b/roles/openshift_hosted_templates/tasks/main.yml
index 7d176bce3..89b92dfcc 100644
--- a/roles/openshift_hosted_templates/tasks/main.yml
+++ b/roles/openshift_hosted_templates/tasks/main.yml
@@ -4,6 +4,8 @@
become: False
register: copy_hosted_templates_mktemp
run_once: True
+ # AUDIT:changed_when: not set here because this task actually
+ # creates something
- name: Create tar of OpenShift examples
local_action: command tar -C "{{ role_path }}/files/{{ content_version }}/{{ hosted_deployment_type }}" -cvf "{{ copy_hosted_templates_mktemp.stdout }}/openshift-hosted-templates.tar" .
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index bdb168921..d9eebe688 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -19,7 +19,7 @@ openshift_logging_curator_memory_limit: null
openshift_logging_curator_ops_cpu_limit: 100m
openshift_logging_curator_ops_memory_limit: null
-openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default(kibana.{{openshift.common.dns_domain}}) }}"
+openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.{{openshift.common.dns_domain}}') }}"
openshift_logging_kibana_cpu_limit: null
openshift_logging_kibana_memory_limit: null
openshift_logging_kibana_proxy_debug: false
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index 9621d0d1a..188ea246c 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -81,7 +81,6 @@
# delete our service accounts
- name: delete service accounts
oc_serviceaccount:
- kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
name: "{{ item }}"
namespace: "{{ openshift_logging_namespace }}"
state: absent
diff --git a/roles/openshift_logging/tasks/label_node.yaml b/roles/openshift_logging/tasks/label_node.yaml
deleted file mode 100644
index ebe8f1ca8..000000000
--- a/roles/openshift_logging/tasks/label_node.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
----
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get node {{host}}
- -o jsonpath='{.metadata.labels}'
- register: node_labels
- when: not ansible_check_mode
- changed_when: no
-
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}}
- register: label_result
- failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr
- when:
- - value is defined
- - node_labels.stdout is defined
- - label not in node_labels.stdout
- - unlabel is not defined or not unlabel
- - not ansible_check_mode
-
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get node {{host}}
- -o jsonpath='{.metadata.labels.{{ label }}}'
- register: label_value
- ignore_errors: yes
- changed_when: no
- when:
- - value is defined
- - node_labels.stdout is defined
- - label in node_labels.stdout
- - unlabel is not defined or not unlabel
- - not ansible_check_mode
-
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}} --overwrite
- register: label_result
- failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr
- when:
- - value is defined
- - label_value.stdout is defined
- - label_value.stdout != value
- - unlabel is not defined or not unlabel
- - not ansible_check_mode
-
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}-
- register: label_result
- failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr
- when:
- - unlabel is defined
- - unlabel
- - not ansible_check_mode
- - label in node_labels.stdout
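
All of the command-based labeling deleted above is replaced by declarative oc_label calls in start_cluster.yaml below, fed by the new oo_dict_to_list_of_dict filter from the top of this diff. A minimal sketch, assuming a nodeselector dict such as {'region': 'infra'} and a placeholder node name:

- name: Label a node from a selector dict
  oc_label:
    name: node01.example.com   # placeholder node name
    kind: node
    state: add
    # {'region': 'infra'} becomes [{'key': 'region', 'value': 'infra'}]
    label: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
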
diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml
index 69d2b2b6b..3e97487dc 100644
--- a/roles/openshift_logging/tasks/start_cluster.yaml
+++ b/roles/openshift_logging/tasks/start_cluster.yaml
@@ -1,125 +1,133 @@
---
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o name
- register: fluentd_hosts
+- name: Retrieve list of fluentd hosts
+ oc_obj:
+ state: list
+ kind: node
when: "'--all' in openshift_logging_fluentd_hosts"
- check_mode: no
- changed_when: no
+ register: fluentd_hosts
-- set_fact: openshift_logging_fluentd_hosts={{ fluentd_hosts.stdout_lines | regex_replace('node/', '') }}
+- name: Set fact openshift_logging_fluentd_hosts
+ set_fact:
+ openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
when: "'--all' in openshift_logging_fluentd_hosts"
- name: start fluentd
- include: label_node.yaml
- vars:
- host: "{{fluentd_host}}"
- label: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
- value: "{{openshift_logging_fluentd_nodeselector.values()[0]}}"
+ oc_label:
+ name: "{{ fluentd_host }}"
+ kind: node
+ state: add
+ label: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
with_items: "{{ openshift_logging_fluentd_hosts }}"
loop_control:
loop_var: fluentd_host
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+- name: Retrieve elasticsearch
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=es"
+ namespace: "{{openshift_logging_namespace}}"
register: es_dc
- check_mode: no
- changed_when: no
- name: start elasticsearch
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 1
- with_items: "{{es_dc.stdout_lines}}"
+ with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}}
+- name: Retrieve kibana
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=kibana"
+ namespace: "{{openshift_logging_namespace}}"
register: kibana_dc
- check_mode: no
- changed_when: no
- name: start kibana
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: "{{ openshift_logging_kibana_replica_count | default (1) }}"
- with_items: "{{kibana_dc.stdout_lines}}"
+ with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}}
+- name: Retrieve curator
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=curator"
+ namespace: "{{openshift_logging_namespace}}"
register: curator_dc
- check_mode: no
- changed_when: no
- name: start curator
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 1
- with_items: "{{curator_dc.stdout_lines}}"
+ with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve elasticsearch-ops
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=es-ops"
+ namespace: "{{openshift_logging_namespace}}"
register: es_dc
- check_mode: no
- changed_when: no
- name: start elasticsearch-ops
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 1
- with_items: "{{es_dc.stdout_lines}}"
+ with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
when: openshift_logging_use_ops | bool
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve kibana-ops
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=kibana-ops"
+ namespace: "{{openshift_logging_namespace}}"
register: kibana_dc
- check_mode: no
- changed_when: no
- name: start kibana-ops
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: "{{ openshift_logging_kibana_ops_replica_count | default (1) }}"
- with_items: "{{kibana_dc.stdout_lines}}"
+ with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
when: openshift_logging_use_ops | bool
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve curator-ops
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=curator-ops"
+ namespace: "{{openshift_logging_namespace}}"
register: curator_dc
- check_mode: no
- changed_when: no
- name: start curator-ops
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 1
- with_items: "{{curator_dc.stdout_lines}}"
+ with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
when: openshift_logging_use_ops | bool
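For reference, every "results.results[0]['items'] | map(attribute='metadata.name') | list" expression above assumes the nested shape oc_obj returns for a list operation. A hedged sketch of that shape (object names invented, not real cluster output):

    # Shape assumed by: es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list
    results:
      results:
      - apiVersion: v1
        kind: List
        items:
        - kind: DeploymentConfig
          metadata:
            name: logging-es-abc123       # hypothetical
        - kind: DeploymentConfig
          metadata:
            name: logging-es-def456       # hypothetical
    # map(attribute='metadata.name') | list  ->  ['logging-es-abc123', 'logging-es-def456']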
diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml
index 7826efabe..bae6aebbb 100644
--- a/roles/openshift_logging/tasks/stop_cluster.yaml
+++ b/roles/openshift_logging/tasks/stop_cluster.yaml
@@ -1,118 +1,133 @@
---
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o name
- register: fluentd_hosts
+- name: Retrieve list of fluentd hosts
+ oc_obj:
+ state: list
+ kind: node
when: "'--all' in openshift_logging_fluentd_hosts"
- changed_when: no
+ register: fluentd_hosts
-- set_fact: openshift_logging_fluentd_hosts={{ fluentd_hosts.stdout_lines | regex_replace('node/', '') }}
+- name: Set fact openshift_logging_fluentd_hosts
+ set_fact:
+ openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
when: "'--all' in openshift_logging_fluentd_hosts"
- name: stop fluentd
- include: label_node.yaml
- vars:
- host: "{{fluentd_host}}"
- label: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
- unlabel: True
+ oc_label:
+ name: "{{ fluentd_host }}"
+ kind: node
+ state: absent
+ label: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
with_items: "{{ openshift_logging_fluentd_hosts }}"
loop_control:
loop_var: fluentd_host
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+- name: Retrieve elasticsearch
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=es"
+ namespace: "{{openshift_logging_namespace}}"
register: es_dc
- changed_when: no
- name: stop elasticsearch
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 0
- with_items: "{{es_dc.stdout_lines}}"
+ with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}}
+- name: Retrieve kibana
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=kibana"
+ namespace: "{{openshift_logging_namespace}}"
register: kibana_dc
- changed_when: no
- name: stop kibana
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 0
- with_items: "{{kibana_dc.stdout_lines}}"
+ with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}}
+- name: Retrieve curator
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=curator"
+ namespace: "{{openshift_logging_namespace}}"
register: curator_dc
- changed_when: no
- name: stop curator
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 0
- with_items: "{{curator_dc.stdout_lines}}"
+ with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve elasticsearch-ops
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=es-ops"
+ namespace: "{{openshift_logging_namespace}}"
register: es_dc
- changed_when: no
- name: stop elasticsearch-ops
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 0
- with_items: "{{es_dc.stdout_lines}}"
+ with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
when: openshift_logging_use_ops | bool
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve kibana-ops
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=kibana-ops"
+ namespace: "{{openshift_logging_namespace}}"
register: kibana_dc
- changed_when: no
- name: stop kibana-ops
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 0
- with_items: "{{kibana_dc.stdout_lines}}"
+ with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
when: openshift_logging_use_ops | bool
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve curator-ops
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=curator-ops"
+ namespace: "{{openshift_logging_namespace}}"
register: curator_dc
- changed_when: no
- name: stop curator-ops
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 0
- with_items: "{{curator_dc.stdout_lines}}"
+ with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
when: openshift_logging_use_ops | bool
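The fluentd start/stop tasks above rely on the oo_dict_to_list_of_dict filter to turn the nodeselector dict into the list-of-dicts form oc_label consumes. A minimal sketch, assuming the stock selector value (node name invented):

    # {'logging-infra-fluentd': 'true'}  ->  [{'key': 'logging-infra-fluentd', 'value': 'true'}]
    - name: stop fluentd on a single node (illustrative)
      oc_label:
        name: node1.example.com           # hypothetical node name
        kind: node
        state: absent
        labels: "{{ {'logging-infra-fluentd': 'true'} | oo_dict_to_list_of_dict }}"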
diff --git a/roles/openshift_logging/tasks/upgrade_logging.yaml b/roles/openshift_logging/tasks/upgrade_logging.yaml
index 0dc31932c..0421cdf58 100644
--- a/roles/openshift_logging/tasks/upgrade_logging.yaml
+++ b/roles/openshift_logging/tasks/upgrade_logging.yaml
@@ -8,29 +8,34 @@
start_cluster: False
# start ES so that we can run migrate script
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+- name: Retrieve elasticsearch
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=es"
+ namespace: "{{openshift_logging_namespace}}"
register: es_dc
- check_mode: no
- name: start elasticsearch
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
replicas: 1
- with_items: "{{es_dc.stdout_lines}}"
+ with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
-- command: >
- {{ openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get pods -n {{openshift_logging_namespace}} -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}'
+- name: Wait for elasticsearch pods to start
+  oc_obj:
+    state: list
+    kind: pod
+    selector: "component=es"
+    namespace: "{{openshift_logging_namespace}}"
   register: running_pod
-  until: running_pod.stdout != ''
+  until: running_pod.results.results[0]['items'] | selectattr('status.phase', 'match', 'Running') | list | length > 0
retries: 30
delay: 10
- changed_when: no
- check_mode: no
- name: Run upgrade script
script: es_migration.sh {{openshift.common.config_base}}/logging/ca.crt {{openshift.common.config_base}}/logging/system.admin.key {{openshift.common.config_base}}/logging/system.admin.crt {{openshift_logging_es_host}} {{openshift_logging_es_port}} {{openshift_logging_namespace}}
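For readability, the positional arguments es_migration.sh receives, in the order the task above passes them:

    # 1. CA certificate       {{openshift.common.config_base}}/logging/ca.crt
    # 2. admin client key     {{openshift.common.config_base}}/logging/system.admin.key
    # 3. admin client cert    {{openshift.common.config_base}}/logging/system.admin.crt
    # 4. elasticsearch host   {{openshift_logging_es_host}}
    # 5. elasticsearch port   {{openshift_logging_es_port}}
    # 6. target namespace     {{openshift_logging_namespace}}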
diff --git a/roles/openshift_manage_node/meta/main.yml b/roles/openshift_manage_node/meta/main.yml
new file mode 100644
index 000000000..d90cd28cf
--- /dev/null
+++ b/roles/openshift_manage_node/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Manage Node
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index c06758833..9a883feed 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -1,23 +1,4 @@
---
-- name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
- delegate_to: "{{ openshift_master_host }}"
- run_once: true
-
-- set_fact:
- openshift_manage_node_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
- delegate_to: "{{ openshift_master_host }}"
- run_once: true
-
-- name: Copy the admin client config(s)
- command: >
- cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ openshift_manage_node_kubeconfig }}
- changed_when: False
- delegate_to: "{{ openshift_master_host }}"
- run_once: true
-
# Necessary because when you're on a node that's also a master the master will be
# restarted after the node restarts docker and it will take up to 60 seconds for
# systemd to start the master again
@@ -46,38 +27,37 @@
run_once: true
- name: Wait for Node Registration
- command: >
- {{ hostvars[openshift_master_host].openshift.common.client_binary }} get node {{ openshift.node.nodename }}
- --config={{ openshift_manage_node_kubeconfig }}
- -n default
- register: omd_get_node
- until: omd_get_node.rc == 0
+ oc_obj:
+ name: "{{ openshift.node.nodename }}"
+ kind: node
+ state: list
+ register: get_node
+ until: "'metadata' in get_node.results.results[0]"
retries: 50
delay: 5
- changed_when: false
when: "'nodename' in openshift.node"
delegate_to: "{{ openshift_master_host }}"
- name: Set node schedulability
- command: >
- {{ hostvars[openshift_master_host].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable={{ 'true' if openshift.node.schedulable | bool else 'false' }}
- --config={{ openshift_manage_node_kubeconfig }}
- -n default
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: "{{ 'true' if openshift.node.schedulable | bool else 'false' }}"
+ retries: 10
+ delay: 5
+ register: node_schedulable
+ until: node_schedulable|succeeded
when: "'nodename' in openshift.node"
delegate_to: "{{ openshift_master_host }}"
- name: Label nodes
- command: >
- {{ hostvars[openshift_master_host].openshift.common.client_binary }} label --overwrite node {{ openshift.node.nodename }} {{ openshift.node.labels | oo_combine_dict }}
- --config={{ openshift_manage_node_kubeconfig }}
- -n default
- when: "'nodename' in openshift.node and 'labels' in openshift.node and openshift.node.labels != {}"
- delegate_to: "{{ openshift_master_host }}"
-
-- name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
+ oc_label:
+ name: "{{ openshift.node.nodename }}"
+ kind: node
+ state: add
+ labels: "{{ openshift.node.labels | oo_dict_to_list_of_dict }}"
+ namespace: default
+ when:
+ - "'nodename' in openshift.node"
+ - "'labels' in openshift.node"
+ - openshift.node.labels != {}
delegate_to: "{{ openshift_master_host }}"
- run_once: true
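The same modules are handy ad hoc; a hedged sketch of cordoning one node with oadm_manage_node, mirroring the retry pattern above (hostname invented):

    - name: Mark node unschedulable (illustrative)
      oadm_manage_node:
        node: node1.example.com           # hypothetical node name
        schedulable: 'false'
      register: cordon
      retries: 10
      delay: 5
      until: cordon|succeeded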
diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml
index e58947fd2..f202486a5 100644
--- a/roles/openshift_manageiq/tasks/main.yaml
+++ b/roles/openshift_manageiq/tasks/main.yaml
@@ -47,6 +47,9 @@
register: oshawkular_create_cluster_role
failed_when: "'already exists' not in oshawkular_create_cluster_role.stderr and oshawkular_create_cluster_role.rc != 0"
changed_when: oshawkular_create_cluster_role.rc == 0
+ # AUDIT:changed_when_note: Checking the return code is insufficient
+ # here. We really need to verify whether the role even exists before
+ # we run this task.
- name: Configure role/user permissions
command: >
@@ -56,6 +59,10 @@
register: osmiq_perm_task
failed_when: "'already exists' not in osmiq_perm_task.stderr and osmiq_perm_task.rc != 0"
changed_when: osmiq_perm_task.rc == 0
+ # AUDIT:changed_when_note: Checking the return code is insufficient
+ # here. We really need to compare the current role/user permissions
+ # with their expected state. I think we may have a module for this?
+
- name: Configure 3_2 role/user permissions
command: >
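One way to act on the AUDIT notes above is to probe for the object before creating it, so changed_when can reflect reality. A hedged sketch only; the role name and kubeconfig path are illustrative, not taken from this role:

    - name: Check whether the cluster role already exists (illustrative)
      command: >
        {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig
        get clusterrole hawkular-metrics-admin
      register: role_check
      failed_when: false
      changed_when: false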
diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml
index 83843f126..edaa7d0df 100644
--- a/roles/openshift_metrics/defaults/main.yaml
+++ b/roles/openshift_metrics/defaults/main.yaml
@@ -32,7 +32,7 @@ openshift_metrics_heapster_requests_memory: 0.9375G
openshift_metrics_heapster_requests_cpu: null
openshift_metrics_heapster_nodeselector: ""
-openshift_metrics_hostname: "hawkular-metrics.{{openshift_master_default_subdomain}}"
+openshift_metrics_hawkular_hostname: "hawkular-metrics.{{openshift_master_default_subdomain}}"
openshift_metrics_duration: 7
openshift_metrics_resolution: 30s
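Because the hostname variable is renamed, any inventory overriding the old name must switch to the new one; a hedged sketch (domain invented):

    # before (no longer read by the role):
    # openshift_metrics_hostname=hawkular-metrics.apps.example.com
    # after:
    openshift_metrics_hawkular_hostname=hawkular-metrics.apps.example.com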
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index b1d5f0e0f..609ca2a6e 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -75,3 +75,9 @@
# so containerized services should restart quickly as well.
retries: 24
delay: 5
+ # AUDIT:changed_when: `false` because we are only inspecting the
+ # state of the node, we aren't changing anything (we changed node
+ # service state in the previous task). You could say we shouldn't
+ # override this because something will be changing (the state of a
+ # service), but that should be part of the last task.
+ changed_when: false
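In isolation, the audited pattern is a read-only poll with an explicit changed_when override; a minimal sketch with an invented service name:

    - name: Wait for node service to come back (illustrative)
      command: systemctl is-active atomic-openshift-node   # service name illustrative
      register: node_service
      until: node_service.rc == 0
      retries: 24
      delay: 5
      changed_when: false   # inspection only; the restart happened in an earlier task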