Diffstat (limited to 'roles')
-rw-r--r--  roles/calico/README.md | 9
-rw-r--r--  roles/calico/defaults/main.yaml | 6
-rw-r--r--  roles/calico/templates/10-calico.conf.j2 (renamed from roles/calico/templates/calicoctl.conf.j2) | 0
-rw-r--r--  roles/calico/templates/calico.service.j2 | 8
-rw-r--r--  roles/calico/templates/calicoctl.cfg.j2 (renamed from roles/calico/templates/10-calico.cfg.j2) | 0
-rw-r--r--  roles/calico_master/README.md | 12
-rw-r--r--  roles/etcd/defaults/main.yaml | 6
-rw-r--r--  roles/etcd_common/defaults/main.yml | 11
-rw-r--r--  roles/etcd_common/vars/main.yml | 4
-rw-r--r--  roles/etcd_upgrade/defaults/main.yml | 9
-rw-r--r--  roles/etcd_upgrade/meta/main.yml | 16
-rw-r--r--  roles/etcd_upgrade/tasks/backup.yml | 71
-rw-r--r--  roles/etcd_upgrade/tasks/main.yml | 14
-rw-r--r--  roles/etcd_upgrade/tasks/upgrade.yml | 11
-rw-r--r--  roles/etcd_upgrade/tasks/upgrade_image.yml | 48
-rw-r--r--  roles/etcd_upgrade/tasks/upgrade_rpm.yml | 32
-rw-r--r--  roles/etcd_upgrade/vars/main.yml | 3
-rw-r--r--  roles/lib_openshift/library/oc_adm_ca_server_cert.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_manage_node.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_group.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_user.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_registry.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_router.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_clusterrole.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_configmap.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_edit.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_env.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_group.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_image.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_label.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_obj.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_objectvalidator.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_process.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_project.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_pvc.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_route.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_scale.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_secret.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_service.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount_secret.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_user.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_version.py | 2
-rw-r--r--  roles/lib_openshift/library/oc_volume.py | 2
-rw-r--r--  roles/lib_openshift/src/class/oc_process.py | 2
-rw-r--r--  roles/lib_openshift/src/lib/base.py | 2
-rw-r--r--  roles/openshift_certificate_expiry/README.md | 48
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 4
-rw-r--r--  roles/openshift_health_checker/action_plugins/openshift_health_check.py | 26
-rw-r--r--  roles/openshift_health_checker/callback_plugins/zz_failure_summary.py | 87
-rw-r--r--  roles/openshift_health_checker/library/etcdkeysize.py | 122
-rw-r--r--  roles/openshift_health_checker/meta/main.yml | 1
-rw-r--r--  roles/openshift_health_checker/openshift_checks/disk_availability.py | 6
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 172
-rw-r--r--  roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py | 84
-rw-r--r--  roles/openshift_health_checker/openshift_checks/etcd_volume.py | 58
-rw-r--r--  roles/openshift_health_checker/openshift_checks/memory_availability.py | 6
-rw-r--r--  roles/openshift_health_checker/test/action_plugin_test.py | 16
-rw-r--r--  roles/openshift_health_checker/test/disk_availability_test.py | 36
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py | 182
-rw-r--r--  roles/openshift_health_checker/test/etcd_imagedata_size_test.py | 328
-rw-r--r--  roles/openshift_health_checker/test/etcd_volume_test.py | 149
-rw-r--r--  roles/openshift_health_checker/test/memory_availability_test.py | 43
-rw-r--r--  roles/openshift_hosted/defaults/main.yml | 5
-rw-r--r--  roles/openshift_hosted/meta/main.yml | 5
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/s3.yml | 4
-rw-r--r--  roles/openshift_hosted/templates/registry_config.j2 | 4
-rw-r--r--  roles/openshift_logging/README.md | 27
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 32
-rw-r--r--  roles/openshift_logging/files/logging-deployer-sa.yaml | 6
-rw-r--r--  roles/openshift_logging/filter_plugins/openshift_logging.py | 2
-rw-r--r--  roles/openshift_logging/library/openshift_logging_facts.py | 2
-rw-r--r--  roles/openshift_logging/tasks/delete_logging.yaml | 121
-rw-r--r--  roles/openshift_logging/tasks/generate_certs.yaml | 30
-rw-r--r--  roles/openshift_logging/tasks/generate_clusterrolebindings.yaml | 13
-rw-r--r--  roles/openshift_logging/tasks/generate_clusterroles.yaml | 11
-rw-r--r--  roles/openshift_logging/tasks/generate_configmaps.yaml | 178
-rw-r--r--  roles/openshift_logging/tasks/generate_deploymentconfigs.yaml | 65
-rw-r--r--  roles/openshift_logging/tasks/generate_pvcs.yaml | 47
-rw-r--r--  roles/openshift_logging/tasks/generate_rolebindings.yaml | 12
-rw-r--r--  roles/openshift_logging/tasks/generate_routes.yaml | 77
-rw-r--r--  roles/openshift_logging/tasks/generate_secrets.yaml | 101
-rw-r--r--  roles/openshift_logging/tasks/generate_serviceaccounts.yaml | 14
-rw-r--r--  roles/openshift_logging/tasks/generate_services.yaml | 119
-rw-r--r--  roles/openshift_logging/tasks/install_curator.yaml | 53
-rw-r--r--  roles/openshift_logging/tasks/install_elasticsearch.yaml | 118
-rw-r--r--  roles/openshift_logging/tasks/install_fluentd.yaml | 54
-rw-r--r--  roles/openshift_logging/tasks/install_kibana.yaml | 60
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 307
-rw-r--r--  roles/openshift_logging/tasks/install_mux.yaml | 67
-rw-r--r--  roles/openshift_logging/tasks/install_support.yaml | 73
-rw-r--r--  roles/openshift_logging/tasks/main.yaml | 21
-rw-r--r--  roles/openshift_logging/tasks/oc_apply.yaml | 52
-rw-r--r--  roles/openshift_logging/tasks/oc_secret.yaml | 7
-rw-r--r--  roles/openshift_logging/tasks/set_es_storage.yaml | 80
-rw-r--r--  roles/openshift_logging/tasks/start_cluster.yaml | 156
-rw-r--r--  roles/openshift_logging/tasks/stop_cluster.yaml | 153
-rw-r--r--  roles/openshift_logging/tasks/upgrade_logging.yaml | 48
-rw-r--r--  roles/openshift_logging/templates/clusterrole.j2 | 21
-rw-r--r--  roles/openshift_logging/templates/clusterrolebinding.j2 | 24
-rw-r--r--  roles/openshift_logging/templates/es-storage-emptydir.partial | 1
-rw-r--r--  roles/openshift_logging/templates/es-storage-hostpath.partial | 2
-rw-r--r--  roles/openshift_logging/templates/es-storage-pvc.partial | 2
-rw-r--r--  roles/openshift_logging/templates/fluentd.j2 | 167
-rw-r--r--  roles/openshift_logging/templates/secret.j2 | 9
-rw-r--r--  roles/openshift_logging/templates/service.j2 | 34
-rw-r--r--  roles/openshift_logging/templates/serviceaccount.j2 | 16
-rw-r--r--  roles/openshift_logging_curator/defaults/main.yml | 33
-rw-r--r--  roles/openshift_logging_curator/files/curator.yml (renamed from roles/openshift_logging/files/curator.yml) | 0
-rw-r--r--  roles/openshift_logging_curator/meta/main.yaml | 15
-rw-r--r--  roles/openshift_logging_curator/tasks/determine_version.yaml | 17
-rw-r--r--  roles/openshift_logging_curator/tasks/main.yaml | 113
-rw-r--r--  roles/openshift_logging_curator/templates/curator.j2 (renamed from roles/openshift_logging/templates/curator.j2) | 11
-rw-r--r--  roles/openshift_logging_curator/vars/main.yml | 3
-rw-r--r--  roles/openshift_logging_elasticsearch/defaults/main.yml | 57
-rw-r--r--  roles/openshift_logging_elasticsearch/files/es_migration.sh (renamed from roles/openshift_logging/files/es_migration.sh) | 0
-rw-r--r--  roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml | 9
-rw-r--r--  roles/openshift_logging_elasticsearch/meta/main.yaml | 15
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/determine_version.yaml | 19
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 278
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2 (renamed from roles/openshift_logging/templates/elasticsearch-logging.yml.j2) | 33
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 (renamed from roles/openshift_logging/templates/elasticsearch.yml.j2) | 4
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 (renamed from roles/openshift_logging/templates/es.j2) | 26
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/pvc.j2 (renamed from roles/openshift_logging/templates/pvc.j2) | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/rolebinding.j2 (renamed from roles/openshift_logging/templates/rolebinding.j2) | 0
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/main.yml | 12
-rw-r--r--  roles/openshift_logging_fluentd/defaults/main.yml | 59
-rw-r--r--  roles/openshift_logging_fluentd/files/fluentd-throttle-config.yaml (renamed from roles/openshift_logging/files/fluentd-throttle-config.yaml) | 0
-rw-r--r--  roles/openshift_logging_fluentd/files/secure-forward.conf (renamed from roles/openshift_logging/files/secure-forward.conf) | 0
-rw-r--r--  roles/openshift_logging_fluentd/meta/main.yaml | 15
-rw-r--r--  roles/openshift_logging_fluentd/tasks/determine_version.yaml | 17
-rw-r--r--  roles/openshift_logging_fluentd/tasks/label_and_wait.yaml | 10
-rw-r--r--  roles/openshift_logging_fluentd/tasks/main.yaml | 206
-rw-r--r--  roles/openshift_logging_fluentd/templates/fluent.conf.j2 | 78
-rw-r--r--  roles/openshift_logging_fluentd/templates/fluentd.j2 | 123
-rw-r--r--  roles/openshift_logging_fluentd/vars/main.yml | 5
-rw-r--r--  roles/openshift_logging_kibana/defaults/main.yml | 41
-rw-r--r--  roles/openshift_logging_kibana/meta/main.yaml | 15
-rw-r--r--  roles/openshift_logging_kibana/tasks/determine_version.yaml | 17
-rw-r--r--  roles/openshift_logging_kibana/tasks/main.yaml | 232
-rw-r--r--  roles/openshift_logging_kibana/templates/kibana.j2 (renamed from roles/openshift_logging/templates/kibana.j2) | 63
-rw-r--r--  roles/openshift_logging_kibana/templates/oauth-client.j2 (renamed from roles/openshift_logging/templates/oauth-client.j2) | 3
-rw-r--r--  roles/openshift_logging_kibana/templates/route_reencrypt.j2 (renamed from roles/openshift_logging/templates/route_reencrypt.j2) | 0
-rw-r--r--  roles/openshift_logging_kibana/vars/main.yml | 3
-rw-r--r--  roles/openshift_logging_mux/defaults/main.yml | 43
-rw-r--r--  roles/openshift_logging_mux/files/fluent.conf (renamed from roles/openshift_logging/files/fluent.conf) | 0
-rw-r--r--  roles/openshift_logging_mux/files/secure-forward.conf | 24
-rw-r--r--  roles/openshift_logging_mux/meta/main.yaml | 15
-rw-r--r--  roles/openshift_logging_mux/tasks/determine_version.yaml | 17
-rw-r--r--  roles/openshift_logging_mux/tasks/main.yaml | 197
-rw-r--r--  roles/openshift_logging_mux/templates/mux.j2 (renamed from roles/openshift_logging/templates/mux.j2) | 34
-rw-r--r--  roles/openshift_logging_mux/vars/main.yml | 3
-rw-r--r--  roles/openshift_master/README.md | 21
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 9
-rw-r--r--  roles/openshift_master_facts/filter_plugins/openshift_master.py | 3
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 2
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 9
-rw-r--r--  roles/openshift_node/README.md | 9
-rw-r--r--  roles/openshift_node/defaults/main.yml | 3
-rw-r--r--  roles/openshift_node/handlers/main.yml | 3
-rw-r--r--  roles/openshift_node/meta/main.yml | 6
-rw-r--r--  roles/openshift_node/tasks/main.yml | 10
-rw-r--r--  roles/openshift_node_upgrade/tasks/main.yml | 6
-rw-r--r--  roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py | 2
-rw-r--r--  roles/openshift_version/tasks/main.yml | 2
165 files changed, 3711 insertions, 2329 deletions
diff --git a/roles/calico/README.md b/roles/calico/README.md
index 99e870521..9b9458bfa 100644
--- a/roles/calico/README.md
+++ b/roles/calico/README.md
@@ -20,6 +20,15 @@ To install, set the following inventory configuration parameters:
* `openshift_use_openshift_sdn=False`
* `os_sdn_network_plugin_name='cni'`
+## Additional Calico/Node and Felix Configuration Options
+
+Additional parameters that can be defined in the inventory are:
+
+| Environment | Description | Schema | Default |
+|---------|----------------------|---------|---------|
+|CALICO_IPV4POOL_CIDR| The IPv4 Pool to create if none exists at start up. It is invalid to define this variable and NO_DEFAULT_POOLS. |IPv4 CIDR | 192.168.0.0/16 |
+| CALICO_IPV4POOL_IPIP | IPIP Mode to use for the IPv4 POOL created at start up. | off, always, cross-subnet | always |
+| CALICO_LOG_DIR | Directory on the host machine where Calico Logs are written.| String | /var/log/calico |
### Contact Information
diff --git a/roles/calico/defaults/main.yaml b/roles/calico/defaults/main.yaml
index a16a7da71..03c612982 100644
--- a/roles/calico/defaults/main.yaml
+++ b/roles/calico/defaults/main.yaml
@@ -12,3 +12,9 @@ calico_etcd_key_file: "/etc/origin/calico/calico.etcd-client.key"
calico_url_cni: "https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico"
calico_url_ipam: "https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico-ipam"
+
+calico_ipv4pool_ipip: "always"
+calico_ipv4pool_cidr: "192.168.0.0/16"
+
+calico_log_dir: "/var/log/calico"
+calico_node_image: "calico/node:v1.1.0"
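The new defaults above correspond to the `CALICO_*` options documented in the README table. A minimal inventory sketch, assuming these are overridden as ordinary Ansible variables (the values shown are illustrative, not recommendations):

```yaml
# group_vars override sketch; variable names come from roles/calico/defaults/main.yaml
calico_ipv4pool_ipip: "cross-subnet"      # off, always, or cross-subnet
calico_ipv4pool_cidr: "172.16.0.0/16"     # hypothetical pool CIDR
calico_log_dir: "/var/log/calico"
calico_node_image: "calico/node:v1.1.0"
```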
diff --git a/roles/calico/templates/calicoctl.conf.j2 b/roles/calico/templates/10-calico.conf.j2
index 3c8c6b046..3c8c6b046 100644
--- a/roles/calico/templates/calicoctl.conf.j2
+++ b/roles/calico/templates/10-calico.conf.j2
diff --git a/roles/calico/templates/calico.service.j2 b/roles/calico/templates/calico.service.j2
index 7a1236392..719d7ba0d 100644
--- a/roles/calico/templates/calico.service.j2
+++ b/roles/calico/templates/calico.service.j2
@@ -10,7 +10,8 @@ ExecStart=/usr/bin/docker run --net=host --privileged \
--name=calico-node \
-e WAIT_FOR_DATASTORE=true \
-e FELIX_DEFAULTENDPOINTTOHOSTACTION=ACCEPT \
- -e CALICO_IPV4POOL_IPIP=always \
+ -e CALICO_IPV4POOL_IPIP={{ calico_ipv4pool_ipip }} \
+ -e CALICO_IPV4POOL_CIDR={{ calico_ipv4pool_cidr }} \
-e FELIX_IPV6SUPPORT=false \
-e ETCD_ENDPOINTS={{ etcd_endpoints }} \
-v /etc/origin/calico:/etc/origin/calico \
@@ -18,10 +19,11 @@ ExecStart=/usr/bin/docker run --net=host --privileged \
-e ETCD_CERT_FILE={{ calico_etcd_cert_file }} \
-e ETCD_KEY_FILE={{ calico_etcd_key_file }} \
-e NODENAME={{ openshift.common.hostname }} \
- -v /var/log/calico:/var/log/calico \
+ -v {{ calico_log_dir }}:/var/log/calico\
-v /lib/modules:/lib/modules \
-v /var/run/calico:/var/run/calico \
- calico/node:v1.1.0
+ {{ calico_node_image }}
+
ExecStop=-/usr/bin/docker stop calico-node
diff --git a/roles/calico/templates/10-calico.cfg.j2 b/roles/calico/templates/calicoctl.cfg.j2
index 722385ed8..722385ed8 100644
--- a/roles/calico/templates/10-calico.cfg.j2
+++ b/roles/calico/templates/calicoctl.cfg.j2
diff --git a/roles/calico_master/README.md b/roles/calico_master/README.md
index 2d34a967c..6f5ed0664 100644
--- a/roles/calico_master/README.md
+++ b/roles/calico_master/README.md
@@ -21,6 +21,18 @@ To install, set the following inventory configuration parameters:
* `os_sdn_network_plugin_name='cni'`
+
+## Additional Calico/Node and Felix Configuration Options
+
+Additional parameters that can be defined in the inventory are:
+
+
+| Environment | Description | Schema | Default |
+|---------|----------------------|---------|---------|
+|CALICO_IPV4POOL_CIDR| The IPv4 Pool to create if none exists at start up. It is invalid to define this variable and NO_DEFAULT_POOLS. |IPv4 CIDR | 192.168.0.0/16 |
+| CALICO_IPV4POOL_IPIP | IPIP Mode to use for the IPv4 POOL created at start up. | off, always, cross-subnet | always |
+| CALICO_LOG_DIR | Directory on the host machine where Calico Logs are written.| String | /var/log/calico |
+
### Contact Information
Author: Dan Osborne <dan@projectcalico.org>
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index e45f53219..c0d1d5946 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -1,10 +1,4 @@
---
-etcd_service: "{{ 'etcd' if openshift.common.is_etcd_system_container | bool or not etcd_is_containerized | bool else 'etcd_container' }}"
-etcd_client_port: 2379
-etcd_peer_port: 2380
-etcd_url_scheme: http
-etcd_peer_url_scheme: http
-
etcd_initial_cluster_state: new
etcd_initial_cluster_token: etcd-cluster-1
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index d12e6a07f..e1a080b34 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -1,6 +1,9 @@
---
+# runc, docker, host
+r_etcd_common_etcd_runtime: "docker"
+
# etcd server vars
-etcd_conf_dir: "{{ '/etc/etcd' if not openshift.common.is_etcd_system_container else '/var/lib/etcd/etcd.etcd/etc' }}"
+etcd_conf_dir: "{{ '/etc/etcd' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/etc' }}"
etcd_system_container_conf_dir: /var/lib/etcd/etc
etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf"
etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
@@ -38,3 +41,9 @@ etcd_is_thirdparty: False
# etcd dir vars
etcd_data_dir: /var/lib/etcd/
+
+# etcd ports and protocols
+etcd_client_port: 2379
+etcd_peer_port: 2380
+etcd_url_scheme: http
+etcd_peer_url_scheme: http
diff --git a/roles/etcd_common/vars/main.yml b/roles/etcd_common/vars/main.yml
new file mode 100644
index 000000000..00d697776
--- /dev/null
+++ b/roles/etcd_common/vars/main.yml
@@ -0,0 +1,4 @@
+---
+etcd_service: "{{ 'etcd_container' if r_etcd_common_etcd_runtime == 'docker' else 'etcd' }}"
+# Location of the service file is fixed and not meant to be changed
+etcd_service_file: "/etc/systemd/system/{{ etcd_service }}.service"
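A minimal sketch of how the new runtime switch drives the derived settings (the play itself is illustrative; the variable names and resolution logic come from the defaults and vars files in this diff):

```yaml
# With r_etcd_common_etcd_runtime == "runc":   etcd_conf_dir -> /var/lib/etcd/etcd.etcd/etc, etcd_service -> etcd
# With r_etcd_common_etcd_runtime == "docker": etcd_conf_dir -> /etc/etcd,                   etcd_service -> etcd_container
- hosts: etcd
  vars:
    r_etcd_common_etcd_runtime: runc
  roles:
    - etcd_common
  tasks:
    - debug:
        msg: "{{ etcd_service }} reads {{ etcd_conf_file }} and is defined in {{ etcd_service_file }}"
```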
diff --git a/roles/etcd_upgrade/defaults/main.yml b/roles/etcd_upgrade/defaults/main.yml
new file mode 100644
index 000000000..01ad8a268
--- /dev/null
+++ b/roles/etcd_upgrade/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+r_etcd_upgrade_action: upgrade
+r_etcd_upgrade_mechanism: rpm
+r_etcd_upgrade_embedded_etcd: False
+
+# etcd run on a host => use etcdctl command directly
+# etcd run as a docker container => use docker exec
+# etcd run as a runc container => use runc exec
+etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_upgrade_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}"
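The chained ternary above resolves to one of three invocation styles depending on where etcd runs; a comment sketch of the expected expansion:

```yaml
# r_etcd_common_etcd_runtime: host   (or embedded etcd) -> "etcdctl"
# r_etcd_common_etcd_runtime: docker                    -> "docker exec etcd_container etcdctl"
# r_etcd_common_etcd_runtime: runc                      -> "runc exec etcd etcdctl"
r_etcd_common_etcd_runtime: docker
```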
diff --git a/roles/etcd_upgrade/meta/main.yml b/roles/etcd_upgrade/meta/main.yml
new file mode 100644
index 000000000..018bdc8d7
--- /dev/null
+++ b/roles/etcd_upgrade/meta/main.yml
@@ -0,0 +1,16 @@
+---
+galaxy_info:
+ author: Jan Chaloupka
+ description:
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.9
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- role: etcd_common
diff --git a/roles/etcd_upgrade/tasks/backup.yml b/roles/etcd_upgrade/tasks/backup.yml
new file mode 100644
index 000000000..1ea6fc59f
--- /dev/null
+++ b/roles/etcd_upgrade/tasks/backup.yml
@@ -0,0 +1,71 @@
+---
+# INPUT r_etcd_backup_sufix_name
+# INPUT r_etcd_backup_tag
+# OUTPUT r_etcd_upgrade_backup_complete
+- set_fact:
+ # ORIGIN etcd_data_dir etcd_common.defaults
+ l_etcd_backup_dir: "{{ etcd_data_dir }}/openshift-backup-{{ r_etcd_backup_tag | default('') }}{{ r_etcd_backup_sufix_name }}"
+
+# TODO: replace shell module with command and update later checks
+- name: Check available disk space for etcd backup
+ shell: df --output=avail -k {{ etcd_data_dir }} | tail -n 1
+ register: avail_disk
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+
+# TODO: replace shell module with command and update later checks
+- name: Check current etcd disk usage
+ shell: du --exclude='*openshift-backup*' -k {{ etcd_data_dir }} | tail -n 1 | cut -f1
+ register: etcd_disk_usage
+ when: r_etcd_upgrade_embedded_etcd | bool
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+
+- name: Abort if insufficient disk space for etcd backup
+ fail:
+ msg: >
+ {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+ {{ avail_disk.stdout }} Kb available.
+ when: (r_etcd_upgrade_embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+# For non containerized and non embedded we should have the correct version of
+# etcd installed already. So don't do anything.
+#
+# For containerized installs we now exec into etcd_container
+#
+# For embedded non containerized we need to ensure we have the latest version
+# etcd on the host.
+- name: Install latest etcd for embedded
+ package:
+ name: etcd
+ state: latest
+ when:
+ - r_etcd_upgrade_embedded_etcd | bool
+ - not l_ostree_booted.stat.exists | bool
+
+- name: Generate etcd backup
+ command: >
+ {{ etcdctl_command }} backup --data-dir={{ etcd_data_dir }}
+ --backup-dir={{ l_etcd_backup_dir }}
+
+# According to the docs change you can simply copy snap/db
+# https://github.com/openshift/openshift-docs/commit/b38042de02d9780842dce95cfa0ef45d53b58bc6
+- name: Check for v3 data store
+ stat:
+ path: "{{ etcd_data_dir }}/member/snap/db"
+ register: v3_db
+
+- name: Copy etcd v3 data store
+ command: >
+ cp -a {{ etcd_data_dir }}/member/snap/db
+ {{ l_etcd_backup_dir }}/member/snap/
+ when: v3_db.stat.exists
+
+- set_fact:
+ r_etcd_upgrade_backup_complete: True
+
+- name: Display location of etcd backup
+ debug:
+ msg: "Etcd backup created in {{ l_etcd_backup_dir }}"
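A hedged invocation sketch for the backup path (the `include_role` style and the suffix value are assumptions; the variable names are the INPUT/OUTPUT contracts noted at the top of this file):

```yaml
# Illustrative: take an etcd backup before an upgrade.
- include_role:
    name: etcd_upgrade
  vars:
    r_etcd_upgrade_action: backup
    r_etcd_backup_tag: "pre-upgrade-"
    r_etcd_backup_sufix_name: "{{ ansible_date_time.iso8601 }}"
# Afterwards r_etcd_upgrade_backup_complete is set and the backup lives under
# {{ etcd_data_dir }}/openshift-backup-<tag><suffix>.
```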
diff --git a/roles/etcd_upgrade/tasks/main.yml b/roles/etcd_upgrade/tasks/main.yml
new file mode 100644
index 000000000..5178c14e3
--- /dev/null
+++ b/roles/etcd_upgrade/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+# INPUT r_etcd_upgrade_action
+- name: Fail if invalid etcd_upgrade_action provided
+ fail:
+ msg: "etcd_upgrade role can only be called with 'upgrade' or 'backup'"
+ when:
+ - r_etcd_upgrade_action not in ['upgrade', 'backup']
+
+- name: Detecting Atomic Host Operating System
+ stat:
+ path: /run/ostree-booted
+ register: l_ostree_booted
+
+- include: "{{ r_etcd_upgrade_action }}.yml"
diff --git a/roles/etcd_upgrade/tasks/upgrade.yml b/roles/etcd_upgrade/tasks/upgrade.yml
new file mode 100644
index 000000000..420c9638e
--- /dev/null
+++ b/roles/etcd_upgrade/tasks/upgrade.yml
@@ -0,0 +1,11 @@
+---
+# INPUT r_etcd_upgrade_version
+# INPUT r_etcd_upgrade_mechanism
+- name: Fail if r_etcd_upgrade_mechanism is not set during upgrade
+ fail:
+ msg: "r_etcd_upgrade_mechanism can only be set to 'rpm' or 'image'"
+ when:
+ - r_etcd_upgrade_mechanism not in ['rpm', 'image']
+
+- name: "Upgrade {{ r_etcd_upgrade_mechanism }} based etcd"
+ include: upgrade_{{ r_etcd_upgrade_mechanism }}.yml
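And a corresponding sketch for the upgrade path (the version shown is only an example borrowed from the RPM version notes further down; `include_role` is one possible way to drive the role):

```yaml
# Illustrative: upgrade a containerized etcd to a specific image version.
- include_role:
    name: etcd_upgrade
  vars:
    r_etcd_upgrade_action: upgrade
    r_etcd_upgrade_mechanism: image     # or: rpm
    r_etcd_upgrade_version: "3.1.3"     # example version only
```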
diff --git a/roles/etcd_upgrade/tasks/upgrade_image.yml b/roles/etcd_upgrade/tasks/upgrade_image.yml
new file mode 100644
index 000000000..136ec1142
--- /dev/null
+++ b/roles/etcd_upgrade/tasks/upgrade_image.yml
@@ -0,0 +1,48 @@
+---
+# INPUT r_etcd_upgrade_version
+- name: Verify cluster is healthy pre-upgrade
+ command: "{{ etcdctlv2 }} cluster-health"
+
+- name: Get current image
+ shell: "grep 'ExecStart=' {{ etcd_service_file }} | awk '{print $NF}'"
+ register: current_image
+
+- name: Set new_etcd_image
+ set_fact:
+ new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd:' ~ r_etcd_upgrade_version ) }}"
+
+- name: Pull new etcd image
+ command: "docker pull {{ new_etcd_image }}"
+
+- name: Update to latest etcd image
+ replace:
+ dest: "{{ etcd_service_file }}"
+ regexp: "{{ current_image.stdout }}$"
+ replace: "{{ new_etcd_image }}"
+
+- name: Restart etcd_container
+ systemd:
+ name: "{{ etcd_service }}"
+ daemon_reload: yes
+ state: restarted
+
+## TODO: probably should just move this into the backup playbooks, also this
+## will fail on atomic host. We need to revisit how to do etcd backups there as
+## the container may be newer than etcdctl on the host. Assumes etcd3 obsoletes etcd (7.3.1)
+- name: Upgrade etcd for etcdctl when not atomic
+ package: name=etcd state=latest
+ when: not l_ostree_booted.stat.exists | bool
+
+- name: Verify cluster is healthy
+ command: "{{ etcdctlv2 }} cluster-health"
+ register: etcdctl
+ until: etcdctl.rc == 0
+ retries: 3
+ delay: 10
+
+- name: Store new etcd_image
+ # DEPENDENCY openshift_facts
+ openshift_facts:
+ role: etcd
+ local_facts:
+ etcd_image: "{{ new_etcd_image }}"
diff --git a/roles/etcd_upgrade/tasks/upgrade_rpm.yml b/roles/etcd_upgrade/tasks/upgrade_rpm.yml
new file mode 100644
index 000000000..324b69605
--- /dev/null
+++ b/roles/etcd_upgrade/tasks/upgrade_rpm.yml
@@ -0,0 +1,32 @@
+---
+# INPUT r_etcd_upgrade_version?
+
+# F23 GA'd with etcd 2.0, currently has 2.2 in updates
+# F24 GA'd with etcd-2.2, currently has 2.2 in updates
+# F25 Beta currently has etcd 3.0
+# RHEL 7.3.4 with etcd-3.1.3-1.el7
+# RHEL 7.3.3 with etcd-3.1.0-2.el7
+# RHEL 7.3.2 with etcd-3.0.15-1.el7
+
+- name: Verify cluster is healthy pre-upgrade
+ command: "{{ etcdctlv2 }} cluster-health"
+
+- set_fact:
+ l_etcd_target_package: "{{ 'etcd' if r_etcd_upgrade_version is not defined else 'etcd-'+r_etcd_upgrade_version+'*' }}"
+
+- name: Update etcd RPM to {{ l_etcd_target_package }}
+ package:
+ name: "{{ l_etcd_target_package }}"
+ state: latest
+
+- name: Restart etcd
+ service:
+ name: "{{ etcd_service }}"
+ state: restarted
+
+- name: Verify cluster is healthy
+ command: "{{ etcdctlv2 }} cluster-health"
+ register: etcdctl
+ until: etcdctl.rc == 0
+ retries: 3
+ delay: 10
diff --git a/roles/etcd_upgrade/vars/main.yml b/roles/etcd_upgrade/vars/main.yml
new file mode 100644
index 000000000..5ed919d42
--- /dev/null
+++ b/roles/etcd_upgrade/vars/main.yml
@@ -0,0 +1,3 @@
+---
+# EXPECTS etcd_peer
+etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}"
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index a6273cfe4..7573c5b85 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -952,7 +952,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index 7493b5c3d..bb3619081 100644
--- a/roles/lib_openshift/library/oc_adm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -938,7 +938,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index 5e72f5954..358d4515b 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -924,7 +924,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index 371a3953b..5807f41a8 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -924,7 +924,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index 7240521c6..e1b79466e 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -1042,7 +1042,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index a54c62cd4..e3b1bbcbc 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -1067,7 +1067,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index 78c72ef26..9f3e819a3 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -916,7 +916,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
index c88f56fc6..3c0e82a09 100644
--- a/roles/lib_openshift/library/oc_configmap.py
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -922,7 +922,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index 17e3f7dde..008ce6a12 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -966,7 +966,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index 18ab97bc0..824ad4cb3 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -933,7 +933,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
index 88c6ef209..7eacac38e 100644
--- a/roles/lib_openshift/library/oc_group.py
+++ b/roles/lib_openshift/library/oc_group.py
@@ -906,7 +906,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
index 45860cbe5..266f8fbcf 100644
--- a/roles/lib_openshift/library/oc_image.py
+++ b/roles/lib_openshift/library/oc_image.py
@@ -925,7 +925,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index 65923a698..756d7db42 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -942,7 +942,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 1d75a21b9..88d4ac8ca 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -945,7 +945,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index 72add01f4..8e42083ca 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -877,7 +877,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 8e1ffe90f..15e9c606d 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -934,7 +934,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1450,7 +1450,7 @@ class OCProcess(OpenShiftCLI):
if self._template is None:
results = self._process(self.name, False, self.params, self.data)
if results['returncode'] != 0:
- raise OpenShiftCLIError('Error processing template [%s].' % self.name)
+ raise OpenShiftCLIError('Error processing template [%s]: %s' %(self.name, results))
self._template = results['results']['items']
return self._template
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index a06852fd8..b653d9018 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -931,7 +931,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index 79673452d..bab67d499 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -926,7 +926,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index ad705a6c5..7831ec8a4 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -976,7 +976,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index 291ac8b19..133942e55 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -920,7 +920,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index df28df2bc..8c6877bb2 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -966,7 +966,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index e98f83cc3..a482e13c1 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -972,7 +972,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index f00e9e4f6..263398e3d 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -918,7 +918,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index 6691495a6..cc7fda1b5 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -918,7 +918,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
index 72f2fbf03..48ac28834 100644
--- a/roles/lib_openshift/library/oc_user.py
+++ b/roles/lib_openshift/library/oc_user.py
@@ -978,7 +978,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index bc3340a94..21dd5c3c9 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -890,7 +890,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index 9dec0a6d4..be0944843 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -967,7 +967,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
diff --git a/roles/lib_openshift/src/class/oc_process.py b/roles/lib_openshift/src/class/oc_process.py
index eba9a43cd..62a6bd571 100644
--- a/roles/lib_openshift/src/class/oc_process.py
+++ b/roles/lib_openshift/src/class/oc_process.py
@@ -30,7 +30,7 @@ class OCProcess(OpenShiftCLI):
if self._template is None:
results = self._process(self.name, False, self.params, self.data)
if results['returncode'] != 0:
- raise OpenShiftCLIError('Error processing template [%s].' % self.name)
+ raise OpenShiftCLIError('Error processing template [%s]: %s' %(self.name, results))
self._template = results['results']['items']
return self._template
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index 2bf795e25..70755187e 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -128,7 +128,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
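This one-line quoting change lives in `src/lib/base.py`, the source fragment from which the generated modules above are built. A hedged sketch of the kind of task it affects, where a template parameter value carries single quotes (the template and parameter names are hypothetical):

```yaml
# Illustrative oc_process task; with the change above, the single quotes in
# EXAMPLE_JSON are rewritten to double quotes before being handed to `oc process -v`.
- oc_process:
    name: example-template
    namespace: default
    params:
      EXAMPLE_JSON: "{'key': 'value'}"
```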
diff --git a/roles/openshift_certificate_expiry/README.md b/roles/openshift_certificate_expiry/README.md
index 107e27f89..f19a421cb 100644
--- a/roles/openshift_certificate_expiry/README.md
+++ b/roles/openshift_certificate_expiry/README.md
@@ -54,7 +54,7 @@ included in this role, or you can [read on below for more examples](#more-exampl
to help you craft your own.
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/easy-mode.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
```
Using the `easy-mode.yaml` playbook will produce:
@@ -65,7 +65,7 @@ Using the `easy-mode.yaml` playbook will produce:
> **Note:** If you are running from an RPM install use
-> `/usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/easy-mode.yaml`
+> `/usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml`
> instead
## Run from a container
@@ -80,7 +80,7 @@ There are several [examples](../../examples/README.md) in the `examples` directo
## More Example Playbooks
> **Note:** These Playbooks are available to run directly out of the
-> [/playbooks/certificate_expiry/](../../playbooks/certificate_expiry/) directory.
+> [/playbooks/byo/openshift-checks/certificate_expiry/](../../playbooks/byo/openshift-checks/certificate_expiry/) directory.
### Default behavior
@@ -99,14 +99,14 @@ This playbook just invokes the certificate expiration check role with default op
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/default.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/default.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/default.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/default.yaml
```
-> [View This Playbook](../../playbooks/certificate_expiry/default.yaml)
+> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/default.yaml)
### Easy mode
@@ -130,14 +130,14 @@ certificates (healthy or not) are included in the results:
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/easy-mode.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/easy-mode.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
```
-> [View This Playbook](../../playbooks/certificate_expiry/easy-mode.yaml)
+> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml)
### Easy mode and upload reports to masters
@@ -193,14 +193,14 @@ options via environment variables:
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/easy-mode-upload.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/easy-mode-upload.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
```
-> [View This Playbook](../../playbooks/certificate_expiry/easy-mode-upload.yaml)
+> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml)
### Generate HTML and JSON artifacts in their default paths
@@ -219,14 +219,14 @@ $ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/ce
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/html_and_json_default_paths.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/html_and_json_default_paths.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
```
-> [View This Playbook](../../playbooks/certificate_expiry/html_and_json_default_paths.yaml)
+> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml)
### Generate HTML and JSON reports in a custom path
@@ -250,14 +250,14 @@ This example customizes the report generation path to point to a specific path (
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/html_and_json_timestamp.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/html_and_json_timestamp.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
```
-> [View This Playbook](../../playbooks/certificate_expiry/html_and_json_timestamp.yaml)
+> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml)
### Long warning window
@@ -278,14 +278,14 @@ the module out):
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/longer_warning_period.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/longer_warning_period.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
```
-> [View This Playbook](../../playbooks/certificate_expiry/longer_warning_period.yaml)
+> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml)
### Long warning window and JSON report
@@ -307,14 +307,14 @@ the module out) and save the results as a JSON file:
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/longer-warning-period-json-results.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/longer-warning-period-json-results.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
```
-> [View This Playbook](../../playbooks/certificate_expiry/longer-warning-period-json-results.yaml)
+> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml)
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index f657d86cf..1b9bda67e 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -15,6 +15,9 @@
l_is_etcd_system_container: "{{ (use_etcd_system_container | default(use_system_containers) | bool) }}"
- set_fact:
l_any_system_container: "{{ l_is_etcd_system_container or l_is_openvswitch_system_container or l_is_node_system_container or l_is_master_system_container }}"
+- set_fact:
+ l_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"
+
- name: Validate python version
fail:
@@ -80,6 +83,7 @@
is_node_system_container: "{{ l_is_node_system_container | default(false) }}"
is_master_system_container: "{{ l_is_master_system_container | default(false) }}"
is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}"
+ etcd_runtime: "{{ l_etcd_runtime }}"
system_images_registry: "{{ system_images_registry | default('') }}"
public_hostname: "{{ openshift_public_hostname | default(None) }}"
public_ip: "{{ openshift_public_ip | default(None) }}"
diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
index 03c40b78b..a62e4331e 100644
--- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py
+++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
@@ -25,9 +25,11 @@ class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
+ task_vars = task_vars or {}
- if task_vars is None:
- task_vars = {}
+ # vars are not supportably available in the callback plugin,
+ # so record any it will need in the result.
+ result['playbook_context'] = task_vars.get('r_openshift_health_checker_playbook_context')
if "openshift" not in task_vars:
result["failed"] = True
@@ -46,19 +48,27 @@ class ActionModule(ActionBase):
result["checks"] = check_results = {}
+ user_disabled_checks = [
+ check.strip()
+ for check in task_vars.get("openshift_disable_check", "").split(",")
+ ]
+
for check_name in resolved_checks:
display.banner("CHECK [{} : {}]".format(check_name, task_vars["ansible_host"]))
check = known_checks[check_name]
- if check.is_active(task_vars):
+ if not check.is_active(task_vars):
+ r = dict(skipped=True, skipped_reason="Not active for this host")
+ elif check_name in user_disabled_checks:
+ r = dict(skipped=True, skipped_reason="Disabled by user request")
+ else:
try:
r = check.run(tmp, task_vars)
except OpenShiftCheckException as e:
- r = {}
- r["failed"] = True
- r["msg"] = str(e)
- else:
- r = {"skipped": True}
+ r = dict(
+ failed=True,
+ msg=str(e),
+ )
check_results[check_name] = r
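The new `openshift_disable_check` handling above splits a comma-separated variable into check names to skip. A minimal sketch of how an inventory (or `-e` extra var) might use it; the check names are taken from the checks included in this diff:

```yaml
# Illustrative inventory variable: skip these checks by name.
openshift_disable_check: "memory_availability,disk_availability"
```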
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index 7bce7f107..64c29a8d9 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -2,6 +2,12 @@
Ansible callback plugin.
'''
+# Reason: In several locations below we disable pylint protected-access
+# for Ansible objects that do not give us any public way
+# to access the full details we need to report check failures.
+# Status: disabled permanently or until Ansible object has a public API.
+# This does leave the code more likely to be broken by future Ansible changes.
+
from pprint import pformat
from ansible.plugins.callback import CallbackBase
@@ -20,38 +26,37 @@ class CallbackModule(CallbackBase):
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'failure_summary'
CALLBACK_NEEDS_WHITELIST = False
+ _playbook_file = None
def __init__(self):
super(CallbackModule, self).__init__()
self.__failures = []
+ def v2_playbook_on_start(self, playbook):
+ super(CallbackModule, self).v2_playbook_on_start(playbook)
+ # re: playbook attrs see top comment # pylint: disable=protected-access
+ self._playbook_file = playbook._file_name
+
def v2_runner_on_failed(self, result, ignore_errors=False):
super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
self.__failures.append(dict(result=result, ignore_errors=ignore_errors))
def v2_playbook_on_stats(self, stats):
super(CallbackModule, self).v2_playbook_on_stats(stats)
- # TODO: update condition to consider a host var or env var to
- # enable/disable the summary, so that we can control the output from a
- # play.
if self.__failures:
- self._print_failure_summary()
+ self._print_failure_details(self.__failures)
- def _print_failure_summary(self):
- '''Print a summary of failed tasks (including ignored failures).'''
+ def _print_failure_details(self, failures):
+ '''Print a summary of failed tasks or checks.'''
self._display.display(u'\nFailure summary:\n')
- # TODO: group failures by host or by task. If grouped by host, it is
- # easy to see all problems of a given host. If grouped by task, it is
- # easy to see what hosts needs the same fix.
-
- width = len(str(len(self.__failures)))
+ width = len(str(len(failures)))
initial_indent_format = u' {{:>{width}}}. '.format(width=width)
initial_indent_len = len(initial_indent_format.format(0))
subsequent_indent = u' ' * initial_indent_len
subsequent_extra_indent = u' ' * (initial_indent_len + 10)
- for i, failure in enumerate(self.__failures, 1):
+ for i, failure in enumerate(failures, 1):
entries = _format_failure(failure)
self._display.display(u'\n{}{}'.format(initial_indent_format.format(i), entries[0]))
for entry in entries[1:]:
@@ -59,11 +64,52 @@ class CallbackModule(CallbackBase):
indented = u'{}{}'.format(subsequent_indent, entry)
self._display.display(indented)
-
-# Reason: disable pylint protected-access because we need to access _*
-# attributes of a task result to implement this method.
-# Status: permanently disabled unless Ansible's API changes.
-# pylint: disable=protected-access
+ failed_checks = set()
+ playbook_context = None
+ # re: result attrs see top comment # pylint: disable=protected-access
+ for failure in failures:
+ # get context from check task result since callback plugins cannot access task vars
+ playbook_context = playbook_context or failure['result']._result.get('playbook_context')
+ failed_checks.update(
+ name
+ for name, result in failure['result']._result.get('checks', {}).items()
+ if result.get('failed')
+ )
+ if failed_checks:
+ self._print_check_failure_summary(failed_checks, playbook_context)
+
+ def _print_check_failure_summary(self, failed_checks, context):
+ checks = ','.join(sorted(failed_checks))
+ # NOTE: context is not set if all failures occurred prior to checks task
+ summary = (
+ '\n'
+ 'The execution of "{playbook}"\n'
+ 'includes checks designed to fail early if the requirements\n'
+ 'of the playbook are not met. One or more of these checks\n'
+ 'failed. To disregard these results, you may choose to\n'
+ 'disable failing checks by setting an Ansible variable:\n\n'
+ ' openshift_disable_check={checks}\n\n'
+ 'Failing check names are shown in the failure details above.\n'
+ 'Some checks may be configurable by variables if your requirements\n'
+ 'are different from the defaults; consult check documentation.\n'
+ 'Variables can be set in the inventory or passed on the\n'
+ 'command line using the -e flag to ansible-playbook.\n'
+ ).format(playbook=self._playbook_file, checks=checks)
+ if context in ['pre-install', 'health']:
+ summary = (
+ '\n'
+ 'You may choose to configure or disable failing checks by\n'
+ 'setting Ansible variables. To disable those above:\n\n'
+ ' openshift_disable_check={checks}\n\n'
+ 'Consult check documentation for configurable variables.\n'
+ 'Variables can be set in the inventory or passed on the\n'
+ 'command line using the -e flag to ansible-playbook.\n'
+ ).format(checks=checks)
+ # other expected contexts: install, upgrade
+ self._display.display(summary)
+
+
+# re: result attrs see top comment # pylint: disable=protected-access
def _format_failure(failure):
'''Return a list of pretty-formatted text entries describing a failure, including
relevant information about it. Expect that the list of text entries will be joined
@@ -100,11 +146,8 @@ def _format_failed_checks(checks):
return stringc(pformat(checks), C.COLOR_ERROR)
-# Reason: disable pylint protected-access because we need to access _*
-# attributes of obj to implement this function.
-# This is inspired by ansible.playbook.base.Base.dump_me.
-# Status: permanently disabled unless Ansible's API changes.
-# pylint: disable=protected-access
+# This is inspired by ansible.playbook.base.Base.dump_me.
+# re: play/task/block attrs see top comment # pylint: disable=protected-access
def _get_play(obj):
'''Given a task or block, recursively tries to find its parent play.'''
if hasattr(obj, '_play'):
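
The block above gathers failing check names from each task result's `checks` dict before printing the `openshift_disable_check` hint. A minimal sketch of that collection step, using an assumed result structure (names and messages are illustrative only):

    # Sketch of the failed-check collection done in _print_failure_details.
    result = {"checks": {
        "memory_availability": {"failed": True, "msg": "below recommended"},
        "disk_availability": {"failed": False},
    }}
    failed_checks = {name for name, res in result["checks"].items() if res.get("failed")}
    print(",".join(sorted(failed_checks)))  # -> memory_availability
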
diff --git a/roles/openshift_health_checker/library/etcdkeysize.py b/roles/openshift_health_checker/library/etcdkeysize.py
new file mode 100644
index 000000000..620e82d87
--- /dev/null
+++ b/roles/openshift_health_checker/library/etcdkeysize.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+"""Ansible module that recursively determines if the size of a key in an etcd cluster exceeds a given limit."""
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+try:
+ import etcd
+
+ IMPORT_EXCEPTION_MSG = None
+except ImportError as err:
+ IMPORT_EXCEPTION_MSG = str(err)
+
+ from collections import namedtuple
+ EtcdMock = namedtuple("etcd", ["EtcdKeyNotFound"])
+ etcd = EtcdMock(KeyError)
+
+
+# pylint: disable=too-many-arguments
+def check_etcd_key_size(client, key, size_limit, total_size=0, depth=0, depth_limit=1000, visited=None):
+ """Check size of an etcd path starting at given key. Returns tuple (string, bool)"""
+ if visited is None:
+ visited = set()
+
+ if key in visited:
+ return 0, False
+
+ visited.add(key)
+
+ try:
+ result = client.read(key, recursive=False)
+ except etcd.EtcdKeyNotFound:
+ return 0, False
+
+ size = 0
+ limit_exceeded = False
+
+ for node in result.leaves:
+ if depth >= depth_limit:
+ raise Exception("Maximum recursive stack depth ({}) exceeded.".format(depth_limit))
+
+ if size_limit and total_size + size > size_limit:
+ return size, True
+
+ if not node.dir:
+ size += len(node.value)
+ continue
+
+ key_size, limit_exceeded = check_etcd_key_size(client, node.key,
+ size_limit,
+ total_size + size,
+ depth + 1,
+ depth_limit, visited)
+ size += key_size
+
+ max_limit_exceeded = limit_exceeded or (total_size + size > size_limit)
+ return size, max_limit_exceeded
+
+
+def main(): # pylint: disable=missing-docstring,too-many-branches
+ module = AnsibleModule(
+ argument_spec=dict(
+ size_limit_bytes=dict(type="int", default=0),
+ paths=dict(type="list", default=["/openshift.io/images"]),
+ host=dict(type="str", default="127.0.0.1"),
+ port=dict(type="int", default=4001),
+ protocol=dict(type="str", default="http"),
+ version_prefix=dict(type="str", default=""),
+ allow_redirect=dict(type="bool", default=False),
+ cert=dict(type="dict", default=""),
+ ca_cert=dict(type="str", default=None),
+ ),
+ supports_check_mode=True
+ )
+
+ module.params["cert"] = (
+ module.params["cert"]["cert"],
+ module.params["cert"]["key"],
+ )
+
+ size_limit = module.params.pop("size_limit_bytes")
+ paths = module.params.pop("paths")
+
+ limit_exceeded = False
+
+ try:
+ # pylint: disable=no-member
+ client = etcd.Client(**module.params)
+ except AttributeError as attrerr:
+ msg = str(attrerr)
+ if IMPORT_EXCEPTION_MSG:
+ msg = IMPORT_EXCEPTION_MSG
+ if "No module named etcd" in IMPORT_EXCEPTION_MSG:
+ # pylint: disable=redefined-variable-type
+ msg = ('Unable to import the python "etcd" dependency. '
+ 'Make sure python-etcd is installed on the host.')
+
+ module.exit_json(
+ failed=True,
+ changed=False,
+ size_limit_exceeded=limit_exceeded,
+ msg=msg,
+ )
+
+ return
+
+ size = 0
+ for path in paths:
+ path_size, limit_exceeded = check_etcd_key_size(client, path, size_limit - size)
+ size += path_size
+
+ if limit_exceeded:
+ break
+
+ module.exit_json(
+ changed=False,
+ size_limit_exceeded=limit_exceeded,
+ )
+
+
+if __name__ == '__main__':
+ main()
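
A simplified, self-contained sketch of the accounting performed by `check_etcd_key_size` (illustrative data, not the module itself):

    # Leaf values contribute len(value) bytes and directory nodes recurse. The
    # real function also tracks visited keys and stops early once the limit is hit.
    tree = {"dir": True, "leaves": [
        {"dir": False, "value": "1234"},
        {"dir": False, "value": "56789"},
        {"dir": True, "leaves": [{"dir": False, "value": "123"}]},
    ]}

    def total_size(node):
        if not node["dir"]:
            return len(node["value"])
        return sum(total_size(leaf) for leaf in node["leaves"])

    size = total_size(tree)     # 12 bytes
    limit_exceeded = size > 7   # True for a 7-byte limit
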
diff --git a/roles/openshift_health_checker/meta/main.yml b/roles/openshift_health_checker/meta/main.yml
index cd9b55902..4d141974c 100644
--- a/roles/openshift_health_checker/meta/main.yml
+++ b/roles/openshift_health_checker/meta/main.yml
@@ -2,3 +2,4 @@
dependencies:
- role: openshift_facts
- role: openshift_repos
+ - role: openshift_version
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
index c2792a0fe..962148cb8 100644
--- a/roles/openshift_health_checker/openshift_checks/disk_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -27,10 +27,12 @@ class DiskAvailability(NotContainerizedMixin, OpenShiftCheck):
def run(self, tmp, task_vars):
group_names = get_var(task_vars, "group_names")
ansible_mounts = get_var(task_vars, "ansible_mounts")
-
- min_free_bytes = max(self.recommended_disk_space_bytes.get(name, 0) for name in group_names)
free_bytes = self.openshift_available_disk(ansible_mounts)
+ recommended_min = max(self.recommended_disk_space_bytes.get(name, 0) for name in group_names)
+ configured_min = int(get_var(task_vars, "openshift_check_min_host_disk_gb", default=0)) * 10**9
+ min_free_bytes = configured_min or recommended_min
+
if free_bytes < min_free_bytes:
return {
'failed': True,
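
A small sketch of the threshold selection introduced above, where a non-zero `openshift_check_min_host_disk_gb` overrides the per-group recommendation (numbers assumed):

    recommended_min = 40 * 10**9                  # e.g. the masters recommendation
    configured_min = 10 * 10**9                   # openshift_check_min_host_disk_gb=10
    min_free_bytes = configured_min or recommended_min
    assert min_free_bytes == 10 * 10**9           # configured value wins when non-zero
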
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index cce289b95..4588ed634 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -13,41 +13,55 @@ class DockerImageAvailability(OpenShiftCheck):
name = "docker_image_availability"
tags = ["preflight"]
- skopeo_image = "openshift/openshift-ansible"
+ dependencies = ["skopeo", "python-docker-py"]
- # FIXME(juanvallejo): we should consider other possible values of
- # `deployment_type` (the key here). See
- # https://github.com/openshift/openshift-ansible/blob/8e26f8c/roles/openshift_repos/vars/main.yml#L7
- docker_image_base = {
+ deployment_image_info = {
"origin": {
- "repo": "openshift",
- "image": "origin",
+ "namespace": "openshift",
+ "name": "origin",
},
"openshift-enterprise": {
- "repo": "openshift3",
- "image": "ose",
+ "namespace": "openshift3",
+ "name": "ose",
},
}
- def run(self, tmp, task_vars):
- required_images = self.required_images(task_vars)
- missing_images = set(required_images) - set(self.local_images(required_images, task_vars))
+ @classmethod
+ def is_active(cls, task_vars):
+ """Skip hosts with unsupported deployment types."""
+ deployment_type = get_var(task_vars, "openshift_deployment_type")
+ has_valid_deployment_type = deployment_type in cls.deployment_image_info
- # exit early if all images were found locally
- if not missing_images:
- return {"changed": False}
+ return super(DockerImageAvailability, cls).is_active(task_vars) and has_valid_deployment_type
- msg, failed, changed = self.update_skopeo_image(task_vars)
+ def run(self, tmp, task_vars):
+ msg, failed, changed = self.ensure_dependencies(task_vars)
# exit early if Skopeo update fails
if failed:
+ if "No package matching" in msg:
+ msg = "Ensure that all required dependencies can be installed via `yum`.\n"
return {
"failed": True,
"changed": changed,
- "msg": "Failed to update Skopeo image ({img_name}). {msg}".format(img_name=self.skopeo_image, msg=msg),
+ "msg": (
+ "Unable to update or install required dependency packages on this host;\n"
+ "These are required in order to check Docker image availability:"
+ "\n {deps}\n{msg}"
+ ).format(deps=',\n '.join(self.dependencies), msg=msg),
}
+ required_images = self.required_images(task_vars)
+ missing_images = set(required_images) - set(self.local_images(required_images, task_vars))
+
+ # exit early if all images were found locally
+ if not missing_images:
+ return {"changed": changed}
+
registries = self.known_docker_registries(task_vars)
+ if not registries:
+ return {"failed": True, "msg": "Unable to retrieve any docker registries.", "changed": changed}
+
available_images = self.available_images(missing_images, registries, task_vars)
unavailable_images = set(missing_images) - set(available_images)
@@ -55,44 +69,60 @@ class DockerImageAvailability(OpenShiftCheck):
return {
"failed": True,
"msg": (
- "One or more required images are not available: {}.\n"
+ "One or more required Docker images are not available:\n {}\n"
"Configured registries: {}"
- ).format(", ".join(sorted(unavailable_images)), ", ".join(registries)),
+ ).format(",\n ".join(sorted(unavailable_images)), ", ".join(registries)),
"changed": changed,
}
return {"changed": changed}
def required_images(self, task_vars):
- deployment_type = get_var(task_vars, "deployment_type")
- # FIXME(juanvallejo): we should handle gracefully with a proper error
- # message when given an unexpected value for `deployment_type`.
- image_base_name = self.docker_image_base[deployment_type]
-
- openshift_release = get_var(task_vars, "openshift_release")
- # FIXME(juanvallejo): this variable is not required when the
- # installation is non-containerized. The example inventories have it
- # commented out. We should handle gracefully and with a proper error
- # message when this variable is required and not set.
- openshift_image_tag = get_var(task_vars, "openshift_image_tag")
+ deployment_type = get_var(task_vars, "openshift_deployment_type")
+ image_info = self.deployment_image_info[deployment_type]
+ openshift_release = get_var(task_vars, "openshift_release", default="latest")
+ openshift_image_tag = get_var(task_vars, "openshift_image_tag")
is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
- if is_containerized:
- images = set(self.containerized_docker_images(image_base_name, openshift_release))
- else:
- images = set(self.rpm_docker_images(image_base_name, openshift_release))
+ images = set(self.required_docker_images(
+ image_info["namespace"],
+ image_info["name"],
+ ["registry-console"] if "enterprise" in deployment_type else [], # include enterprise-only image names
+ openshift_release,
+ is_containerized,
+ ))
# append images with qualified image tags to our list of required images.
# these are images with a (v0.0.0.0) tag, rather than a standard release
# format tag (v0.0). We want to check this set in both containerized and
# non-containerized installations.
images.update(
- self.qualified_docker_images(self.image_from_base_name(image_base_name), "v" + openshift_image_tag)
+ self.required_qualified_docker_images(
+ image_info["namespace"],
+ image_info["name"],
+ openshift_image_tag,
+ ),
)
return images
+ @staticmethod
+ def required_docker_images(namespace, name, additional_image_names, version, is_containerized):
+ if is_containerized:
+ return ["{}/{}:{}".format(namespace, name, version)] if name else []
+
+ # include additional non-containerized images specific to the current deployment type
+ return ["{}/{}:{}".format(namespace, img_name, version) for img_name in additional_image_names]
+
+ @staticmethod
+ def required_qualified_docker_images(namespace, name, version):
+ # pylint: disable=invalid-name
+ return [
+ "{}/{}-{}:{}".format(namespace, name, suffix, version)
+ for suffix in ["haproxy-router", "docker-registry", "deployer", "pod"]
+ ]
+
def local_images(self, images, task_vars):
"""Filter a list of images and return those available locally."""
return [
@@ -107,31 +137,26 @@ class DockerImageAvailability(OpenShiftCheck):
return bool(result.get("images", []))
- def known_docker_registries(self, task_vars):
- result = self.module_executor("docker_info", {}, task_vars)
+ @staticmethod
+ def known_docker_registries(task_vars):
+ docker_facts = get_var(task_vars, "openshift", "docker")
+ regs = set(docker_facts["additional_registries"])
- if result.get("failed", False):
- return []
+ deployment_type = get_var(task_vars, "openshift_deployment_type")
+ if deployment_type == "origin":
+ regs.update(["docker.io"])
+ elif "enterprise" in deployment_type:
+ regs.update(["registry.access.redhat.com"])
- # FIXME(juanvallejo): wrong default type, result["info"] is expected to
- # contain a dictionary (see how we call `docker_info.get` below).
- docker_info = result.get("info", "")
- return [registry.get("Name", "") for registry in docker_info.get("Registries", {})]
+ return list(regs)
def available_images(self, images, registries, task_vars):
"""Inspect existing images using Skopeo and return all images successfully inspected."""
return [
image for image in images
- if self.is_image_available(image, registries, task_vars)
+ if any(self.is_available_skopeo_image(image, registry, task_vars) for registry in registries)
]
- def is_image_available(self, image, registries, task_vars):
- for registry in registries:
- if self.is_available_skopeo_image(image, registry, task_vars):
- return True
-
- return False
-
def is_available_skopeo_image(self, image, registry, task_vars):
"""Uses Skopeo to determine if required image exists in a given registry."""
@@ -140,40 +165,15 @@ class DockerImageAvailability(OpenShiftCheck):
image=image,
)
- args = {
- "name": "skopeo_inspect",
- "image": self.skopeo_image,
- "command": cmd_str,
- "detach": False,
- "cleanup": True,
- }
- result = self.module_executor("docker_container", args, task_vars)
- return result.get("failed", False)
-
- def containerized_docker_images(self, base_name, version):
- return [
- "{image}:{version}".format(image=self.image_from_base_name(base_name), version=version)
- ]
+ args = {"_raw_params": cmd_str}
+ result = self.module_executor("command", args, task_vars)
+ return not result.get("failed", False) and result.get("rc", 0) == 0
- @staticmethod
- def rpm_docker_images(base, version):
- return [
- "{image_repo}/registry-console:{version}".format(image_repo=base["repo"], version=version)
- ]
+ # ensures that the skopeo and python-docker-py packages exist
+ # check is skipped on atomic installations
+ def ensure_dependencies(self, task_vars):
+ if get_var(task_vars, "openshift", "common", "is_atomic"):
+ return "", False, False
- @staticmethod
- def qualified_docker_images(image_name, version):
- return [
- "{}-{}:{}".format(image_name, component, version)
- for component in "haproxy-router docker-registry deployer pod".split()
- ]
-
- @staticmethod
- def image_from_base_name(base):
- return "".join([base["repo"], "/", base["image"]])
-
- # ensures that the skopeo docker image exists, and updates it
- # with latest if image was already present locally.
- def update_skopeo_image(self, task_vars):
- result = self.module_executor("docker_image", {"name": self.skopeo_image}, task_vars)
- return result.get("msg", ""), result.get("failed", False), result.get("changed", False)
+ result = self.module_executor("yum", {"name": self.dependencies, "state": "latest"}, task_vars)
+ return result.get("msg", ""), result.get("failed", False) or result.get("rc", 0) != 0, result.get("changed")
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py b/roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py
new file mode 100644
index 000000000..c04a69765
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py
@@ -0,0 +1,84 @@
+"""
+Health check for determining if the size of OpenShift image data stored in an etcd cluster exceeds a specified limit.
+"""
+
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+
+
+class EtcdImageDataSize(OpenShiftCheck):
+ """Check that total size of OpenShift image data does not exceed the recommended limit in an etcd cluster"""
+
+ name = "etcd_imagedata_size"
+ tags = ["etcd"]
+
+ def run(self, tmp, task_vars):
+ etcd_mountpath = self._get_etcd_mountpath(get_var(task_vars, "ansible_mounts"))
+ etcd_avail_diskspace = etcd_mountpath["size_available"]
+ etcd_total_diskspace = etcd_mountpath["size_total"]
+
+ etcd_imagedata_size_limit = get_var(task_vars,
+ "etcd_max_image_data_size_bytes",
+ default=int(0.5 * float(etcd_total_diskspace - etcd_avail_diskspace)))
+
+ etcd_is_ssl = get_var(task_vars, "openshift", "master", "etcd_use_ssl", default=False)
+ etcd_port = get_var(task_vars, "openshift", "master", "etcd_port", default=2379)
+ etcd_hosts = get_var(task_vars, "openshift", "master", "etcd_hosts")
+
+ config_base = get_var(task_vars, "openshift", "common", "config_base")
+
+ cert = task_vars.get("etcd_client_cert", config_base + "/master/master.etcd-client.crt")
+ key = task_vars.get("etcd_client_key", config_base + "/master/master.etcd-client.key")
+ ca_cert = task_vars.get("etcd_client_ca_cert", config_base + "/master/master.etcd-ca.crt")
+
+ for etcd_host in list(etcd_hosts):
+ args = {
+ "size_limit_bytes": etcd_imagedata_size_limit,
+ "paths": ["/openshift.io/images", "/openshift.io/imagestreams"],
+ "host": etcd_host,
+ "port": etcd_port,
+ "protocol": "https" if etcd_is_ssl else "http",
+ "version_prefix": "/v2",
+ "allow_redirect": True,
+ "ca_cert": ca_cert,
+ "cert": {
+ "cert": cert,
+ "key": key,
+ },
+ }
+
+ etcdkeysize = self.module_executor("etcdkeysize", args, task_vars)
+
+ if etcdkeysize.get("rc", 0) != 0 or etcdkeysize.get("failed"):
+ msg = 'Failed to retrieve stats for etcd host "{host}": {reason}'
+ reason = etcdkeysize.get("msg")
+ if etcdkeysize.get("module_stderr"):
+ reason = etcdkeysize["module_stderr"]
+
+ msg = msg.format(host=etcd_host, reason=reason)
+ return {"failed": True, "changed": False, "msg": msg}
+
+ if etcdkeysize["size_limit_exceeded"]:
+ limit = self._to_gigabytes(etcd_imagedata_size_limit)
+ msg = ("The size of OpenShift image data stored in etcd host "
+ "\"{host}\" exceeds the maximum recommended limit of {limit:.2f} GB. "
+                       "Use the `oadm prune images` command to clean up unused Docker images.")
+ return {"failed": True, "msg": msg.format(host=etcd_host, limit=limit)}
+
+ return {"changed": False}
+
+ @staticmethod
+ def _get_etcd_mountpath(ansible_mounts):
+ valid_etcd_mount_paths = ["/var/lib/etcd", "/var/lib", "/var", "/"]
+
+ mount_for_path = {mnt.get("mount"): mnt for mnt in ansible_mounts}
+ for path in valid_etcd_mount_paths:
+ if path in mount_for_path:
+ return mount_for_path[path]
+
+ paths = ', '.join(sorted(mount_for_path)) or 'none'
+ msg = "Unable to determine a valid etcd mountpath. Paths mounted: {}.".format(paths)
+ raise OpenShiftCheckException(msg)
+
+ @staticmethod
+ def _to_gigabytes(byte_size):
+ return float(byte_size) / 10.0**9
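
A brief sketch of how the default `etcd_max_image_data_size_bytes` limit is derived when that variable is unset (mount sizes assumed):

    size_total, size_available = 80 * 10**9, 40 * 10**9
    default_limit = int(0.5 * float(size_total - size_available))   # half of used space, 20 GB in bytes
    limit_in_gb = float(default_limit) / 10.0**9                     # 20.0, as reported in failure messages
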
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_volume.py b/roles/openshift_health_checker/openshift_checks/etcd_volume.py
new file mode 100644
index 000000000..7452c9cc1
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/etcd_volume.py
@@ -0,0 +1,58 @@
+"""A health check for OpenShift clusters."""
+
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+
+
+class EtcdVolume(OpenShiftCheck):
+ """Ensures etcd storage usage does not exceed a given threshold."""
+
+ name = "etcd_volume"
+ tags = ["etcd", "health"]
+
+ # Default device usage threshold. Value should be in the range [0, 100].
+ default_threshold_percent = 90
+    # Where to find etcd data, higher priority first.
+ supported_mount_paths = ["/var/lib/etcd", "/var/lib", "/var", "/"]
+
+ @classmethod
+ def is_active(cls, task_vars):
+ etcd_hosts = get_var(task_vars, "groups", "etcd", default=[]) or get_var(task_vars, "groups", "masters",
+ default=[]) or []
+ is_etcd_host = get_var(task_vars, "ansible_ssh_host") in etcd_hosts
+ return super(EtcdVolume, cls).is_active(task_vars) and is_etcd_host
+
+ def run(self, tmp, task_vars):
+ mount_info = self._etcd_mount_info(task_vars)
+ available = mount_info["size_available"]
+ total = mount_info["size_total"]
+ used = total - available
+
+ threshold = get_var(
+ task_vars,
+ "etcd_device_usage_threshold_percent",
+ default=self.default_threshold_percent
+ )
+
+ used_percent = 100.0 * used / total
+
+ if used_percent > threshold:
+ device = mount_info.get("device", "unknown")
+ mount = mount_info.get("mount", "unknown")
+ msg = "etcd storage usage ({:.1f}%) is above threshold ({:.1f}%). Device: {}, mount: {}.".format(
+ used_percent, threshold, device, mount
+ )
+ return {"failed": True, "msg": msg}
+
+ return {"changed": False}
+
+ def _etcd_mount_info(self, task_vars):
+ ansible_mounts = get_var(task_vars, "ansible_mounts")
+ mounts = {mnt.get("mount"): mnt for mnt in ansible_mounts}
+
+ for path in self.supported_mount_paths:
+ if path in mounts:
+ return mounts[path]
+
+ paths = ', '.join(sorted(mounts)) or 'none'
+ msg = "Unable to find etcd storage mount point. Paths mounted: {}.".format(paths)
+ raise OpenShiftCheckException(msg)
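
A brief sketch of the usage computation in `EtcdVolume.run` (mount sizes assumed):

    size_total, size_available, threshold = 20 * 10**9, 1 * 10**9, 90
    used_percent = 100.0 * (size_total - size_available) / size_total   # 95.0
    fails_check = used_percent > threshold                              # True: above the 90% default
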
diff --git a/roles/openshift_health_checker/openshift_checks/memory_availability.py b/roles/openshift_health_checker/openshift_checks/memory_availability.py
index 28805dc37..8b1a58ef4 100644
--- a/roles/openshift_health_checker/openshift_checks/memory_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/memory_availability.py
@@ -13,7 +13,7 @@ class MemoryAvailability(OpenShiftCheck):
recommended_memory_bytes = {
"masters": 16 * 10**9,
"nodes": 8 * 10**9,
- "etcd": 20 * 10**9,
+ "etcd": 8 * 10**9,
}
@classmethod
@@ -27,7 +27,9 @@ class MemoryAvailability(OpenShiftCheck):
group_names = get_var(task_vars, "group_names")
total_memory_bytes = get_var(task_vars, "ansible_memtotal_mb") * 10**6
- min_memory_bytes = max(self.recommended_memory_bytes.get(name, 0) for name in group_names)
+ recommended_min = max(self.recommended_memory_bytes.get(name, 0) for name in group_names)
+ configured_min = int(get_var(task_vars, "openshift_check_min_host_memory_gb", default=0)) * 10**9
+ min_memory_bytes = configured_min or recommended_min
if total_memory_bytes < min_memory_bytes:
return {
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
index 2693ae37b..6ebf0ebb2 100644
--- a/roles/openshift_health_checker/test/action_plugin_test.py
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -67,6 +67,7 @@ def changed(result):
return result.get('changed', False)
+# tests whether task is skipped, not individual checks
def skipped(result):
return result.get('skipped', False)
@@ -101,7 +102,20 @@ def test_action_plugin_skip_non_active_checks(plugin, task_vars, monkeypatch):
result = plugin.run(tmp=None, task_vars=task_vars)
- assert result['checks']['fake_check'] == {'skipped': True}
+ assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Not active for this host")
+ assert not failed(result)
+ assert not changed(result)
+ assert not skipped(result)
+
+
+def test_action_plugin_skip_disabled_checks(plugin, task_vars, monkeypatch):
+ checks = [fake_check('fake_check', is_active=True)]
+ monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
+
+ task_vars['openshift_disable_check'] = 'fake_check'
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Disabled by user request")
assert not failed(result)
assert not changed(result)
assert not skipped(result)
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
index 970b474d7..b353fa610 100644
--- a/roles/openshift_health_checker/test/disk_availability_test.py
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -42,9 +42,10 @@ def test_cannot_determine_available_disk(ansible_mounts, extra_words):
assert word in str(excinfo.value)
-@pytest.mark.parametrize('group_names,ansible_mounts', [
+@pytest.mark.parametrize('group_names,configured_min,ansible_mounts', [
(
['masters'],
+ 0,
[{
'mount': '/',
'size_available': 40 * 10**9 + 1,
@@ -52,6 +53,7 @@ def test_cannot_determine_available_disk(ansible_mounts, extra_words):
),
(
['nodes'],
+ 0,
[{
'mount': '/',
'size_available': 15 * 10**9 + 1,
@@ -59,6 +61,7 @@ def test_cannot_determine_available_disk(ansible_mounts, extra_words):
),
(
['etcd'],
+ 0,
[{
'mount': '/',
'size_available': 20 * 10**9 + 1,
@@ -66,6 +69,15 @@ def test_cannot_determine_available_disk(ansible_mounts, extra_words):
),
(
['etcd'],
+ 1, # configure lower threshold
+ [{
+ 'mount': '/',
+ 'size_available': 1 * 10**9 + 1, # way smaller than recommended
+ }],
+ ),
+ (
+ ['etcd'],
+ 0,
[{
# not enough space on / ...
'mount': '/',
@@ -77,9 +89,10 @@ def test_cannot_determine_available_disk(ansible_mounts, extra_words):
}],
),
])
-def test_succeeds_with_recommended_disk_space(group_names, ansible_mounts):
+def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansible_mounts):
task_vars = dict(
group_names=group_names,
+ openshift_check_min_host_disk_gb=configured_min,
ansible_mounts=ansible_mounts,
)
@@ -89,9 +102,10 @@ def test_succeeds_with_recommended_disk_space(group_names, ansible_mounts):
assert not result.get('failed', False)
-@pytest.mark.parametrize('group_names,ansible_mounts,extra_words', [
+@pytest.mark.parametrize('group_names,configured_min,ansible_mounts,extra_words', [
(
['masters'],
+ 0,
[{
'mount': '/',
'size_available': 1,
@@ -99,7 +113,17 @@ def test_succeeds_with_recommended_disk_space(group_names, ansible_mounts):
['0.0 GB'],
),
(
+ ['masters'],
+ 100, # set a higher threshold
+ [{
+ 'mount': '/',
+ 'size_available': 50 * 10**9, # would normally be enough...
+ }],
+ ['100.0 GB'],
+ ),
+ (
['nodes'],
+ 0,
[{
'mount': '/',
'size_available': 1 * 10**9,
@@ -108,6 +132,7 @@ def test_succeeds_with_recommended_disk_space(group_names, ansible_mounts):
),
(
['etcd'],
+ 0,
[{
'mount': '/',
'size_available': 1,
@@ -116,6 +141,7 @@ def test_succeeds_with_recommended_disk_space(group_names, ansible_mounts):
),
(
['nodes', 'masters'],
+ 0,
[{
'mount': '/',
# enough space for a node, not enough for a master
@@ -125,6 +151,7 @@ def test_succeeds_with_recommended_disk_space(group_names, ansible_mounts):
),
(
['etcd'],
+ 0,
[{
# enough space on / ...
'mount': '/',
@@ -137,9 +164,10 @@ def test_succeeds_with_recommended_disk_space(group_names, ansible_mounts):
['0.0 GB'],
),
])
-def test_fails_with_insufficient_disk_space(group_names, ansible_mounts, extra_words):
+def test_fails_with_insufficient_disk_space(group_names, configured_min, ansible_mounts, extra_words):
task_vars = dict(
group_names=group_names,
+ openshift_check_min_host_disk_gb=configured_min,
ansible_mounts=ansible_mounts,
)
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index 2a9c32f77..0379cafb5 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -3,26 +3,176 @@ import pytest
from openshift_checks.docker_image_availability import DockerImageAvailability
-@pytest.mark.xfail(strict=True) # TODO: remove this once this test is fully implemented.
-@pytest.mark.parametrize('task_vars,expected_result', [
- (
- dict(
- openshift=dict(common=dict(
+@pytest.mark.parametrize('deployment_type,is_active', [
+ ("origin", True),
+ ("openshift-enterprise", True),
+ ("enterprise", False),
+ ("online", False),
+ ("invalid", False),
+ ("", False),
+])
+def test_is_active(deployment_type, is_active):
+ task_vars = dict(
+ openshift_deployment_type=deployment_type,
+ )
+ assert DockerImageAvailability.is_active(task_vars=task_vars) == is_active
+
+
+@pytest.mark.parametrize("is_containerized,is_atomic", [
+ (True, True),
+ (False, False),
+ (True, False),
+ (False, True),
+])
+def test_all_images_available_locally(is_containerized, is_atomic):
+ def execute_module(module_name, args, task_vars):
+ if module_name == "yum":
+ return {"changed": True}
+
+ assert module_name == "docker_image_facts"
+ assert 'name' in args
+ assert args['name']
+ return {
+ 'images': [args['name']],
+ }
+
+ result = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
+ openshift=dict(
+ common=dict(
+ service_type='origin',
+ is_containerized=is_containerized,
+ is_atomic=is_atomic,
+ ),
+ docker=dict(additional_registries=["docker.io"]),
+ ),
+ openshift_deployment_type='origin',
+ openshift_release='v3.4',
+ openshift_image_tag='3.4',
+ ))
+
+ assert not result.get('failed', False)
+
+
+@pytest.mark.parametrize("available_locally", [
+ False,
+ True,
+])
+def test_all_images_available_remotely(available_locally):
+ def execute_module(module_name, args, task_vars):
+ if module_name == 'docker_image_facts':
+ return {'images': [], 'failed': available_locally}
+ return {'changed': False}
+
+ result = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
+ openshift=dict(
+ common=dict(
service_type='origin',
is_containerized=False,
- )),
- openshift_release='v3.5',
- deployment_type='origin',
- openshift_image_tag='', # FIXME: should not be required
+ is_atomic=False,
+ ),
+ docker=dict(additional_registries=["docker.io", "registry.access.redhat.com"]),
),
- {'changed': False},
+ openshift_deployment_type='origin',
+ openshift_release='3.4',
+ openshift_image_tag='v3.4',
+ ))
+
+ assert not result.get('failed', False)
+
+
+def test_all_images_unavailable():
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ if module_name == "command":
+ return {
+ 'failed': True,
+ }
+
+ return {
+ 'changed': False,
+ }
+
+ check = DockerImageAvailability(execute_module=execute_module)
+ actual = check.run(tmp=None, task_vars=dict(
+ openshift=dict(
+ common=dict(
+ service_type='origin',
+ is_containerized=False,
+ is_atomic=False,
+ ),
+ docker=dict(additional_registries=["docker.io"]),
+ ),
+ openshift_deployment_type="openshift-enterprise",
+ openshift_release=None,
+ openshift_image_tag='latest'
+ ))
+
+ assert actual['failed']
+ assert "required Docker images are not available" in actual['msg']
+
+
+@pytest.mark.parametrize("message,extra_words", [
+ (
+ "docker image update failure",
+ ["docker image update failure"],
+ ),
+ (
+ "No package matching 'skopeo' found available, installed or updated",
+ ["dependencies can be installed via `yum`"]
),
- # TODO: add more parameters here to test the multiple possible inputs that affect behavior.
])
-def test_docker_image_availability(task_vars, expected_result):
+def test_skopeo_update_failure(message, extra_words):
def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
- return {'info': {}} # TODO: this will vary depending on input parameters.
+ if module_name == "yum":
+ return {
+ "failed": True,
+ "msg": message,
+ "changed": False,
+ }
- check = DockerImageAvailability(execute_module=execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
- assert result == expected_result
+ return {'changed': False}
+
+ actual = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
+ openshift=dict(
+ common=dict(
+ service_type='origin',
+ is_containerized=False,
+ is_atomic=False,
+ ),
+ docker=dict(additional_registries=["unknown.io"]),
+ ),
+ openshift_deployment_type="openshift-enterprise",
+ openshift_release='',
+ openshift_image_tag='',
+ ))
+
+ assert actual["failed"]
+ for word in extra_words:
+ assert word in actual["msg"]
+
+
+@pytest.mark.parametrize("deployment_type,registries", [
+ ("origin", ["unknown.io"]),
+ ("openshift-enterprise", ["registry.access.redhat.com"]),
+ ("openshift-enterprise", []),
+])
+def test_registry_availability(deployment_type, registries):
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ return {
+ 'changed': False,
+ }
+
+ actual = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
+ openshift=dict(
+ common=dict(
+ service_type='origin',
+ is_containerized=False,
+ is_atomic=False,
+ ),
+ docker=dict(additional_registries=registries),
+ ),
+ openshift_deployment_type=deployment_type,
+ openshift_release='',
+ openshift_image_tag='',
+ ))
+
+ assert not actual.get("failed", False)
diff --git a/roles/openshift_health_checker/test/etcd_imagedata_size_test.py b/roles/openshift_health_checker/test/etcd_imagedata_size_test.py
new file mode 100644
index 000000000..df9d52d41
--- /dev/null
+++ b/roles/openshift_health_checker/test/etcd_imagedata_size_test.py
@@ -0,0 +1,328 @@
+import pytest
+
+from collections import namedtuple
+from openshift_checks.etcd_imagedata_size import EtcdImageDataSize, OpenShiftCheckException
+from etcdkeysize import check_etcd_key_size
+
+
+def fake_etcd_client(root):
+ fake_nodes = dict()
+ fake_etcd_node(root, fake_nodes)
+
+ clientclass = namedtuple("client", ["read"])
+ return clientclass(lambda key, recursive: fake_etcd_result(fake_nodes[key]))
+
+
+def fake_etcd_result(fake_node):
+ resultclass = namedtuple("result", ["leaves"])
+ if not fake_node.dir:
+ return resultclass([fake_node])
+
+ return resultclass(fake_node.leaves)
+
+
+def fake_etcd_node(node, visited):
+ min_req_fields = ["dir", "key"]
+ fields = list(node)
+ leaves = []
+
+ if node["dir"] and node.get("leaves"):
+ for leaf in node["leaves"]:
+ leaves.append(fake_etcd_node(leaf, visited))
+
+ if len(set(min_req_fields) - set(fields)) > 0:
+ raise ValueError("fake etcd nodes require at least {} fields.".format(min_req_fields))
+
+ if node.get("leaves"):
+ node["leaves"] = leaves
+
+ nodeclass = namedtuple("node", fields)
+ nodeinst = nodeclass(**node)
+ visited[nodeinst.key] = nodeinst
+
+ return nodeinst
+
+
+@pytest.mark.parametrize('ansible_mounts,extra_words', [
+ ([], ['none']), # empty ansible_mounts
+ ([{'mount': '/mnt'}], ['/mnt']), # missing relevant mount paths
+])
+def test_cannot_determine_available_mountpath(ansible_mounts, extra_words):
+ task_vars = dict(
+ ansible_mounts=ansible_mounts,
+ )
+ check = EtcdImageDataSize(execute_module=fake_execute_module)
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.run(tmp=None, task_vars=task_vars)
+
+ for word in 'determine valid etcd mountpath'.split() + extra_words:
+ assert word in str(excinfo.value)
+
+
+@pytest.mark.parametrize('ansible_mounts,tree,size_limit,should_fail,extra_words', [
+ (
+ # test that default image size limit evals to 1/2 * (total size in use)
+ [{
+ 'mount': '/',
+ 'size_available': 40 * 10**9,
+ 'size_total': 80 * 10**9,
+ }],
+ {"dir": False, "key": "/", "value": "1234"},
+ None,
+ False,
+ [],
+ ),
+ (
+ [{
+ 'mount': '/',
+ 'size_available': 40 * 10**9,
+ 'size_total': 48 * 10**9,
+ }],
+ {"dir": False, "key": "/", "value": "1234"},
+ None,
+ False,
+ [],
+ ),
+ (
+ # set max size limit for image data to be below total node value
+ # total node value is defined as the sum of the value field
+ # from every node
+ [{
+ 'mount': '/',
+ 'size_available': 40 * 10**9,
+ 'size_total': 48 * 10**9,
+ }],
+ {"dir": False, "key": "/", "value": "12345678"},
+ 7,
+ True,
+ ["exceeds the maximum recommended limit", "0.00 GB"],
+ ),
+ (
+ [{
+ 'mount': '/',
+ 'size_available': 48 * 10**9 - 1,
+ 'size_total': 48 * 10**9,
+ }],
+ {"dir": False, "key": "/", "value": "1234"},
+ None,
+ True,
+ ["exceeds the maximum recommended limit", "0.00 GB"],
+ )
+])
+def test_check_etcd_key_size_calculates_correct_limit(ansible_mounts, tree, size_limit, should_fail, extra_words):
+ def execute_module(module_name, args, tmp=None, task_vars=None):
+ if module_name != "etcdkeysize":
+ return {
+ "changed": False,
+ }
+
+ client = fake_etcd_client(tree)
+ s, limit_exceeded = check_etcd_key_size(client, tree["key"], args["size_limit_bytes"])
+
+ return {"size_limit_exceeded": limit_exceeded}
+
+ task_vars = dict(
+ etcd_max_image_data_size_bytes=size_limit,
+ ansible_mounts=ansible_mounts,
+ openshift=dict(
+ master=dict(etcd_hosts=["localhost"]),
+ common=dict(config_base="/var/lib/origin")
+ )
+ )
+ if size_limit is None:
+ task_vars.pop("etcd_max_image_data_size_bytes")
+
+ check = EtcdImageDataSize(execute_module=execute_module).run(tmp=None, task_vars=task_vars)
+
+ if should_fail:
+ assert check["failed"]
+
+ for word in extra_words:
+ assert word in check["msg"]
+ else:
+ assert not check.get("failed", False)
+
+
+@pytest.mark.parametrize('ansible_mounts,tree,root_path,expected_size,extra_words', [
+ (
+ [{
+ 'mount': '/',
+ 'size_available': 40 * 10**9,
+ 'size_total': 80 * 10**9,
+ }],
+ # test recursive size check on tree with height > 1
+ {
+ "dir": True,
+ "key": "/",
+ "leaves": [
+ {"dir": False, "key": "/foo1", "value": "1234"},
+ {"dir": False, "key": "/foo2", "value": "1234"},
+ {"dir": False, "key": "/foo3", "value": "1234"},
+ {"dir": False, "key": "/foo4", "value": "1234"},
+ {
+ "dir": True,
+ "key": "/foo5",
+ "leaves": [
+ {"dir": False, "key": "/foo/bar1", "value": "56789"},
+ {"dir": False, "key": "/foo/bar2", "value": "56789"},
+ {"dir": False, "key": "/foo/bar3", "value": "56789"},
+ {
+ "dir": True,
+ "key": "/foo/bar4",
+ "leaves": [
+ {"dir": False, "key": "/foo/bar/baz1", "value": "123"},
+ {"dir": False, "key": "/foo/bar/baz2", "value": "123"},
+ ]
+ },
+ ]
+ },
+ ]
+ },
+ "/",
+ 37,
+ [],
+ ),
+ (
+ [{
+ 'mount': '/',
+ 'size_available': 40 * 10**9,
+ 'size_total': 80 * 10**9,
+ }],
+ # test correct sub-tree size calculation
+ {
+ "dir": True,
+ "key": "/",
+ "leaves": [
+ {"dir": False, "key": "/foo1", "value": "1234"},
+ {"dir": False, "key": "/foo2", "value": "1234"},
+ {"dir": False, "key": "/foo3", "value": "1234"},
+ {"dir": False, "key": "/foo4", "value": "1234"},
+ {
+ "dir": True,
+ "key": "/foo5",
+ "leaves": [
+ {"dir": False, "key": "/foo/bar1", "value": "56789"},
+ {"dir": False, "key": "/foo/bar2", "value": "56789"},
+ {"dir": False, "key": "/foo/bar3", "value": "56789"},
+ {
+ "dir": True,
+ "key": "/foo/bar4",
+ "leaves": [
+ {"dir": False, "key": "/foo/bar/baz1", "value": "123"},
+ {"dir": False, "key": "/foo/bar/baz2", "value": "123"},
+ ]
+ },
+ ]
+ },
+ ]
+ },
+ "/foo5",
+ 21,
+ [],
+ ),
+ (
+ [{
+ 'mount': '/',
+ 'size_available': 40 * 10**9,
+ 'size_total': 80 * 10**9,
+ }],
+ # test that a non-existing key is handled correctly
+ {
+ "dir": False,
+ "key": "/",
+ "value": "1234",
+ },
+ "/missing",
+ 0,
+ [],
+ ),
+ (
+ [{
+ 'mount': '/',
+ 'size_available': 40 * 10**9,
+ 'size_total': 80 * 10**9,
+ }],
+ # test etcd cycle handling
+ {
+ "dir": True,
+ "key": "/",
+ "leaves": [
+ {"dir": False, "key": "/foo1", "value": "1234"},
+ {"dir": False, "key": "/foo2", "value": "1234"},
+ {"dir": False, "key": "/foo3", "value": "1234"},
+ {"dir": False, "key": "/foo4", "value": "1234"},
+ {
+ "dir": True,
+ "key": "/",
+ "leaves": [
+ {"dir": False, "key": "/foo1", "value": "1"},
+ ],
+ },
+ ]
+ },
+ "/",
+ 16,
+ [],
+ ),
+])
+def test_etcd_key_size_check_calculates_correct_size(ansible_mounts, tree, root_path, expected_size, extra_words):
+ def execute_module(module_name, args, tmp=None, task_vars=None):
+ if module_name != "etcdkeysize":
+ return {
+ "changed": False,
+ }
+
+ client = fake_etcd_client(tree)
+ size, limit_exceeded = check_etcd_key_size(client, root_path, args["size_limit_bytes"])
+
+ assert size == expected_size
+ return {
+ "size_limit_exceeded": limit_exceeded,
+ }
+
+ task_vars = dict(
+ ansible_mounts=ansible_mounts,
+ openshift=dict(
+ master=dict(etcd_hosts=["localhost"]),
+ common=dict(config_base="/var/lib/origin")
+ )
+ )
+
+ check = EtcdImageDataSize(execute_module=execute_module).run(tmp=None, task_vars=task_vars)
+ assert not check.get("failed", False)
+
+
+def test_etcdkeysize_module_failure():
+ def execute_module(module_name, tmp=None, task_vars=None):
+ if module_name != "etcdkeysize":
+ return {
+ "changed": False,
+ }
+
+ return {
+ "rc": 1,
+ "module_stderr": "failure",
+ }
+
+ task_vars = dict(
+ ansible_mounts=[{
+ 'mount': '/',
+ 'size_available': 40 * 10**9,
+ 'size_total': 80 * 10**9,
+ }],
+ openshift=dict(
+ master=dict(etcd_hosts=["localhost"]),
+ common=dict(config_base="/var/lib/origin")
+ )
+ )
+
+ check = EtcdImageDataSize(execute_module=execute_module).run(tmp=None, task_vars=task_vars)
+
+ assert check["failed"]
+ for word in "Failed to retrieve stats":
+ assert word in check["msg"]
+
+
+def fake_execute_module(*args):
+ raise AssertionError('this function should not be called')
diff --git a/roles/openshift_health_checker/test/etcd_volume_test.py b/roles/openshift_health_checker/test/etcd_volume_test.py
new file mode 100644
index 000000000..917045526
--- /dev/null
+++ b/roles/openshift_health_checker/test/etcd_volume_test.py
@@ -0,0 +1,149 @@
+import pytest
+
+from openshift_checks.etcd_volume import EtcdVolume, OpenShiftCheckException
+
+
+@pytest.mark.parametrize('ansible_mounts,extra_words', [
+ ([], ['none']), # empty ansible_mounts
+ ([{'mount': '/mnt'}], ['/mnt']), # missing relevant mount paths
+])
+def test_cannot_determine_available_disk(ansible_mounts, extra_words):
+ task_vars = dict(
+ ansible_mounts=ansible_mounts,
+ )
+ check = EtcdVolume(execute_module=fake_execute_module)
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.run(tmp=None, task_vars=task_vars)
+
+ for word in 'Unable to find etcd storage mount point'.split() + extra_words:
+ assert word in str(excinfo.value)
+
+
+@pytest.mark.parametrize('size_limit,ansible_mounts', [
+ (
+ # if no size limit is specified, expect max usage
+ # limit to default to 90% of size_total
+ None,
+ [{
+ 'mount': '/',
+ 'size_available': 40 * 10**9,
+ 'size_total': 80 * 10**9
+ }],
+ ),
+ (
+ 1,
+ [{
+ 'mount': '/',
+ 'size_available': 30 * 10**9,
+ 'size_total': 30 * 10**9,
+ }],
+ ),
+ (
+ 20000000000,
+ [{
+ 'mount': '/',
+ 'size_available': 20 * 10**9,
+ 'size_total': 40 * 10**9,
+ }],
+ ),
+ (
+ 5000000000,
+ [{
+ # not enough space on / ...
+ 'mount': '/',
+ 'size_available': 0,
+ 'size_total': 0,
+ }, {
+ # not enough space on /var/lib ...
+ 'mount': '/var/lib',
+ 'size_available': 2 * 10**9,
+ 'size_total': 21 * 10**9,
+ }, {
+ # ... but enough on /var/lib/etcd
+ 'mount': '/var/lib/etcd',
+ 'size_available': 36 * 10**9,
+ 'size_total': 40 * 10**9
+ }],
+ )
+])
+def test_succeeds_with_recommended_disk_space(size_limit, ansible_mounts):
+ task_vars = dict(
+ etcd_device_usage_threshold_percent=size_limit,
+ ansible_mounts=ansible_mounts,
+ )
+
+ if task_vars["etcd_device_usage_threshold_percent"] is None:
+ task_vars.pop("etcd_device_usage_threshold_percent")
+
+ check = EtcdVolume(execute_module=fake_execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+
+ assert not result.get('failed', False)
+
+
+@pytest.mark.parametrize('size_limit_percent,ansible_mounts,extra_words', [
+ (
+ # if no size limit is specified, expect max usage
+ # limit to default to 90% of size_total
+ None,
+ [{
+ 'mount': '/',
+ 'size_available': 1 * 10**9,
+ 'size_total': 100 * 10**9,
+ }],
+ ['99.0%'],
+ ),
+ (
+ 70.0,
+ [{
+ 'mount': '/',
+ 'size_available': 1 * 10**6,
+ 'size_total': 5 * 10**9,
+ }],
+ ['100.0%'],
+ ),
+ (
+ 40.0,
+ [{
+ 'mount': '/',
+ 'size_available': 2 * 10**9,
+ 'size_total': 6 * 10**9,
+ }],
+ ['66.7%'],
+ ),
+ (
+ None,
+ [{
+ # enough space on /var ...
+ 'mount': '/var',
+ 'size_available': 20 * 10**9,
+ 'size_total': 20 * 10**9,
+ }, {
+ # .. but not enough on /var/lib
+ 'mount': '/var/lib',
+ 'size_available': 1 * 10**9,
+ 'size_total': 20 * 10**9,
+ }],
+ ['95.0%'],
+ ),
+])
+def test_fails_with_insufficient_disk_space(size_limit_percent, ansible_mounts, extra_words):
+ task_vars = dict(
+ etcd_device_usage_threshold_percent=size_limit_percent,
+ ansible_mounts=ansible_mounts,
+ )
+
+ if task_vars["etcd_device_usage_threshold_percent"] is None:
+ task_vars.pop("etcd_device_usage_threshold_percent")
+
+ check = EtcdVolume(execute_module=fake_execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+
+ assert result['failed']
+ for word in extra_words:
+ assert word in result['msg']
+
+
+def fake_execute_module(*args):
+ raise AssertionError('this function should not be called')
diff --git a/roles/openshift_health_checker/test/memory_availability_test.py b/roles/openshift_health_checker/test/memory_availability_test.py
index e161a5b9e..1db203854 100644
--- a/roles/openshift_health_checker/test/memory_availability_test.py
+++ b/roles/openshift_health_checker/test/memory_availability_test.py
@@ -20,27 +20,37 @@ def test_is_active(group_names, is_active):
assert MemoryAvailability.is_active(task_vars=task_vars) == is_active
-@pytest.mark.parametrize('group_names,ansible_memtotal_mb', [
+@pytest.mark.parametrize('group_names,configured_min,ansible_memtotal_mb', [
(
['masters'],
+ 0,
17200,
),
(
['nodes'],
+ 0,
8200,
),
(
+ ['nodes'],
+ 1, # configure lower threshold
+ 2000, # too low for recommended but not for configured
+ ),
+ (
['etcd'],
- 22200,
+ 0,
+ 8200,
),
(
['masters', 'nodes'],
+ 0,
17000,
),
])
-def test_succeeds_with_recommended_memory(group_names, ansible_memtotal_mb):
+def test_succeeds_with_recommended_memory(group_names, configured_min, ansible_memtotal_mb):
task_vars = dict(
group_names=group_names,
+ openshift_check_min_host_memory_gb=configured_min,
ansible_memtotal_mb=ansible_memtotal_mb,
)
@@ -50,39 +60,56 @@ def test_succeeds_with_recommended_memory(group_names, ansible_memtotal_mb):
assert not result.get('failed', False)
-@pytest.mark.parametrize('group_names,ansible_memtotal_mb,extra_words', [
+@pytest.mark.parametrize('group_names,configured_min,ansible_memtotal_mb,extra_words', [
(
['masters'],
0,
+ 0,
['0.0 GB'],
),
(
['nodes'],
+ 0,
100,
['0.1 GB'],
),
(
+ ['nodes'],
+ 24, # configure higher threshold
+ 20000, # enough to meet recommended but not configured
+ ['20.0 GB'],
+ ),
+ (
['etcd'],
- -1,
- ['0.0 GB'],
+ 0,
+ 7000,
+ ['7.0 GB'],
+ ),
+ (
+ ['etcd', 'masters'],
+ 0,
+ 9000, # enough memory for etcd, not enough for a master
+ ['9.0 GB'],
),
(
['nodes', 'masters'],
+ 0,
# enough memory for a node, not enough for a master
11000,
['11.0 GB'],
),
])
-def test_fails_with_insufficient_memory(group_names, ansible_memtotal_mb, extra_words):
+def test_fails_with_insufficient_memory(group_names, configured_min, ansible_memtotal_mb, extra_words):
task_vars = dict(
group_names=group_names,
+ openshift_check_min_host_memory_gb=configured_min,
ansible_memtotal_mb=ansible_memtotal_mb,
)
check = MemoryAvailability(execute_module=fake_execute_module)
result = check.run(tmp=None, task_vars=task_vars)
- assert result['failed']
+ assert result.get('failed', False)
for word in 'below recommended'.split() + extra_words:
assert word in result['msg']
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index e7e62e5e4..089054e2f 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -30,3 +30,8 @@ openshift_hosted_routers:
openshift_hosted_router_certificate: {}
openshift_hosted_registry_cert_expire_days: 730
openshift_hosted_router_create_certificate: False
+
+os_firewall_allow:
+- service: Docker Registry Port
+ port: 5000/tcp
+ when: openshift.common.use_calico | bool
diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml
index 9626c23c1..9e3f37130 100644
--- a/roles/openshift_hosted/meta/main.yml
+++ b/roles/openshift_hosted/meta/main.yml
@@ -15,3 +15,8 @@ dependencies:
- role: openshift_cli
- role: openshift_hosted_facts
- role: lib_openshift
+- role: os_firewall
+ os_firewall_allow:
+ - service: Docker Registry Port
+ port: 5000/tcp
+ when: openshift.common.use_calico | bool
diff --git a/roles/openshift_hosted/tasks/registry/storage/s3.yml b/roles/openshift_hosted/tasks/registry/storage/s3.yml
index 26f921f15..318969885 100644
--- a/roles/openshift_hosted/tasks/registry/storage/s3.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/s3.yml
@@ -2,14 +2,10 @@
- name: Assert that S3 variables are provided for registry_config template
assert:
that:
- - openshift.hosted.registry.storage.s3.accesskey | default(none) is not none
- - openshift.hosted.registry.storage.s3.secretkey | default(none) is not none
- openshift.hosted.registry.storage.s3.bucket | default(none) is not none
- openshift.hosted.registry.storage.s3.region | default(none) is not none
msg: |
When using S3 storage, the following variables are required:
- openshift_hosted_registry_storage_s3_accesskey
- openshift_hosted_registry_storage_s3_secretkey
openshift_hosted_registry_storage_s3_bucket
openshift_hosted_registry_storage_s3_region
diff --git a/roles/openshift_hosted/templates/registry_config.j2 b/roles/openshift_hosted/templates/registry_config.j2
index ca6a23f21..dc8a9f089 100644
--- a/roles/openshift_hosted/templates/registry_config.j2
+++ b/roles/openshift_hosted/templates/registry_config.j2
@@ -10,8 +10,12 @@ storage:
blobdescriptor: inmemory
{% if openshift_hosted_registry_storage_provider | default('') == 's3' %}
s3:
+{% if openshift_hosted_registry_storage_s3_accesskey is defined %}
accesskey: {{ openshift_hosted_registry_storage_s3_accesskey }}
+{% endif %}
+{% if openshift_hosted_registry_storage_s3_secretkey is defined %}
secretkey: {{ openshift_hosted_registry_storage_s3_secretkey }}
+{% endif %}
region: {{ openshift_hosted_registry_storage_s3_region }}
{% if openshift_hosted_registry_storage_s3_regionendpoint is defined %}
regionendpoint: {{ openshift_hosted_registry_storage_s3_regionendpoint }}
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index cba0f2de8..3c410eff2 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -97,3 +97,30 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta
- `openshift_logging_kibana_ops_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_ops_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_ops_replica_count`: The number of replicas Kibana ops should be scaled up to. Defaults to 1.
+
+Elasticsearch can be exposed to external clients outside of the cluster.
+- `openshift_logging_es_allow_external`: True (default is False) - if this is
+ True, Elasticsearch will be exposed as a Route
+- `openshift_logging_es_hostname`: The external-facing hostname to use for
+ the route and the TLS server certificate (default is "es." +
+ `openshift_master_default_subdomain`)
+- `openshift_logging_es_cert`: The location of the certificate Elasticsearch
+ uses for the external TLS server cert (default is a generated cert)
+- `openshift_logging_es_key`: The location of the key Elasticsearch
+ uses for the external TLS server cert (default is a generated key)
+- `openshift_logging_es_ca_ext`: The location of the CA cert for the cert
+ Elasticsearch uses for the external TLS server cert (default is the internal
+ CA)
+The same options apply to the Elasticsearch OPS cluster, if one is in use:
+- `openshift_logging_es_ops_allow_external`: True (default is False) - if this is
+ True, Elasticsearch will be exposed as a Route
+- `openshift_logging_es_ops_hostname`: The external-facing hostname to use for
+ the route and the TLS server certificate (default is "es-ops." +
+ `openshift_master_default_subdomain`)
+- `openshift_logging_es_ops_cert`: The location of the certificate Elasticsearch
+ uses for the external TLS server cert (default is a generated cert)
+- `openshift_logging_es_ops_key`: The location of the key Elasticsearch
+ uses for the external TLS server cert (default is a generated key)
+- `openshift_logging_es_ops_ca_ext`: The location of the CA cert for the cert
+ Elasticsearch uses for the external TLS server cert (default is the internal
+ CA)
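
For orientation, an illustrative set of values for the variables documented above (hostname and file paths are assumptions; in practice these are set in the Ansible inventory or passed with `-e`):

    external_es_vars = {
        "openshift_logging_es_allow_external": True,
        "openshift_logging_es_hostname": "es.apps.example.com",   # assumed domain
        "openshift_logging_es_cert": "/path/to/es.crt",           # assumed paths
        "openshift_logging_es_key": "/path/to/es.key",
        "openshift_logging_es_ca_ext": "/path/to/ca.crt",
    }
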
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index f43336dc4..837c54067 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -99,6 +99,22 @@ openshift_logging_es_config: {}
openshift_logging_es_number_of_shards: 1
openshift_logging_es_number_of_replicas: 0
+# for exposing es to external (outside of the cluster) clients
+openshift_logging_es_allow_external: False
+openshift_logging_es_hostname: "{{ 'es.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+
+#The absolute path on the control node to the cert file to use
+#for the public facing es certs
+openshift_logging_es_cert: ""
+
+#The absolute path on the control node to the key file to use
+#for the public facing es certs
+openshift_logging_es_key: ""
+
+#The absolute path on the control node to the CA file to use
+#for the public facing es certs
+openshift_logging_es_ca_ext: ""
+
# allow cluster-admin or cluster-reader to view operations index
openshift_logging_es_ops_allow_cluster_reader: False
@@ -118,6 +134,22 @@ openshift_logging_es_ops_recover_after_time: 5m
openshift_logging_es_ops_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}"
openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}"
+# for exposing es-ops to external (outside of the cluster) clients
+openshift_logging_es_ops_allow_external: False
+openshift_logging_es_ops_hostname: "{{ 'es-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+
+#The absolute path on the control node to the cert file to use
+#for the public facing es-ops certs
+openshift_logging_es_ops_cert: ""
+
+#The absolute path on the control node to the key file to use
+#for the public facing es-ops certs
+openshift_logging_es_ops_key: ""
+
+#The absolute path on the control node to the CA file to use
+#for the public facing es-ops certs
+openshift_logging_es_ops_ca_ext: ""
+
# storage related defaults
openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default(['ReadWriteOnce']) }}"
diff --git a/roles/openshift_logging/files/logging-deployer-sa.yaml b/roles/openshift_logging/files/logging-deployer-sa.yaml
deleted file mode 100644
index 334c9402b..000000000
--- a/roles/openshift_logging/files/logging-deployer-sa.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: logging-deployer
-secrets:
-- name: logging-deployer
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
index 44b0b2d48..eac086e81 100644
--- a/roles/openshift_logging/filter_plugins/openshift_logging.py
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -42,7 +42,7 @@ def map_from_pairs(source, delim="="):
if source == '':
return dict()
- return dict(source.split(delim) for item in source.split(","))
+ return dict(item.split(delim) for item in source.split(","))
# pylint: disable=too-few-public-methods
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
index a55e72725..35accfb78 100644
--- a/roles/openshift_logging/library/openshift_logging_facts.py
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -318,7 +318,7 @@ def main():
''' The main method '''
module = AnsibleModule( # noqa: F405
argument_spec=dict(
- admin_kubeconfig={"required": True, "type": "str"},
+ admin_kubeconfig={"default": "/etc/origin/master/admin.kubeconfig", "type": "str"},
oc_bin={"required": True, "type": "str"},
openshift_logging_namespace={"required": True, "type": "str"}
),
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index 2f5b68b4d..0c7152b16 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -1,43 +1,42 @@
---
-- name: stop logging
- include: stop_cluster.yaml
-
# delete the deployment objects that we had created
- name: delete logging api objects
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete {{ item }} --selector logging-infra -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ oc_obj:
+ state: absent
+ kind: "{{ item }}"
+ namespace: "{{ openshift_logging_namespace }}"
+ selector: "logging-infra"
with_items:
- dc
- rc
- svc
- routes
- templates
- - daemonset
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
-
+ - ds
# delete the oauthclient
- name: delete oauthclient kibana-proxy
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete oauthclient kibana-proxy --ignore-not-found=true
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+ oc_obj:
+ state: absent
+ kind: oauthclient
+ namespace: "{{ openshift_logging_namespace }}"
+ name: kibana-proxy
# delete any image streams that we may have created
- name: delete logging is
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete is -l logging-infra=support -n {{ openshift_logging_namespace }} --ignore-not-found=true
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+ oc_obj:
+ state: absent
+ kind: is
+ namespace: "{{ openshift_logging_namespace }}"
+ selector: "logging-infra=support"
# delete our old secrets
- name: delete logging secrets
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete secret {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ oc_obj:
+ state: absent
+ kind: secret
+ namespace: "{{ openshift_logging_namespace }}"
+ name: "{{ item }}"
with_items:
- logging-fluentd
- logging-elasticsearch
@@ -45,71 +44,55 @@
- logging-kibana-proxy
- logging-curator
- logging-mux
- ignore_errors: yes
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
-
-# delete role bindings
-- name: delete rolebindings
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete rolebinding {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
- with_items:
- - logging-elasticsearch-view-role
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
-
-# delete cluster role bindings
-- name: delete cluster role bindings
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete clusterrolebindings {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
- with_items:
- - rolebinding-reader
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
-
-# delete cluster roles
-- name: delete cluster roles
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete clusterroles {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
- with_items:
- - rolebinding-reader
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
# delete our service accounts
- name: delete service accounts
oc_serviceaccount:
- name: "{{ item }}"
- namespace: "{{ openshift_logging_namespace }}"
state: absent
+ namespace: "{{ openshift_logging_namespace }}"
+ name: "{{ item }}"
with_items:
- aggregated-logging-elasticsearch
- aggregated-logging-kibana
- aggregated-logging-curator
- aggregated-logging-fluentd
-# delete our roles
-- name: delete roles
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete clusterrole {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+# delete role bindings
+- name: delete rolebindings
+ oc_obj:
+ state: absent
+ kind: rolebinding
+ namespace: "{{ openshift_logging_namespace }}"
+ name: logging-elasticsearch-view-role
+
+# delete cluster role bindings
+- name: delete cluster role bindings
+ oc_obj:
+ state: absent
+ kind: clusterrolebindings
+ namespace: "{{ openshift_logging_namespace }}"
+ name: rolebinding-reader
+
+# delete cluster roles
+- name: delete cluster roles
+ oc_obj:
+ state: absent
+ kind: clusterrole
+ namespace: "{{ openshift_logging_namespace }}"
+ name: "{{ item }}"
with_items:
+ - rolebinding-reader
- daemonset-admin
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
# delete our configmaps
- name: delete configmaps
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete configmap {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ oc_obj:
+ state: absent
+ kind: configmap
+ namespace: "{{ openshift_logging_namespace }}"
+ name: "{{ item }}"
with_items:
- logging-curator
- logging-elasticsearch
- logging-fluentd
- logging-mux
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index b34df018d..7169c4036 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -51,14 +51,32 @@
with_items:
- procure_component: mux
hostnames: "logging-mux, {{openshift_logging_mux_hostname}}"
- when: openshift_logging_use_mux
+ when: openshift_logging_use_mux | bool
- include: procure_shared_key.yaml
loop_control:
loop_var: shared_key_info
with_items:
- procure_component: mux
- when: openshift_logging_use_mux
+ when: openshift_logging_use_mux | bool
+
+- include: procure_server_certs.yaml
+ loop_control:
+ loop_var: cert_info
+ with_items:
+ - procure_component: es
+ hostnames: "es, {{openshift_logging_es_hostname}}"
+ when: openshift_logging_es_allow_external | bool
+
+- include: procure_server_certs.yaml
+ loop_control:
+ loop_var: cert_info
+ with_items:
+ - procure_component: es-ops
+ hostnames: "es-ops, {{openshift_logging_es_ops_hostname}}"
+ when:
+ - openshift_logging_es_allow_external | bool
+ - openshift_logging_use_ops | bool
- name: Copy proxy TLS configuration file
copy: src=server-tls.json dest={{generated_certs_dir}}/server-tls.json
@@ -108,6 +126,14 @@
loop_var: node_name
when: openshift_logging_use_mux
+- name: Generate PEM cert for Elasticsearch external route
+ include: generate_pems.yaml component={{node_name}}
+ with_items:
+ - system.logging.es
+ loop_control:
+ loop_var: node_name
+ when: openshift_logging_es_allow_external | bool
+
- name: Creating necessary JKS certs
include: generate_jks.yaml
diff --git a/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml b/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml
deleted file mode 100644
index 56f590717..000000000
--- a/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Generate ClusterRoleBindings
- template: src=clusterrolebinding.j2 dest={{mktemp.stdout}}/templates/logging-15-{{obj_name}}-clusterrolebinding.yaml
- vars:
- acct_name: aggregated-logging-elasticsearch
- obj_name: rolebinding-reader
- crb_usernames: ["system:serviceaccount:{{openshift_logging_namespace}}:{{acct_name}}"]
- subjects:
- - kind: ServiceAccount
- name: "{{acct_name}}"
- namespace: "{{openshift_logging_namespace}}"
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_clusterroles.yaml b/roles/openshift_logging/tasks/generate_clusterroles.yaml
deleted file mode 100644
index 0b8b1014c..000000000
--- a/roles/openshift_logging/tasks/generate_clusterroles.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Generate ClusterRole for cluster-reader
- template: src=clusterrole.j2 dest={{mktemp.stdout}}/templates/logging-10-{{obj_name}}-clusterrole.yaml
- vars:
- obj_name: rolebinding-reader
- rules:
- - resources: [clusterrolebindings]
- verbs:
- - get
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml
deleted file mode 100644
index b047eb35a..000000000
--- a/roles/openshift_logging/tasks/generate_configmaps.yaml
+++ /dev/null
@@ -1,178 +0,0 @@
----
-- block:
- - fail:
- msg: "The openshift_logging_es_log_appenders '{{openshift_logging_es_log_appenders}}' has an unrecognized option and only supports the following as a list: {{es_log_appenders | join(', ')}}"
- when:
- - es_logging_contents is undefined
- - "{{ openshift_logging_es_log_appenders | list | difference(es_log_appenders) | length != 0 }}"
- changed_when: no
-
- - template:
- src: elasticsearch-logging.yml.j2
- dest: "{{mktemp.stdout}}/elasticsearch-logging.yml"
- vars:
- root_logger: "{{openshift_logging_es_log_appenders | join(', ')}}"
- when: es_logging_contents is undefined
- changed_when: no
- check_mode: no
-
- - local_action: >
- template src=elasticsearch.yml.j2
- dest="{{local_tmp.stdout}}/elasticsearch-gen-template.yml"
- vars:
- - allow_cluster_reader: "{{openshift_logging_es_ops_allow_cluster_reader | lower | default('false')}}"
- - es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
- - es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}"
- when: es_config_contents is undefined
- changed_when: no
-
- - copy:
- content: "{{ config_source | combine(override_config,recursive=True) | to_nice_yaml }}"
- dest: "{{mktemp.stdout}}/elasticsearch.yml"
- vars:
- config_source: "{{lookup('file','{{local_tmp.stdout}}/elasticsearch-gen-template.yml') | from_yaml }}"
- override_config: "{{openshift_logging_es_config | from_yaml}}"
- when: es_logging_contents is undefined
- changed_when: no
-
- - copy:
- content: "{{es_logging_contents}}"
- dest: "{{mktemp.stdout}}/elasticsearch-logging.yml"
- when: es_logging_contents is defined
- changed_when: no
-
- - copy:
- content: "{{es_config_contents}}"
- dest: "{{mktemp.stdout}}/elasticsearch.yml"
- when: es_config_contents is defined
- changed_when: no
-
- - command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-elasticsearch
- --from-file=logging.yml={{mktemp.stdout}}/elasticsearch-logging.yml --from-file=elasticsearch.yml={{mktemp.stdout}}/elasticsearch.yml -o yaml --dry-run
- register: es_configmap
- changed_when: no
-
- - copy:
- content: "{{es_configmap.stdout}}"
- dest: "{{mktemp.stdout}}/templates/logging-elasticsearch-configmap.yaml"
- when: es_configmap.stdout is defined
- changed_when: no
- check_mode: no
-
-- block:
- - copy:
- src: curator.yml
- dest: "{{mktemp.stdout}}/curator.yml"
- when: curator_config_contents is undefined
- changed_when: no
-
- - copy:
- content: "{{curator_config_contents}}"
- dest: "{{mktemp.stdout}}/curator.yml"
- when: curator_config_contents is defined
- changed_when: no
-
- - command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-curator
- --from-file=config.yaml={{mktemp.stdout}}/curator.yml -o yaml --dry-run
- register: curator_configmap
- changed_when: no
-
- - copy:
- content: "{{curator_configmap.stdout}}"
- dest: "{{mktemp.stdout}}/templates/logging-curator-configmap.yaml"
- when: curator_configmap.stdout is defined
- changed_when: no
- check_mode: no
-
-- block:
- - copy:
- src: fluent.conf
- dest: "{{mktemp.stdout}}/fluent.conf"
- when: fluentd_config_contents is undefined
- changed_when: no
-
- - copy:
- src: fluentd-throttle-config.yaml
- dest: "{{mktemp.stdout}}/fluentd-throttle-config.yaml"
- when: fluentd_throttle_contents is undefined
- changed_when: no
-
- - copy:
- src: secure-forward.conf
- dest: "{{mktemp.stdout}}/secure-forward.conf"
- when: fluentd_securefoward_contents is undefined
- changed_when: no
-
- - copy:
- content: "{{fluentd_config_contents}}"
- dest: "{{mktemp.stdout}}/fluent.conf"
- when: fluentd_config_contents is defined
- changed_when: no
-
- - copy:
- content: "{{fluentd_throttle_contents}}"
- dest: "{{mktemp.stdout}}/fluentd-throttle-config.yaml"
- when: fluentd_throttle_contents is defined
- changed_when: no
-
- - copy:
- content: "{{fluentd_secureforward_contents}}"
- dest: "{{mktemp.stdout}}/secure-forward.conf"
- when: fluentd_secureforward_contents is defined
- changed_when: no
-
- - command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-fluentd
- --from-file=fluent.conf={{mktemp.stdout}}/fluent.conf --from-file=throttle-config.yaml={{mktemp.stdout}}/fluentd-throttle-config.yaml
- --from-file=secure-forward.conf={{mktemp.stdout}}/secure-forward.conf -o yaml --dry-run
- register: fluentd_configmap
- changed_when: no
-
- - copy:
- content: "{{fluentd_configmap.stdout}}"
- dest: "{{mktemp.stdout}}/templates/logging-fluentd-configmap.yaml"
- when: fluentd_configmap.stdout is defined
- changed_when: no
- check_mode: no
-
-- block:
- - copy:
- src: fluent.conf
- dest: "{{mktemp.stdout}}/fluent-mux.conf"
- when: fluentd_mux_config_contents is undefined
- changed_when: no
-
- - copy:
- src: secure-forward.conf
- dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
- when: fluentd_mux_securefoward_contents is undefined
- changed_when: no
-
- - copy:
- content: "{{fluentd_mux_config_contents}}"
- dest: "{{mktemp.stdout}}/fluent-mux.conf"
- when: fluentd_mux_config_contents is defined
- changed_when: no
-
- - copy:
- content: "{{fluentd_mux_secureforward_contents}}"
- dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
- when: fluentd_mux_secureforward_contents is defined
- changed_when: no
-
- - command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-mux
- --from-file=fluent.conf={{mktemp.stdout}}/fluent-mux.conf
- --from-file=secure-forward.conf={{mktemp.stdout}}/secure-forward-mux.conf -o yaml --dry-run
- register: mux_configmap
- changed_when: no
-
- - copy:
- content: "{{mux_configmap.stdout}}"
- dest: "{{mktemp.stdout}}/templates/logging-mux-configmap.yaml"
- when: mux_configmap.stdout is defined
- changed_when: no
- check_mode: no
- when: openshift_logging_use_mux
diff --git a/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml b/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml
deleted file mode 100644
index 8aea4e81f..000000000
--- a/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
----
-- name: Generate kibana deploymentconfig
- template: src=kibana.j2 dest={{mktemp.stdout}}/logging-kibana-dc.yaml
- vars:
- component: kibana
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
- proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
- es_host: logging-es
- es_port: "{{openshift_logging_es_port}}"
- check_mode: no
- changed_when: no
-
-- name: Generate OPS kibana deploymentconfig
- template: src=kibana.j2 dest={{mktemp.stdout}}/logging-kibana-ops-dc.yaml
- vars:
- component: kibana-ops
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
- proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
- es_host: logging-es-ops
- es_port: "{{openshift_logging_es_ops_port}}"
- check_mode: no
- changed_when: no
-
-- name: Generate elasticsearch deploymentconfig
- template: src=es.j2 dest={{mktemp.stdout}}/logging-es-dc.yaml
- vars:
- component: es
- deploy_name_prefix: "logging-{{component}}"
- deploy_name: "{{deploy_name_prefix}}-abc123"
- image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- es_cluster_name: "{{component}}"
- check_mode: no
- changed_when: no
-
-- name: Generate OPS elasticsearch deploymentconfig
- template: src=es.j2 dest={{mktemp.stdout}}/logging-es-ops-dc.yaml
- vars:
- component: es-ops
- deploy_name_prefix: "logging-{{component}}"
- deploy_name: "{{deploy_name_prefix}}-abc123"
- image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- es_cluster_name: "{{component}}"
- check_mode: no
- changed_when: no
-
-- name: Generate curator deploymentconfig
- template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-dc.yaml
- vars:
- component: curator
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
- check_mode: no
- changed_when: no
-
-- name: Generate OPS curator deploymentconfig
- template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-ops-dc.yaml
- vars:
- component: curator-ops
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
- openshift_logging_es_host: logging-es-ops
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_pvcs.yaml b/roles/openshift_logging/tasks/generate_pvcs.yaml
deleted file mode 100644
index fa7a86c27..000000000
--- a/roles/openshift_logging/tasks/generate_pvcs.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-- name: Init pool of PersistentVolumeClaim names
- set_fact: es_pvc_pool={{es_pvc_pool|default([]) + [pvc_name]}}
- vars:
- pvc_name: "{{es_pvc_prefix}}-{{item| int}}"
- start: "{{es_pvc_names | map('regex_search', es_pvc_prefix+'.*')|select('string')|list|length}}"
- with_sequence: start={{start}} end={{ (start|int > es_cluster_size|int - 1) | ternary(start, es_cluster_size|int - 1)}}
- when:
- - "{{ es_dc_names|default([]) | length <= es_cluster_size|int }}"
- - es_pvc_size | search('^\d.*')
- check_mode: no
-
-- name: Generating PersistentVolumeClaims
- template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
- vars:
- obj_name: "{{claim_name}}"
- size: "{{es_pvc_size}}"
- access_modes: "{{ es_access_modes | list }}"
- pv_selector: "{{es_pv_selector}}"
- with_items:
- - "{{es_pvc_pool | default([])}}"
- loop_control:
- loop_var: claim_name
- when:
- - not es_pvc_dynamic
- - es_pvc_pool is defined
- check_mode: no
- changed_when: no
-
-- name: Generating PersistentVolumeClaims - Dynamic
- template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
- vars:
- obj_name: "{{claim_name}}"
- annotations:
- volume.alpha.kubernetes.io/storage-class: "dynamic"
- size: "{{es_pvc_size}}"
- access_modes: "{{ es_access_modes | list }}"
- pv_selector: "{{es_pv_selector}}"
- with_items:
- - "{{es_pvc_pool|default([])}}"
- loop_control:
- loop_var: claim_name
- when:
- - es_pvc_dynamic
- - es_pvc_pool is defined
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_rolebindings.yaml b/roles/openshift_logging/tasks/generate_rolebindings.yaml
deleted file mode 100644
index 7dc9530df..000000000
--- a/roles/openshift_logging/tasks/generate_rolebindings.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Generate RoleBindings
- template: src=rolebinding.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-rolebinding.yaml
- vars:
- obj_name: logging-elasticsearch-view-role
- roleRef:
- name: view
- subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml
deleted file mode 100644
index f76bb3a0a..000000000
--- a/roles/openshift_logging/tasks/generate_routes.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
----
-- set_fact: kibana_key={{ lookup('file', openshift_logging_kibana_key) | b64encode }}
- when: openshift_logging_kibana_key | trim | length > 0
- changed_when: false
-
-- set_fact: kibana_cert={{ lookup('file', openshift_logging_kibana_cert)| b64encode }}
- when: openshift_logging_kibana_cert | trim | length > 0
- changed_when: false
-
-- set_fact: kibana_ca={{ lookup('file', openshift_logging_kibana_ca)| b64encode }}
- when: openshift_logging_kibana_ca | trim | length > 0
- changed_when: false
-
-- set_fact: kibana_ca={{key_pairs | entry_from_named_pair('ca_file') }}
- when: kibana_ca is not defined
- changed_when: false
-
-- name: Generating logging routes
- template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-logging-kibana-route.yaml
- tags: routes
- vars:
- obj_name: "logging-kibana"
- route_host: "{{openshift_logging_kibana_hostname}}"
- service_name: "logging-kibana"
- tls_key: "{{kibana_key | default('') | b64decode}}"
- tls_cert: "{{kibana_cert | default('') | b64decode}}"
- tls_ca_cert: "{{kibana_ca | b64decode}}"
- tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
- edge_term_policy: "{{openshift_logging_kibana_edge_term_policy | default('') }}"
- labels:
- component: support
- logging-infra: support
- provider: openshift
- changed_when: no
-
-- set_fact: kibana_ops_key={{ lookup('file', openshift_logging_kibana_ops_key) | b64encode }}
- when:
- - openshift_logging_use_ops | bool
- - "{{ openshift_logging_kibana_ops_key | trim | length > 0 }}"
- changed_when: false
-
-- set_fact: kibana_ops_cert={{ lookup('file', openshift_logging_kibana_ops_cert)| b64encode }}
- when:
- - openshift_logging_use_ops | bool
- - "{{openshift_logging_kibana_ops_cert | trim | length > 0}}"
- changed_when: false
-
-- set_fact: kibana_ops_ca={{ lookup('file', openshift_logging_kibana_ops_ca)| b64encode }}
- when:
- - openshift_logging_use_ops | bool
- - "{{openshift_logging_kibana_ops_ca | trim | length > 0}}"
- changed_when: false
-
-- set_fact: kibana_ops_ca={{key_pairs | entry_from_named_pair('ca_file') }}
- when:
- - openshift_logging_use_ops | bool
- - kibana_ops_ca is not defined
- changed_when: false
-
-- name: Generating logging ops routes
- template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-logging-kibana-ops-route.yaml
- tags: routes
- vars:
- obj_name: "logging-kibana-ops"
- route_host: "{{openshift_logging_kibana_ops_hostname}}"
- service_name: "logging-kibana-ops"
- tls_key: "{{kibana_ops_key | default('') | b64decode}}"
- tls_cert: "{{kibana_ops_cert | default('') | b64decode}}"
- tls_ca_cert: "{{kibana_ops_ca | b64decode}}"
- tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
- edge_term_policy: "{{openshift_logging_kibana_edge_term_policy | default('') }}"
- labels:
- component: support
- logging-infra: support
- provider: openshift
- when: openshift_logging_use_ops | bool
- changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml
deleted file mode 100644
index c1da49fd8..000000000
--- a/roles/openshift_logging/tasks/generate_secrets.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
----
-- name: Retrieving the cert to use when generating secrets for the logging components
- slurp: src="{{generated_certs_dir}}/{{item.file}}"
- register: key_pairs
- with_items:
- - { name: "ca_file", file: "ca.crt" }
- - { name: "kibana_key", file: "system.logging.kibana.key"}
- - { name: "kibana_cert", file: "system.logging.kibana.crt"}
- - { name: "curator_key", file: "system.logging.curator.key"}
- - { name: "curator_cert", file: "system.logging.curator.crt"}
- - { name: "fluentd_key", file: "system.logging.fluentd.key"}
- - { name: "fluentd_cert", file: "system.logging.fluentd.crt"}
- - { name: "kibana_internal_key", file: "kibana-internal.key"}
- - { name: "kibana_internal_cert", file: "kibana-internal.crt"}
- - { name: "server_tls", file: "server-tls.json"}
-
-- name: Generating secrets for logging components
- template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
- vars:
- secret_name: "logging-{{component}}"
- secret_key_file: "{{component}}_key"
- secret_cert_file: "{{component}}_cert"
- secrets:
- - {key: ca, value: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"}
- - {key: key, value: "{{key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"}
- - {key: cert, value: "{{key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"}
- secret_keys: ["ca", "cert", "key"]
- with_items:
- - kibana
- - curator
- - fluentd
- loop_control:
- loop_var: component
- check_mode: no
- changed_when: no
-
-- name: Retrieving the cert to use when generating secrets for mux
- slurp: src="{{generated_certs_dir}}/{{item.file}}"
- register: mux_key_pairs
- with_items:
- - { name: "ca_file", file: "ca.crt" }
- - { name: "mux_key", file: "system.logging.mux.key"}
- - { name: "mux_cert", file: "system.logging.mux.crt"}
- - { name: "mux_shared_key", file: "mux_shared_key"}
- when: openshift_logging_use_mux
-
-- name: Generating secrets for mux
- template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
- vars:
- secret_name: "logging-{{component}}"
- secret_key_file: "{{component}}_key"
- secret_cert_file: "{{component}}_cert"
- secrets:
- - {key: ca, value: "{{mux_key_pairs | entry_from_named_pair('ca_file')| b64decode }}"}
- - {key: key, value: "{{mux_key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"}
- - {key: cert, value: "{{mux_key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"}
- - {key: shared_key, value: "{{mux_key_pairs | entry_from_named_pair('mux_shared_key')| b64decode }}"}
- secret_keys: ["ca", "cert", "key", "shared_key"]
- with_items:
- - mux
- loop_control:
- loop_var: component
- check_mode: no
- changed_when: no
- when: openshift_logging_use_mux
-
-- name: Generating secrets for kibana proxy
- template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
- vars:
- secret_name: logging-kibana-proxy
- secrets:
- - {key: oauth-secret, value: "{{oauth_secret}}"}
- - {key: session-secret, value: "{{session_secret}}"}
- - {key: server-key, value: "{{kibana_key_file}}"}
- - {key: server-cert, value: "{{kibana_cert_file}}"}
- - {key: server-tls.json, value: "{{server_tls_file}}"}
- secret_keys: ["server-tls.json", "server-key", "session-secret", "oauth-secret", "server-cert"]
- kibana_key_file: "{{key_pairs | entry_from_named_pair('kibana_internal_key')| b64decode }}"
- kibana_cert_file: "{{key_pairs | entry_from_named_pair('kibana_internal_cert')| b64decode }}"
- server_tls_file: "{{key_pairs | entry_from_named_pair('server_tls')| b64decode }}"
- check_mode: no
- changed_when: no
-
-- name: Generating secrets for elasticsearch
- command: >
- {{openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new {{secret_name}}
- key={{generated_certs_dir}}/logging-es.jks truststore={{generated_certs_dir}}/truststore.jks
- searchguard.key={{generated_certs_dir}}/elasticsearch.jks searchguard.truststore={{generated_certs_dir}}/truststore.jks
- admin-key={{generated_certs_dir}}/system.admin.key admin-cert={{generated_certs_dir}}/system.admin.crt
- admin-ca={{generated_certs_dir}}/ca.crt admin.jks={{generated_certs_dir}}/system.admin.jks -o yaml
- vars:
- secret_name: logging-elasticsearch
- secret_keys: ["admin-cert", "searchguard.key", "admin-ca", "key", "truststore", "admin-key", "searchguard.truststore"]
- register: logging_es_secret
- check_mode: no
- changed_when: no
-
-- copy: content="{{logging_es_secret.stdout}}" dest={{mktemp.stdout}}/templates/logging-elasticsearch-secret.yaml
- when: logging_es_secret.stdout is defined
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_serviceaccounts.yaml b/roles/openshift_logging/tasks/generate_serviceaccounts.yaml
deleted file mode 100644
index 21bcdfecb..000000000
--- a/roles/openshift_logging/tasks/generate_serviceaccounts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: Generating serviceaccounts
- template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/logging-{{component}}-sa.yaml
- vars:
- obj_name: aggregated-logging-{{component}}
- with_items:
- - elasticsearch
- - kibana
- - fluentd
- - curator
- loop_control:
- loop_var: component
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_services.yaml b/roles/openshift_logging/tasks/generate_services.yaml
deleted file mode 100644
index e3a5c5eb3..000000000
--- a/roles/openshift_logging/tasks/generate_services.yaml
+++ /dev/null
@@ -1,119 +0,0 @@
----
-- name: Generating logging-es service
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-svc.yaml
- vars:
- obj_name: logging-es
- ports:
- - {port: 9200, targetPort: restapi}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: es
- check_mode: no
- changed_when: no
-
-- name: Generating logging-es-cluster service
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-cluster-svc.yaml
- vars:
- obj_name: logging-es-cluster
- ports:
- - {port: 9300}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: es
- check_mode: no
- changed_when: no
-
-- name: Generating logging-kibana service
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-svc.yaml
- vars:
- obj_name: logging-kibana
- ports:
- - {port: 443, targetPort: oaproxy}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: kibana
- check_mode: no
- changed_when: no
-
-- name: Generating logging-es-ops service
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-svc.yaml
- vars:
- obj_name: logging-es-ops
- ports:
- - {port: 9200, targetPort: restapi}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: es-ops
- when: openshift_logging_use_ops | bool
- check_mode: no
- changed_when: no
-
-- name: Generating logging-es-ops-cluster service
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-cluster-svc.yaml
- vars:
- obj_name: logging-es-ops-cluster
- ports:
- - {port: 9300}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: es-ops
- when: openshift_logging_use_ops | bool
- check_mode: no
- changed_when: no
-
-- name: Generating logging-kibana-ops service
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-svc.yaml
- vars:
- obj_name: logging-kibana-ops
- ports:
- - {port: 443, targetPort: oaproxy}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: kibana-ops
- when: openshift_logging_use_ops | bool
- check_mode: no
- changed_when: no
-
-- name: Generating logging-mux service for external connections
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-mux-svc.yaml
- vars:
- obj_name: logging-mux
- ports:
- - {port: "{{openshift_logging_mux_port}}", targetPort: mux-forward, name: mux-forward}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: mux
- externalIPs:
- - "{{ ansible_eth0.ipv4.address }}"
- check_mode: no
- changed_when: no
- when: openshift_logging_mux_allow_external
-
-- name: Generating logging-mux service for intra-cluster connections
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-mux-svc.yaml
- vars:
- obj_name: logging-mux
- ports:
- - {port: "{{openshift_logging_mux_port}}", targetPort: mux-forward, name: mux-forward}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: mux
- check_mode: no
- changed_when: no
- when: openshift_logging_use_mux and not openshift_logging_mux_allow_external
diff --git a/roles/openshift_logging/tasks/install_curator.yaml b/roles/openshift_logging/tasks/install_curator.yaml
deleted file mode 100644
index ab8e207f1..000000000
--- a/roles/openshift_logging/tasks/install_curator.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-- name: Check Curator current replica count
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-curator
- -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
- register: curator_replica_count
- when: not ansible_check_mode
- ignore_errors: yes
- changed_when: no
-
-- name: Check Curator ops current replica count
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-curator-ops
- -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
- register: curator_ops_replica_count
- when:
- - not ansible_check_mode
- - openshift_logging_use_ops | bool
- ignore_errors: yes
- changed_when: no
-
-- name: Generate curator deploymentconfig
- template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-dc.yaml
- vars:
- component: curator
- logging_component: curator
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
- es_host: logging-es
- es_port: "{{openshift_logging_es_port}}"
- curator_cpu_limit: "{{openshift_logging_curator_cpu_limit }}"
- curator_memory_limit: "{{openshift_logging_curator_memory_limit }}"
- replicas: "{{curator_replica_count.stdout | default (0)}}"
- curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}"
- check_mode: no
- changed_when: no
-
-- name: Generate OPS curator deploymentconfig
- template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-ops-dc.yaml
- vars:
- component: curator-ops
- logging_component: curator
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
- es_host: logging-es-ops
- es_port: "{{openshift_logging_es_ops_port}}"
- curator_cpu_limit: "{{openshift_logging_curator_ops_cpu_limit }}"
- curator_memory_limit: "{{openshift_logging_curator_ops_memory_limit }}"
- replicas: "{{curator_ops_replica_count.stdout | default (0)}}"
- curator_node_selector: "{{openshift_logging_curator_ops_nodeselector | default({}) }}"
- when: openshift_logging_use_ops | bool
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
deleted file mode 100644
index a981e7f7f..000000000
--- a/roles/openshift_logging/tasks/install_elasticsearch.yaml
+++ /dev/null
@@ -1,118 +0,0 @@
----
-- name: Getting current ES deployment size
- set_fact: openshift_logging_current_es_size={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length }}
-
-- set_fact: openshift_logging_es_pvc_prefix="logging-es"
- when: not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == ''
-
-- set_fact: es_indices={{ es_indices | default([]) + [item | int - 1] }}
- with_sequence: count={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
-
-### evaluate if the PVC attached to the dc currently matches the provided vars
-## if it does then we reuse that pvc in the DC
-- include: set_es_storage.yaml
- vars:
- es_component: es
- es_name: "{{ deployment.0 }}"
- es_spec: "{{ deployment.1 }}"
- es_pvc_count: "{{ deployment.2 | int }}"
- es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}"
- es_pvc_names_count: "{{ openshift_logging_facts.elasticsearch.pvcs.keys() | count }}"
- es_pvc_size: "{{ openshift_logging_es_pvc_size }}"
- es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}"
- es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}"
- es_pv_selector: "{{ openshift_logging_es_pv_selector }}"
- es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}"
- es_memory_limit: "{{ openshift_logging_es_memory_limit }}"
- with_together:
- - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}"
- - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"
- - "{{ es_indices | default([]) }}"
- loop_control:
- loop_var: deployment
-## if it does not then we should create one that does and attach it
-
-## create new dc/pvc is needed
-- include: set_es_storage.yaml
- vars:
- es_component: es
- es_name: "logging-es-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
- es_spec: "{}"
- es_pvc_count: "{{ item | int - 1 }}"
- es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}"
- es_pvc_names_count: "{{ [openshift_logging_facts.elasticsearch.pvcs.keys() | count, openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count] | max }}"
- es_pvc_size: "{{ openshift_logging_es_pvc_size }}"
- es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}"
- es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}"
- es_pv_selector: "{{ openshift_logging_es_pv_selector }}"
- es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}"
- es_memory_limit: "{{ openshift_logging_es_memory_limit }}"
- with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs | count }}
-
-# --------- Tasks for Operation clusters ---------
-
-- name: Getting current ES deployment size
- set_fact: openshift_logging_current_es_ops_size={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length }}
-
-- set_fact: openshift_logging_es_ops_pvc_prefix="{{ openshift_logging_es_ops_pvc_prefix | default('logging-es-ops') }}"
-
-- name: Validate Elasticsearch cluster size for Ops
- fail: msg="The openshift_logging_es_ops_cluster_size may not be scaled down more than 1 less (or 0) the number of Elasticsearch nodes already deployed"
- vars:
- es_dcs: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs}}"
- cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
- when:
- - openshift_logging_use_ops | bool
- - "{{es_dcs | length - openshift_logging_es_ops_cluster_size|int | abs > 1}}"
- check_mode: no
-
-- set_fact: openshift_logging_es_ops_pvc_prefix="logging-es-ops"
- when: not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == ''
-
-- set_fact: es_ops_indices={{ es_ops_indices | default([]) + [item | int - 1] }}
- with_sequence: count={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }}
- when:
- - openshift_logging_use_ops | bool
-
-- include: set_es_storage.yaml
- vars:
- es_component: es-ops
- es_name: "{{ deployment.0 }}"
- es_spec: "{{ deployment.1 }}"
- es_pvc_count: "{{ deployment.2 | int }}"
- es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}"
- es_pvc_names_count: "{{ openshift_logging_facts.elasticsearch_ops.pvcs.keys() | count }}"
- es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
- es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}"
- es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}"
- es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
- es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
- es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
- with_together:
- - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}"
- - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"
- - "{{ es_ops_indices | default([]) }}"
- loop_control:
- loop_var: deployment
- when:
- - openshift_logging_use_ops | bool
-## if it does not then we should create one that does and attach it
-
-## create new dc/pvc is needed
-- include: set_es_storage.yaml
- vars:
- es_component: es-ops
- es_name: "logging-es-ops-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
- es_spec: "{}"
- es_pvc_count: "{{ item | int - 1 }}"
- es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}"
- es_pvc_names_count: "{{ [openshift_logging_facts.elasticsearch_ops.pvcs.keys() | count, openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count] | max }}"
- es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
- es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}"
- es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}"
- es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
- es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
- es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
- with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count }}
- when:
- - openshift_logging_use_ops | bool
diff --git a/roles/openshift_logging/tasks/install_fluentd.yaml b/roles/openshift_logging/tasks/install_fluentd.yaml
deleted file mode 100644
index 6bc405819..000000000
--- a/roles/openshift_logging/tasks/install_fluentd.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
----
-- set_fact: fluentd_ops_host={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }}
- check_mode: no
-
-- set_fact: fluentd_ops_port={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }}
- check_mode: no
-
-- name: Generating Fluentd daemonset
- template: src=fluentd.j2 dest={{mktemp.stdout}}/templates/logging-fluentd.yaml
- vars:
- daemonset_name: logging-fluentd
- daemonset_component: fluentd
- daemonset_container_name: fluentd-elasticsearch
- daemonset_serviceAccount: aggregated-logging-fluentd
- ops_host: "{{ fluentd_ops_host }}"
- ops_port: "{{ fluentd_ops_port }}"
- fluentd_nodeselector_key: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
- fluentd_nodeselector_value: "{{openshift_logging_fluentd_nodeselector.values()[0]}}"
- check_mode: no
- changed_when: no
-
-- name: "Check fluentd privileged permissions"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- get scc/privileged -o jsonpath='{.users}'
- register: fluentd_privileged
- check_mode: no
- changed_when: no
-
-- name: "Set privileged permissions for fluentd"
- command: >
- {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
- add-scc-to-user privileged system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
- register: fluentd_output
- failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr
- check_mode: no
- when: fluentd_privileged.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
-
-- name: "Check fluentd cluster-reader permissions"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- get clusterrolebinding/cluster-readers -o jsonpath='{.userNames}'
- register: fluentd_cluster_reader
- check_mode: no
- changed_when: no
-
-- name: "Set cluster-reader permissions for fluentd"
- command: >
- {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
- add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
- register: fluentd2_output
- failed_when: fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr
- check_mode: no
- when: fluentd_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
diff --git a/roles/openshift_logging/tasks/install_kibana.yaml b/roles/openshift_logging/tasks/install_kibana.yaml
deleted file mode 100644
index 52bdeb50d..000000000
--- a/roles/openshift_logging/tasks/install_kibana.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-- name: Check Kibana current replica count
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-kibana
- -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
- register: kibana_replica_count
- when: not ansible_check_mode
- ignore_errors: yes
- changed_when: no
-
-- name: Check Kibana ops current replica count
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-kibana-ops
- -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
- register: kibana_ops_replica_count
- when:
- - not ansible_check_mode
- - openshift_logging_use_ops | bool
- ignore_errors: yes
- changed_when: no
-
-
-- name: Generate kibana deploymentconfig
- template: src=kibana.j2 dest={{mktemp.stdout}}/templates/logging-kibana-dc.yaml
- vars:
- component: kibana
- logging_component: kibana
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
- proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
- es_host: logging-es
- es_port: "{{openshift_logging_es_port}}"
- kibana_cpu_limit: "{{openshift_logging_kibana_cpu_limit }}"
- kibana_memory_limit: "{{openshift_logging_kibana_memory_limit }}"
- kibana_proxy_cpu_limit: "{{openshift_logging_kibana_proxy_cpu_limit }}"
- kibana_proxy_memory_limit: "{{openshift_logging_kibana_proxy_memory_limit }}"
- replicas: "{{kibana_replica_count.stdout | default (0)}}"
- kibana_node_selector: "{{openshift_logging_kibana_nodeselector | default({})}}"
- check_mode: no
- changed_when: no
-
-- name: Generate OPS kibana deploymentconfig
- template: src=kibana.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-dc.yaml
- vars:
- component: kibana-ops
- logging_component: kibana
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
- proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
- es_host: logging-es-ops
- es_port: "{{openshift_logging_es_ops_port}}"
- kibana_cpu_limit: "{{openshift_logging_kibana_ops_cpu_limit }}"
- kibana_memory_limit: "{{openshift_logging_kibana_ops_memory_limit }}"
- kibana_proxy_cpu_limit: "{{openshift_logging_kibana_ops_proxy_cpu_limit }}"
- kibana_proxy_memory_limit: "{{openshift_logging_kibana_ops_proxy_memory_limit }}"
- replicas: "{{kibana_ops_replica_count.stdout | default (0)}}"
- kibana_node_selector: "{{openshift_logging_kibana_ops_nodeselector | default({})}}"
- when: openshift_logging_use_ops | bool
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index aec455c22..f2d757294 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -2,89 +2,242 @@
- name: Gather OpenShift Logging Facts
openshift_logging_facts:
oc_bin: "{{openshift.common.client_binary}}"
- admin_kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
openshift_logging_namespace: "{{openshift_logging_namespace}}"
- tags: logging_facts
+
+- name: Set logging project
+ oc_project:
+ state: present
+ name: "{{ openshift_logging_namespace }}"
+ node_selector: "{{ openshift_logging_nodeselector | default(null) }}"
+
+- name: Labelling logging project
+ oc_label:
+ state: present
+ kind: namespace
+ name: "{{ openshift_logging_namespace }}"
+ labels:
+ - key: "{{ item.key }}"
+ value: "{{ item.value }}"
+ with_dict: "{{ openshift_logging_labels | default({}) }}"
+ when:
+ - openshift_logging_labels is defined
+ - openshift_logging_labels is dict
+
+- name: Labelling logging project
+ oc_label:
+ state: present
+ kind: namespace
+ name: "{{ openshift_logging_namespace }}"
+ labels:
+ - key: "{{ openshift_logging_label_key }}"
+ value: "{{ openshift_logging_label_value }}"
+ when:
+ - openshift_logging_label_key is defined
+ - openshift_logging_label_key != ""
+ - openshift_logging_label_value is defined
+
+- name: Create logging cert directory
+ file:
+ path: "{{ openshift.common.config_base }}/logging"
+ state: directory
+ mode: 0755
+ changed_when: False
check_mode: no
-- name: Validate Elasticsearch cluster size
- fail: msg="The openshift_logging_es_cluster_size may only be scaled down manually. Please see official documentation on how to do this."
- when: openshift_logging_facts.elasticsearch.deploymentconfigs | length > openshift_logging_es_cluster_size|int
-
-- name: Validate Elasticsearch Ops cluster size
- fail: msg="The openshift_logging_es_ops_cluster_size may only be scaled down manually. Please see official documentation on how to do this."
- when: openshift_logging_facts.elasticsearch_ops.deploymentconfigs | length > openshift_logging_es_ops_cluster_size|int
-
-- name: Install logging
- include: "{{ role_path }}/tasks/install_{{ install_component }}.yaml"
- when: openshift_hosted_logging_install | default(true) | bool
- with_items:
- - support
- - elasticsearch
- - kibana
- - curator
- - fluentd
- loop_control:
- loop_var: install_component
-
-- name: Install logging mux
- include: "{{ role_path }}/tasks/install_mux.yaml"
- when: openshift_logging_use_mux
-
-- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
- register: object_def_files
- changed_when: no
-
-- slurp: src={{item}}
- register: object_defs
- with_items: "{{object_def_files.files | map(attribute='path') | list | sort}}"
- changed_when: no
-
-- name: Create objects
- include: oc_apply.yaml
+- include: generate_certs.yaml
vars:
- - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
- - namespace: "{{ openshift_logging_namespace }}"
- - file_name: "{{ file.source }}"
- - file_content: "{{ file.content | b64decode | from_yaml }}"
- with_items: "{{ object_defs.results }}"
- loop_control:
- loop_var: file
- when: not ansible_check_mode
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
-- include: update_master_config.yaml
+## Elasticsearch
+
+- set_fact: es_indices={{ es_indices | default([]) + [item | int - 1] }}
+ with_sequence: count={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
+ when: openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count > 0
+
+- set_fact: es_indices=[]
+ when: openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count == 0
+
+# We don't allow scaling down of ES nodes currently
+- include_role:
+ name: openshift_logging_elasticsearch
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_elasticsearch_deployment_name: "{{ item.0 }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}"
+ openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
+
+ openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if openshift_logging_es_pvc_dynamic | bool else 'emptydir' }}"
+ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
+ openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
+ openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+
+ with_together:
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs }}"
+ - "{{ openshift_logging_facts.elasticsearch.pvcs }}"
+ - "{{ es_indices }}"
+
+# Create any new DC that may be required
+- include_role:
+ name: openshift_logging_elasticsearch
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch.deploymentconfigs | count - 1 }}"
+ openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
+
+ openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if openshift_logging_es_pvc_dynamic | bool else 'emptydir' }}"
+ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
+ openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
+ openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+
+ with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
+
+- set_fact: es_ops_indices={{ es_ops_indices | default([]) + [item | int - 1] }}
+ with_sequence: count={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }}
+ when:
+ - openshift_logging_use_ops | bool
+ - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count > 0
+
+- set_fact: es_ops_indices=[]
+ when: openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count == 0
+
+
+- include_role:
+ name: openshift_logging_elasticsearch
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_elasticsearch_deployment_name: "{{ item.0 }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}"
+ openshift_logging_elasticsearch_ops_deployment: true
+ openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
+
+ #openshift_logging_elasticsearch_storage_type: "{{ }}"
+ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
+ openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
+ openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
-- name: Printing out objects to create
- debug: msg={{file.content | b64decode }}
- with_items: "{{ object_defs.results }}"
- loop_control:
- loop_var: file
- when: ansible_check_mode
-
- # TODO replace task with oc_secret module that supports
- # linking when available
-- name: Link Pull Secrets With Service Accounts
- include: oc_secret.yaml
+ with_together:
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs }}"
+ - "{{ openshift_logging_facts.elasticsearch_ops.pvcs }}"
+ - "{{ es_ops_indices }}"
+ when:
+ - openshift_logging_use_ops | bool
+
+# Create any new DC that may be required
+- include_role:
+ name: openshift_logging_elasticsearch
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count - 1 }}"
+ openshift_logging_elasticsearch_ops_deployment: true
+ openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
+
+ openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if openshift_logging_es_pvc_dynamic | bool else 'emptydir' }}"
+ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
+ openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
+ openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+
+ with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }}
+ when:
+ - openshift_logging_use_ops | bool
+
+
+## Kibana
+- include_role:
+ name: openshift_logging_kibana
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_kibana_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_kibana_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_kibana_master_public_url: "{{ openshift_logging_master_public_url }}"
+ openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_kibana_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_replica_count }}"
+ openshift_logging_kibana_es_host: "{{ openshift_logging_es_host }}"
+ openshift_logging_kibana_es_port: "{{ openshift_logging_es_port }}"
+ openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
+
+
+- include_role:
+ name: openshift_logging_kibana
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_kibana_ops_deployment: true
+ openshift_logging_kibana_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_kibana_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_kibana_master_public_url: "{{ openshift_logging_master_public_url }}"
+ openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_kibana_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
+ openshift_logging_kibana_es_host: "{{ openshift_logging_es_ops_host }}"
+ openshift_logging_kibana_es_port: "{{ openshift_logging_es_ops_port }}"
+ openshift_logging_kibana_nodeselector: "{{ openshift_logging_kibana_ops_nodeselector }}"
+ openshift_logging_kibana_cpu_limit: "{{ openshift_logging_kibana_ops_cpu_limit }}"
+ openshift_logging_kibana_memory_limit: "{{ openshift_logging_kibana_ops_memory_limit }}"
+ openshift_logging_kibana_hostname: "{{ openshift_logging_kibana_ops_hostname }}"
+ openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_ops_replica_count }}"
+ openshift_logging_kibana_proxy_debug: "{{ openshift_logging_kibana_ops_proxy_debug }}"
+ openshift_logging_kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_ops_proxy_cpu_limit }}"
+ openshift_logging_kibana_proxy_memory_limit: "{{ openshift_logging_kibana_ops_proxy_memory_limit }}"
+ openshift_logging_kibana_cert: "{{ openshift_logging_kibana_ops_cert }}"
+ openshift_logging_kibana_key: "{{ openshift_logging_kibana_ops_key }}"
+ openshift_logging_kibana_ca: "{{ openshift_logging_kibana_ops_ca}}"
+ when:
+ - openshift_logging_use_ops | bool
+
+
+## Curator
+- include_role:
+ name: openshift_logging_curator
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_curator_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
+
+- include_role:
+ name: openshift_logging_curator
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_curator_ops_deployment: true
+ openshift_logging_curator_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
+ openshift_logging_curator_cpu_limit: "{{ openshift_logging_curator_ops_cpu_limit }}"
+ openshift_logging_curator_memory_limit: "{{ openshift_logging_curator_ops_memory_limit }}"
+ openshift_logging_curator_nodeselector: "{{ openshift_logging_curator_ops_nodeselector }}"
+ when:
+ - openshift_logging_use_ops | bool
+
+## Mux
+- include_role:
+ name: openshift_logging_mux
vars:
- kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
- subcommand: link
- service_account: "{{sa_account}}"
- secret_name: "{{openshift_logging_image_pull_secret}}"
- add_args: "--for=pull"
- with_items:
- - default
- - aggregated-logging-elasticsearch
- - aggregated-logging-kibana
- - aggregated-logging-fluentd
- - aggregated-logging-curator
- register: link_pull_secret
- loop_control:
- loop_var: sa_account
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_mux_ops_host: "{{ ( openshift_logging_use_ops | bool ) | ternary('logging-es-ops', 'logging-es') }}"
+ openshift_logging_mux_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_mux_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_mux_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_mux_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_mux_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
when:
- - openshift_logging_image_pull_secret is defined
- - openshift_logging_image_pull_secret != ''
- failed_when: link_pull_secret.rc != 0
+ - openshift_logging_use_mux | bool
+
-- name: Scaling up cluster
- include: start_cluster.yaml
- when: start_cluster | default(true) | bool
+## Fluentd
+- include_role:
+ name: openshift_logging_fluentd
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_fluentd_ops_host: "{{ ( openshift_logging_use_ops | bool ) | ternary('logging-es-ops', 'logging-es') }}"
+ openshift_logging_fluentd_use_journal: "{{ openshift.docker.options | search('journald') }}"
+ openshift_logging_fluentd_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_fluentd_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_fluentd_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
+ openshift_logging_fluentd_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_fluentd_namespace: "{{ openshift_logging_namespace }}"
+
+- include: update_master_config.yaml
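
The mux and fluentd includes above pick their Elasticsearch host with the ternary filter, so a single boolean flips between the ops and non-ops services. A minimal sketch of that pattern in isolation; openshift_logging_use_ops is the variable used above, while example_es_host is an illustrative fact name, not part of the role:

- set_fact:
    example_es_host: "{{ ( openshift_logging_use_ops | bool ) | ternary('logging-es-ops', 'logging-es') }}"

- debug:
    msg: "forwarding logs to {{ example_es_host }}"
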
diff --git a/roles/openshift_logging/tasks/install_mux.yaml b/roles/openshift_logging/tasks/install_mux.yaml
deleted file mode 100644
index 91eeb95a1..000000000
--- a/roles/openshift_logging/tasks/install_mux.yaml
+++ /dev/null
@@ -1,67 +0,0 @@
----
-- set_fact: mux_ops_host={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }}
- check_mode: no
-
-- set_fact: mux_ops_port={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }}
- check_mode: no
-
-- name: Check mux current replica count
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-mux
- -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
- register: mux_replica_count
- when: not ansible_check_mode
- ignore_errors: yes
- changed_when: no
-
-- name: Generating mux deploymentconfig
- template: src=mux.j2 dest={{mktemp.stdout}}/templates/logging-mux-dc.yaml
- vars:
- component: mux
- logging_component: mux
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-fluentd:{{openshift_logging_image_version}}"
- es_host: logging-es
- es_port: "{{openshift_logging_es_port}}"
- ops_host: "{{ mux_ops_host }}"
- ops_port: "{{ mux_ops_port }}"
- mux_cpu_limit: "{{openshift_logging_mux_cpu_limit}}"
- mux_memory_limit: "{{openshift_logging_mux_memory_limit}}"
- replicas: "{{mux_replica_count.stdout | default (0)}}"
- mux_node_selector: "{{openshift_logging_mux_nodeselector | default({})}}"
- check_mode: no
- changed_when: no
-
-- name: "Check mux hostmount-anyuid permissions"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- get scc/hostmount-anyuid -o jsonpath='{.users}'
- register: mux_hostmount_anyuid
- check_mode: no
- changed_when: no
-
-- name: "Set hostmount-anyuid permissions for mux"
- command: >
- {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
- add-scc-to-user hostmount-anyuid system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
- register: mux_output
- failed_when: mux_output.rc == 1 and 'exists' not in mux_output.stderr
- check_mode: no
- when: mux_hostmount_anyuid.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
-
-- name: "Check mux cluster-reader permissions"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- get clusterrolebinding/cluster-readers -o jsonpath='{.userNames}'
- register: mux_cluster_reader
- check_mode: no
- changed_when: no
-
-- name: "Set cluster-reader permissions for mux"
- command: >
- {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
- add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
- register: mux2_output
- failed_when: mux2_output.rc == 1 and 'exists' not in mux2_output.stderr
- check_mode: no
- when: mux_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
diff --git a/roles/openshift_logging/tasks/install_support.yaml b/roles/openshift_logging/tasks/install_support.yaml
deleted file mode 100644
index 877ce3149..000000000
--- a/roles/openshift_logging/tasks/install_support.yaml
+++ /dev/null
@@ -1,73 +0,0 @@
----
-# This is the base configuration for installing the other components
-- name: Set logging project
- oc_project:
- state: present
- name: "{{ openshift_logging_namespace }}"
- node_selector: "{{ openshift_logging_nodeselector | default(null) }}"
-
-- name: Labelling logging project
- oc_label:
- state: present
- kind: namespace
- name: "{{ openshift_logging_namespace }}"
- labels:
- - key: "{{ item.key }}"
- value: "{{ item.value }}"
- with_dict: "{{ openshift_logging_labels | default({}) }}"
- when:
- - openshift_logging_labels is defined
- - openshift_logging_labels is dict
-
-- name: Labelling logging project
- oc_label:
- state: present
- kind: namespace
- name: "{{ openshift_logging_namespace }}"
- labels:
- - key: "{{ openshift_logging_label_key }}"
- value: "{{ openshift_logging_label_value }}"
- when:
- - openshift_logging_label_key is defined
- - openshift_logging_label_key != ""
- - openshift_logging_label_value is defined
-
-- name: Create logging cert directory
- file: path={{openshift.common.config_base}}/logging state=directory mode=0755
- changed_when: False
- check_mode: no
-
-- include: generate_certs.yaml
- vars:
- generated_certs_dir: "{{openshift.common.config_base}}/logging"
-
-- name: Create temp directory for all our templates
- file: path={{mktemp.stdout}}/templates state=directory mode=0755
- changed_when: False
- check_mode: no
-
-- include: generate_secrets.yaml
- vars:
- generated_certs_dir: "{{openshift.common.config_base}}/logging"
-
-- include: generate_configmaps.yaml
-
-- include: generate_services.yaml
-
-- name: Generate kibana-proxy oauth client
- template: src=oauth-client.j2 dest={{mktemp.stdout}}/templates/oauth-client.yaml
- vars:
- secret: "{{oauth_secret}}"
- when: oauth_secret is defined
- check_mode: no
- changed_when: no
-
-- include: generate_clusterroles.yaml
-
-- include: generate_rolebindings.yaml
-
-- include: generate_clusterrolebindings.yaml
-
-- include: generate_serviceaccounts.yaml
-
-- include: generate_routes.yaml
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index 3d8cd3410..f475024dd 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -30,33 +30,12 @@
check_mode: no
become: no
-- debug: msg="Created local temp dir {{local_tmp.stdout}}"
-
-- name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
- changed_when: False
- check_mode: no
- tags: logging_init
-
- include: "{{ role_path }}/tasks/install_logging.yaml"
when: openshift_logging_install_logging | default(false) | bool
-- include: "{{ role_path }}/tasks/upgrade_logging.yaml"
- when: openshift_logging_upgrade_logging | default(false) | bool
-
- include: "{{ role_path }}/tasks/delete_logging.yaml"
when:
- not openshift_logging_install_logging | default(false) | bool
- - not openshift_logging_upgrade_logging | default(false) | bool
-
-- name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- tags: logging_cleanup
- changed_when: False
- check_mode: no
- name: Cleaning up local temp dir
local_action: file path="{{local_tmp.stdout}}" state=absent
diff --git a/roles/openshift_logging/tasks/oc_apply.yaml b/roles/openshift_logging/tasks/oc_apply.yaml
deleted file mode 100644
index a0ed56ebd..000000000
--- a/roles/openshift_logging/tasks/oc_apply.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
----
-- oc_obj:
- kind: "{{ file_content.kind }}"
- name: "{{ file_content.metadata.name }}"
- state: present
- namespace: "{{ namespace }}"
- files:
- - "{{ file_name }}"
- when: file_content.kind not in ["Service", "Route"]
-
-## still need to do this for services until the template logic is replaced by oc_*
-- block:
- - name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
- command: >
- {{ openshift.common.client_binary }}
- --config={{ kubeconfig }}
- get {{file_content.kind}} {{file_content.metadata.name}}
- -o jsonpath='{.metadata.resourceVersion}'
- -n {{namespace}}
- register: generation_init
- failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''"
- changed_when: no
-
- - name: Applying {{file_name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- apply -f {{ file_name }}
- -n {{ namespace }}
- register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
- changed_when: no
-
- - name: Removing previous {{file_name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- delete -f {{ file_name }}
- -n {{ namespace }}
- register: generation_delete
- failed_when: "'error' in generation_delete.stderr"
- changed_when: generation_delete.rc == 0
- when: "'field is immutable' in generation_apply.stderr"
-
- - name: Recreating {{file_name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- apply -f {{ file_name }}
- -n {{ namespace }}
- register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
- changed_when: generation_apply.rc == 0
- when: "'field is immutable' in generation_apply.stderr"
- when: file_content.kind in ["Service", "Route"]
diff --git a/roles/openshift_logging/tasks/oc_secret.yaml b/roles/openshift_logging/tasks/oc_secret.yaml
deleted file mode 100644
index de37e4f6d..000000000
--- a/roles/openshift_logging/tasks/oc_secret.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- command: >
- {{ openshift.common.client_binary }}
- --config={{ kubeconfig }}
- secret {{subcommand}} {{service_account}} {{secret_name}}
- {{add_args}}
- -n {{openshift_logging_namespace}}
diff --git a/roles/openshift_logging/tasks/set_es_storage.yaml b/roles/openshift_logging/tasks/set_es_storage.yaml
deleted file mode 100644
index 4afe4e641..000000000
--- a/roles/openshift_logging/tasks/set_es_storage.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
----
-- set_fact: es_storage_type="{{ es_spec.volumes['elasticsearch-storage'] }}"
- when: es_spec.volumes is defined
-
-- set_fact: es_storage_claim="{{ es_spec.volumes['elasticsearch-storage'].persistentVolumeClaim.claimName }}"
- when:
- - es_spec.volumes is defined
- - es_storage_type.persistentVolumeClaim is defined
-
-- set_fact: es_storage_claim=""
- when:
- - not es_spec.volumes is defined or not es_storage_type.persistentVolumeClaim is defined
-
-## take an ES dc and evaluate its storage option
-# if it is a hostmount or emptydir we don't do anything with it
-# if its a pvc we see if the corresponding pvc matches the provided specs (if they exist)
-- oc_obj:
- state: list
- kind: pvc
- name: "{{ es_storage_claim }}"
- namespace: "{{ openshift_logging_namespace }}"
- register: pvc_spec
- failed_when: pvc_spec.results.stderr is defined
- when:
- - es_spec.volumes is defined
- - es_storage_type.persistentVolumeClaim is defined
-
-- set_fact: pvc_size="{{ pvc_spec.results.results[0].spec.resources.requests.storage }}"
- when:
- - pvc_spec.results is defined
- - pvc_spec.results.results[0].spec is defined
-
-# if not create the pvc and use it
-- block:
-
- - name: Generating PersistentVolumeClaims
- template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
- vars:
- obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}"
- size: "{{ es_pvc_size }}"
- access_modes: "{{ openshift_logging_storage_access_modes }}"
- pv_selector: "{{ es_pv_selector }}"
- when: not es_pvc_dynamic | bool
- check_mode: no
- changed_when: no
-
- - name: Generating PersistentVolumeClaims - Dynamic
- template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
- vars:
- obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}"
- annotations:
- volume.alpha.kubernetes.io/storage-class: "dynamic"
- size: "{{ es_pvc_size }}"
- access_modes: "{{ openshift_logging_storage_access_modes }}"
- pv_selector: "{{ es_pv_selector }}"
- when: es_pvc_dynamic | bool
- check_mode: no
- changed_when: no
-
- - set_fact: es_storage_claim="{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}"
-
- when:
- - es_pvc_size | search('^\d.*')
- - not es_spec.volumes is defined or not es_storage_claim | search( es_pvc_prefix ) or ( not pvc_size | search( es_pvc_size ) and not es_pvc_size | search( pvc_size ) )
-
-- name: Generate Elasticsearch DeploymentConfig
- template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
- vars:
- component: "{{ es_component }}"
- deploy_name: "{{ es_name }}"
- logging_component: elasticsearch
- deploy_name_prefix: "logging-{{ es_component }}"
- image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- es_cluster_name: "{{component}}"
- es_cpu_limit: "{{ es_cpu_limit }}"
- es_memory_limit: "{{ es_memory_limit }}"
- es_node_selector: "{{ es_node_selector }}"
- es_storage: "{{ openshift_logging_facts | es_storage( es_name, es_storage_claim ) }}"
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml
deleted file mode 100644
index c1592b830..000000000
--- a/roles/openshift_logging/tasks/start_cluster.yaml
+++ /dev/null
@@ -1,156 +0,0 @@
----
-- name: Retrieve list of fluentd hosts
- oc_obj:
- state: list
- kind: node
- when: "'--all' in openshift_logging_fluentd_hosts"
- register: fluentd_hosts
-
-- name: Set fact openshift_logging_fluentd_hosts
- set_fact:
- openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- when: "'--all' in openshift_logging_fluentd_hosts"
-
-- name: start fluentd
- oc_label:
- name: "{{ fluentd_host }}"
- kind: node
- state: add
- labels: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
- with_items: "{{ openshift_logging_fluentd_hosts }}"
- loop_control:
- loop_var: fluentd_host
-
-- name: Retrieve mux
- oc_obj:
- state: list
- kind: dc
- selector: "component=mux"
- namespace: "{{openshift_logging_namespace}}"
- register: mux_dc
- when: openshift_logging_use_mux
-
-- name: start mux
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: "{{ openshift_logging_mux_replica_count | default (1) }}"
- with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list if 'results' in mux_dc else [] }}"
- loop_control:
- loop_var: object
- when:
- - mux_dc.results is defined
- - mux_dc.results.results is defined
- - openshift_logging_use_mux
-
-- name: Retrieve elasticsearch
- oc_obj:
- state: list
- kind: dc
- selector: "component=es"
- namespace: "{{openshift_logging_namespace}}"
- register: es_dc
-
-- name: start elasticsearch
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 1
- with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
-
-- name: Retrieve kibana
- oc_obj:
- state: list
- kind: dc
- selector: "component=kibana"
- namespace: "{{openshift_logging_namespace}}"
- register: kibana_dc
-
-- name: start kibana
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: "{{ openshift_logging_kibana_replica_count | default (1) }}"
- with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
-
-- name: Retrieve curator
- oc_obj:
- state: list
- kind: dc
- selector: "component=curator"
- namespace: "{{openshift_logging_namespace}}"
- register: curator_dc
-
-- name: start curator
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 1
- with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
-
-- name: Retrieve elasticsearch-ops
- oc_obj:
- state: list
- kind: dc
- selector: "component=es-ops"
- namespace: "{{openshift_logging_namespace}}"
- register: es_dc
-
-- name: start elasticsearch-ops
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 1
- with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
- when: openshift_logging_use_ops | bool
-
-- name: Retrieve kibana-ops
- oc_obj:
- state: list
- kind: dc
- selector: "component=kibana-ops"
- namespace: "{{openshift_logging_namespace}}"
- register: kibana_dc
-
-- name: start kibana-ops
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: "{{ openshift_logging_kibana_ops_replica_count | default (1) }}"
- with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
- when: openshift_logging_use_ops | bool
-
-- name: Retrieve curator
- oc_obj:
- state: list
- kind: dc
- selector: "component=curator-ops"
- namespace: "{{openshift_logging_namespace}}"
- register: curator_dc
-
-- name: start curator-ops
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 1
- with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
- when: openshift_logging_use_ops | bool
diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml
deleted file mode 100644
index f4b419d84..000000000
--- a/roles/openshift_logging/tasks/stop_cluster.yaml
+++ /dev/null
@@ -1,153 +0,0 @@
----
-- name: Retrieve list of fluentd hosts
- oc_obj:
- state: list
- kind: node
- when: "'--all' in openshift_logging_fluentd_hosts"
- register: fluentd_hosts
-
-- name: Set fact openshift_logging_fluentd_hosts
- set_fact:
- openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- when: "'--all' in openshift_logging_fluentd_hosts"
-
-- name: stop fluentd
- oc_label:
- name: "{{ fluentd_host }}"
- kind: node
- state: absent
- labels: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
- with_items: "{{ openshift_logging_fluentd_hosts }}"
- loop_control:
- loop_var: fluentd_host
-
-- name: Retrieve mux
- oc_obj:
- state: list
- kind: dc
- selector: "component=mux"
- namespace: "{{openshift_logging_namespace}}"
- register: mux_dc
- when: openshift_logging_use_mux
-
-- name: stop mux
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 0
- with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list if 'results' in mux_dc else [] }}"
- loop_control:
- loop_var: object
- when: openshift_logging_use_mux
-
-- name: Retrieve elasticsearch
- oc_obj:
- state: list
- kind: dc
- selector: "component=es"
- namespace: "{{openshift_logging_namespace}}"
- register: es_dc
-
-- name: stop elasticsearch
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 0
- with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
-
-- name: Retrieve kibana
- oc_obj:
- state: list
- kind: dc
- selector: "component=kibana"
- namespace: "{{openshift_logging_namespace}}"
- register: kibana_dc
-
-- name: stop kibana
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 0
- with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
-
-- name: Retrieve curator
- oc_obj:
- state: list
- kind: dc
- selector: "component=curator"
- namespace: "{{openshift_logging_namespace}}"
- register: curator_dc
-
-- name: stop curator
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 0
- with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
-
-- name: Retrieve elasticsearch-ops
- oc_obj:
- state: list
- kind: dc
- selector: "component=es-ops"
- namespace: "{{openshift_logging_namespace}}"
- register: es_dc
-
-- name: stop elasticsearch-ops
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 0
- with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
- when: openshift_logging_use_ops | bool
-
-- name: Retrieve kibana-ops
- oc_obj:
- state: list
- kind: dc
- selector: "component=kibana-ops"
- namespace: "{{openshift_logging_namespace}}"
- register: kibana_dc
-
-- name: stop kibana-ops
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 0
- with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
- when: openshift_logging_use_ops | bool
-
-- name: Retrieve curator
- oc_obj:
- state: list
- kind: dc
- selector: "component=curator-ops"
- namespace: "{{openshift_logging_namespace}}"
- register: curator_dc
-
-- name: stop curator-ops
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 0
- with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
- when: openshift_logging_use_ops | bool
diff --git a/roles/openshift_logging/tasks/upgrade_logging.yaml b/roles/openshift_logging/tasks/upgrade_logging.yaml
deleted file mode 100644
index 30fdbd2af..000000000
--- a/roles/openshift_logging/tasks/upgrade_logging.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-- name: Stop the Cluster
- include: stop_cluster.yaml
-
-- name: Upgrade logging
- include: install_logging.yaml
- vars:
- start_cluster: False
-
-# start ES so that we can run migrate script
-- name: Retrieve elasticsearch
- oc_obj:
- state: list
- kind: dc
- selector: "component=es"
- namespace: "{{openshift_logging_namespace}}"
- register: es_dc
-
-- name: start elasticsearch
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 1
- with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
-
-- name: Wait for pods to start
- oc_obj:
- state: list
- kind: pods
- selector: "component=es"
- namespace: "{{openshift_logging_namespace}}"
- register: running_pod
- until: running_pod.results.results[0]['items'] | selectattr('status.phase', 'match', '^Running$') | map(attribute='metadata.name') | list | length != 0
- retries: 30
- delay: 10
-
-- name: Run upgrade script
- script: es_migration.sh {{openshift.common.config_base}}/logging/ca.crt {{openshift.common.config_base}}/logging/system.admin.key {{openshift.common.config_base}}/logging/system.admin.crt {{openshift_logging_es_host}} {{openshift_logging_es_port}} {{openshift_logging_namespace}}
- register: script_output
- changed_when:
- - script_output.rc == 0
- - script_output.stdout.find("skipping update_for_uuid") == -1 or script_output.stdout.find("skipping update_for_common_data_model") == -1
-
-- name: Start up rest of cluster
- include: start_cluster.yaml
diff --git a/roles/openshift_logging/templates/clusterrole.j2 b/roles/openshift_logging/templates/clusterrole.j2
deleted file mode 100644
index 0d28db48e..000000000
--- a/roles/openshift_logging/templates/clusterrole.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-apiVersion: v1
-kind: ClusterRole
-metadata:
- name: {{obj_name}}
-rules:
-{% for rule in rules %}
-- resources:
-{% for kind in rule.resources %}
- - {{ kind }}
-{% endfor %}
- apiGroups:
-{% if rule.api_groups is defined %}
-{% for group in rule.api_groups %}
- - {{ group }}
-{% endfor %}
-{% endif %}
- verbs:
-{% for verb in rule.verbs %}
- - {{ verb }}
-{% endfor %}
-{% endfor %}
diff --git a/roles/openshift_logging/templates/clusterrolebinding.j2 b/roles/openshift_logging/templates/clusterrolebinding.j2
deleted file mode 100644
index 2d25ff1fb..000000000
--- a/roles/openshift_logging/templates/clusterrolebinding.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-apiVersion: v1
-kind: ClusterRoleBinding
-metadata:
- name: {{obj_name}}
-{% if crb_usernames is defined %}
-userNames:
-{% for name in crb_usernames %}
- - {{ name }}
-{% endfor %}
-{% endif %}
-{% if crb_groupnames is defined %}
-groupNames:
-{% for name in crb_groupnames %}
- - {{ name }}
-{% endfor %}
-{% endif %}
-subjects:
-{% for sub in subjects %}
- - kind: {{ sub.kind }}
- name: {{ sub.name }}
- namespace: {{sub.namespace}}
-{% endfor %}
-roleRef:
- name: {{obj_name}}
diff --git a/roles/openshift_logging/templates/es-storage-emptydir.partial b/roles/openshift_logging/templates/es-storage-emptydir.partial
deleted file mode 100644
index ccd01a816..000000000
--- a/roles/openshift_logging/templates/es-storage-emptydir.partial
+++ /dev/null
@@ -1 +0,0 @@
- emptyDir: {}
diff --git a/roles/openshift_logging/templates/es-storage-hostpath.partial b/roles/openshift_logging/templates/es-storage-hostpath.partial
deleted file mode 100644
index 07ddad9ba..000000000
--- a/roles/openshift_logging/templates/es-storage-hostpath.partial
+++ /dev/null
@@ -1,2 +0,0 @@
- hostPath:
- path: {{es_storage['path']}}
diff --git a/roles/openshift_logging/templates/es-storage-pvc.partial b/roles/openshift_logging/templates/es-storage-pvc.partial
deleted file mode 100644
index fcbff68de..000000000
--- a/roles/openshift_logging/templates/es-storage-pvc.partial
+++ /dev/null
@@ -1,2 +0,0 @@
- persistentVolumeClaim:
- claimName: {{es_storage['pvc_claim']}}
diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2
deleted file mode 100644
index 5c93d823e..000000000
--- a/roles/openshift_logging/templates/fluentd.j2
+++ /dev/null
@@ -1,167 +0,0 @@
-apiVersion: extensions/v1beta1
-kind: "DaemonSet"
-metadata:
- name: "{{daemonset_name}}"
- labels:
- provider: openshift
- component: "{{daemonset_component}}"
- logging-infra: "{{daemonset_component}}"
-spec:
- selector:
- matchLabels:
- provider: openshift
- component: "{{daemonset_component}}"
- updateStrategy:
- type: RollingUpdate
- rollingUpdate:
- minReadySeconds: 600
- template:
- metadata:
- name: "{{daemonset_container_name}}"
- labels:
- logging-infra: "{{daemonset_component}}"
- provider: openshift
- component: "{{daemonset_component}}"
- spec:
- serviceAccountName: "{{daemonset_serviceAccount}}"
- nodeSelector:
- {{fluentd_nodeselector_key}}: "{{fluentd_nodeselector_value}}"
- containers:
- - name: "{{daemonset_container_name}}"
- image: "{{openshift_logging_image_prefix}}{{daemonset_name}}:{{openshift_logging_image_version}}"
- imagePullPolicy: Always
- securityContext:
- privileged: true
- resources:
- limits:
- cpu: {{openshift_logging_fluentd_cpu_limit}}
- memory: {{openshift_logging_fluentd_memory_limit}}
- volumeMounts:
- - name: runlogjournal
- mountPath: /run/log/journal
- - name: varlog
- mountPath: /var/log
- - name: varlibdockercontainers
- mountPath: /var/lib/docker/containers
- readOnly: true
- - name: config
- mountPath: /etc/fluent/configs.d/user
- readOnly: true
- - name: certs
- mountPath: /etc/fluent/keys
- readOnly: true
- - name: dockerhostname
- mountPath: /etc/docker-hostname
- readOnly: true
- - name: localtime
- mountPath: /etc/localtime
- readOnly: true
- - name: dockercfg
- mountPath: /etc/sysconfig/docker
- readOnly: true
- - name: dockerdaemoncfg
- mountPath: /etc/docker
- readOnly: true
-{% if openshift_logging_use_mux_client | bool %}
- - name: muxcerts
- mountPath: /etc/fluent/muxkeys
- readOnly: true
-{% endif %}
- env:
- - name: "K8S_HOST_URL"
- value: "{{openshift_logging_master_url}}"
- - name: "ES_HOST"
- value: "{{openshift_logging_es_host}}"
- - name: "ES_PORT"
- value: "{{openshift_logging_es_port}}"
- - name: "ES_CLIENT_CERT"
- value: "{{openshift_logging_es_client_cert}}"
- - name: "ES_CLIENT_KEY"
- value: "{{openshift_logging_es_client_key}}"
- - name: "ES_CA"
- value: "{{openshift_logging_es_ca}}"
- - name: "OPS_HOST"
- value: "{{ops_host}}"
- - name: "OPS_PORT"
- value: "{{ops_port}}"
- - name: "OPS_CLIENT_CERT"
- value: "{{openshift_logging_es_ops_client_cert}}"
- - name: "OPS_CLIENT_KEY"
- value: "{{openshift_logging_es_ops_client_key}}"
- - name: "OPS_CA"
- value: "{{openshift_logging_es_ops_ca}}"
- - name: "ES_COPY"
- value: "{{openshift_logging_fluentd_es_copy|lower}}"
- - name: "ES_COPY_HOST"
- value: "{{es_copy_host | default('')}}"
- - name: "ES_COPY_PORT"
- value: "{{es_copy_port | default('')}}"
- - name: "ES_COPY_SCHEME"
- value: "{{es_copy_scheme | default('https')}}"
- - name: "ES_COPY_CLIENT_CERT"
- value: "{{es_copy_client_cert | default('')}}"
- - name: "ES_COPY_CLIENT_KEY"
- value: "{{es_copy_client_key | default('')}}"
- - name: "ES_COPY_CA"
- value: "{{es_copy_ca | default('')}}"
- - name: "ES_COPY_USERNAME"
- value: "{{es_copy_username | default('')}}"
- - name: "ES_COPY_PASSWORD"
- value: "{{es_copy_password | default('')}}"
- - name: "OPS_COPY_HOST"
- value: "{{ops_copy_host | default('')}}"
- - name: "OPS_COPY_PORT"
- value: "{{ops_copy_port | default('')}}"
- - name: "OPS_COPY_SCHEME"
- value: "{{ops_copy_scheme | default('https')}}"
- - name: "OPS_COPY_CLIENT_CERT"
- value: "{{ops_copy_client_cert | default('')}}"
- - name: "OPS_COPY_CLIENT_KEY"
- value: "{{ops_copy_client_key | default('')}}"
- - name: "OPS_COPY_CA"
- value: "{{ops_copy_ca | default('')}}"
- - name: "OPS_COPY_USERNAME"
- value: "{{ops_copy_username | default('')}}"
- - name: "OPS_COPY_PASSWORD"
- value: "{{ops_copy_password | default('')}}"
- - name: "USE_JOURNAL"
- value: "{{openshift_logging_fluentd_use_journal|lower}}"
- - name: "JOURNAL_SOURCE"
- value: "{{openshift_logging_fluentd_journal_source | default('')}}"
- - name: "JOURNAL_READ_FROM_HEAD"
- value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}"
- - name: "USE_MUX_CLIENT"
- value: "{{openshift_logging_use_mux_client| default('false')}}"
- volumes:
- - name: runlogjournal
- hostPath:
- path: /run/log/journal
- - name: varlog
- hostPath:
- path: /var/log
- - name: varlibdockercontainers
- hostPath:
- path: /var/lib/docker/containers
- - name: config
- configMap:
- name: logging-fluentd
- - name: certs
- secret:
- secretName: logging-fluentd
- - name: dockerhostname
- hostPath:
- path: /etc/hostname
- - name: localtime
- hostPath:
- path: /etc/localtime
- - name: dockercfg
- hostPath:
- path: /etc/sysconfig/docker
- - name: dockerdaemoncfg
- hostPath:
- path: /etc/docker
-{% if openshift_logging_use_mux_client | bool %}
- - name: muxcerts
- secret:
- secretName: logging-mux
-{% endif %}
diff --git a/roles/openshift_logging/templates/secret.j2 b/roles/openshift_logging/templates/secret.j2
deleted file mode 100644
index eba4197da..000000000
--- a/roles/openshift_logging/templates/secret.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-apiVersion: v1
-kind: Secret
-metadata:
- name: "{{secret_name}}"
-type: Opaque
-data:
-{% for s in secrets %}
- "{{s.key}}" : "{{s.value | b64encode}}"
-{% endfor %}
diff --git a/roles/openshift_logging/templates/service.j2 b/roles/openshift_logging/templates/service.j2
deleted file mode 100644
index 70644a39c..000000000
--- a/roles/openshift_logging/templates/service.j2
+++ /dev/null
@@ -1,34 +0,0 @@
-apiVersion: "v1"
-kind: "Service"
-metadata:
- name: "{{obj_name}}"
-{% if labels is defined%}
- labels:
-{% for key, value in labels.iteritems() %}
- {{key}}: {{value}}
-{% endfor %}
-{% endif %}
-spec:
- ports:
-{% for port in ports %}
- -
-{% for key, value in port.iteritems() %}
- {{key}}: {{value}}
-{% endfor %}
-{% if port.targetPort is undefined %}
- clusterIP: "None"
-{% endif %}
-{% endfor %}
-{% if service_targetPort is defined %}
- targetPort: {{service_targetPort}}
-{% endif %}
- selector:
- {% for key, value in selector.iteritems() %}
- {{key}}: {{value}}
- {% endfor %}
-{% if externalIPs is defined -%}
- externalIPs:
-{% for ip in externalIPs %}
- - {{ ip }}
-{% endfor %}
-{% endif %}
diff --git a/roles/openshift_logging/templates/serviceaccount.j2 b/roles/openshift_logging/templates/serviceaccount.j2
deleted file mode 100644
index b22acc594..000000000
--- a/roles/openshift_logging/templates/serviceaccount.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: {{obj_name}}
-{% if labels is defined%}
- labels:
-{% for key, value in labels.iteritems() %}
- {{key}}: {{value}}
-{% endfor %}
-{% endif %}
-{% if secrets is defined %}
-secrets:
-{% for name in secrets %}
-- name: {{ name }}
-{% endfor %}
-{% endif %}
diff --git a/roles/openshift_logging_curator/defaults/main.yml b/roles/openshift_logging_curator/defaults/main.yml
new file mode 100644
index 000000000..82ffb2f93
--- /dev/null
+++ b/roles/openshift_logging_curator/defaults/main.yml
@@ -0,0 +1,33 @@
+---
+### General logging settings
+openshift_logging_curator_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
+openshift_logging_curator_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_curator_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
+openshift_logging_curator_master_url: "https://kubernetes.default.svc.cluster.local"
+
+openshift_logging_curator_namespace: logging
+
+### Common settings
+openshift_logging_curator_nodeselector: ""
+openshift_logging_curator_cpu_limit: 100m
+openshift_logging_curator_memory_limit: null
+
+openshift_logging_curator_es_host: "logging-es"
+openshift_logging_curator_es_port: 9200
+
+# This should not exceed 1; a validation check should be added for this
+openshift_logging_curator_replicas: 1
+
+# determines whether this is an operations (ops) deployment or a non-ops deployment
+# used only for naming purposes
+openshift_logging_curator_ops_deployment: false
+
+openshift_logging_curator_default_days: 30
+openshift_logging_curator_run_hour: 0
+openshift_logging_curator_run_minute: 0
+openshift_logging_curator_run_timezone: UTC
+openshift_logging_curator_script_log_level: INFO
+openshift_logging_curator_log_level: ERROR
+
+# the following can be uncommented to provide values for configmaps -- take care when providing file contents, as incorrect contents may prevent your cluster from operating correctly
+#curator_config_contents:
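
As the trailing comment notes, curator_config_contents can be set in the inventory to replace the shipped curator.yml; the role copies whatever is provided into the logging-curator configmap verbatim, so malformed content is applied as-is. A hedged sketch of an override, with retention values that are purely illustrative and keys following the curator settings layout consumed by the logging-curator image:

curator_config_contents: |
  .defaults:
    delete:
      days: 30
  .operations:
    delete:
      weeks: 8
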
diff --git a/roles/openshift_logging/files/curator.yml b/roles/openshift_logging_curator/files/curator.yml
index 8d62d8e7d..8d62d8e7d 100644
--- a/roles/openshift_logging/files/curator.yml
+++ b/roles/openshift_logging_curator/files/curator.yml
diff --git a/roles/openshift_logging_curator/meta/main.yaml b/roles/openshift_logging_curator/meta/main.yaml
new file mode 100644
index 000000000..6752fb7f9
--- /dev/null
+++ b/roles/openshift_logging_curator/meta/main.yaml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Aggregated Logging Curator Component
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_logging_curator/tasks/determine_version.yaml b/roles/openshift_logging_curator/tasks/determine_version.yaml
new file mode 100644
index 000000000..94f8b4a97
--- /dev/null
+++ b/roles/openshift_logging_curator/tasks/determine_version.yaml
@@ -0,0 +1,17 @@
+---
+# TODO: consider making this a module instead
+- fail:
+ msg: Missing version to install provided by 'openshift_logging_image_version'
+ when: not openshift_logging_image_version or openshift_logging_image_version == ''
+
+- set_fact:
+ curator_version: "{{ __latest_curator_version }}"
+ when: openshift_logging_image_version == 'latest'
+
+# should we just assume that we will have the correct major version?
+- set_fact: curator_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
+ when: openshift_logging_image_version != 'latest'
+
+- fail:
+ msg: Invalid version specified for Curator
+ when: curator_version not in __allowed_curator_versions
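
The regex above keys only on the minor digit and hard-codes the "3_" major prefix in the replacement, so an image tag such as v3.6.173.0.5 resolves to 3_6 before being checked against __allowed_curator_versions. The same mapping in isolation, with an illustrative tag and a hypothetical fact name:

- set_fact: sample_version="{{ 'v3.6.173.0.5' | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
# sample_version resolves to "3_6"; a tag of "3.5.0" would resolve to "3_5"
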
diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml
new file mode 100644
index 000000000..ae7e48caa
--- /dev/null
+++ b/roles/openshift_logging_curator/tasks/main.yaml
@@ -0,0 +1,113 @@
+---
+- include: determine_version.yaml
+
+# allow passing in a tempdir
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+# This may not be necessary in this role
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+# we want to make sure we have all the necessary components here
+
+# service account
+- name: Create Curator service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-curator"
+ namespace: "{{ openshift_logging_namespace }}"
+ image_pull_secrets: "{{ openshift_logging_image_pull_secret }}"
+ when: openshift_logging_image_pull_secret != ''
+
+- name: Create Curator service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-curator"
+ namespace: "{{ openshift_logging_namespace }}"
+ when:
+ - openshift_logging_image_pull_secret == ''
+
+# configmap
+- copy:
+ src: curator.yml
+ dest: "{{ tempdir }}/curator.yml"
+ when: curator_config_contents is undefined
+ changed_when: no
+
+- copy:
+ content: "{{ curator_config_contents }}"
+ dest: "{{ tempdir }}/curator.yml"
+ when: curator_config_contents is defined
+ changed_when: no
+
+- name: Set Curator configmap
+ oc_configmap:
+ state: present
+ name: "logging-curator"
+ namespace: "{{ openshift_logging_namespace }}"
+ from_file:
+ config.yaml: "{{ tempdir }}/curator.yml"
+
+# secret
+- name: Set Curator secret
+ oc_secret:
+ state: present
+ name: "logging-curator"
+ namespace: "{{ openshift_logging_namespace }}"
+ files:
+ - name: ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: key
+ path: "{{ generated_certs_dir }}/system.logging.curator.key"
+ - name: cert
+ path: "{{ generated_certs_dir }}/system.logging.curator.crt"
+
+- set_fact:
+ curator_name: "{{ 'logging-curator' ~ ( (openshift_logging_curator_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}"
+ curator_component: "{{ 'curator' ~ ( (openshift_logging_curator_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}"
+
+# DC
+# TODO: scale should not exceed 1
+- name: Generate Curator deploymentconfig
+ template:
+ src: curator.j2
+ dest: "{{ tempdir }}/templates/curator-dc.yaml"
+ vars:
+ component: "{{ curator_component }}"
+ logging_component: curator
+ deploy_name: "{{ curator_name }}"
+ image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+ es_host: "{{ openshift_logging_curator_es_host }}"
+ es_port: "{{ openshift_logging_curator_es_port }}"
+ curator_cpu_limit: "{{ openshift_logging_curator_cpu_limit }}"
+ curator_memory_limit: "{{ openshift_logging_curator_memory_limit }}"
+ replicas: "{{ openshift_logging_curator_replicas | default (1) }}"
+ curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}"
+ check_mode: no
+ changed_when: no
+
+- name: Set Curator DC
+ oc_obj:
+ state: present
+ name: "{{ curator_name }}"
+ namespace: "{{ openshift_logging_namespace }}"
+ kind: dc
+ files:
+ - "{{ tempdir }}/templates/curator-dc.yaml"
+ delete_after: true
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
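
The ops variant of Curator reuses these same tasks; only the generated names change, through the ternary suffix computed above. The two possible outcomes, for reference:

# openshift_logging_curator_ops_deployment: false  ->  curator_name: logging-curator      curator_component: curator
# openshift_logging_curator_ops_deployment: true   ->  curator_name: logging-curator-ops  curator_component: curator-ops
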
diff --git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging_curator/templates/curator.j2
index c6284166b..f8b84861f 100644
--- a/roles/openshift_logging/templates/curator.j2
+++ b/roles/openshift_logging_curator/templates/curator.j2
@@ -7,7 +7,7 @@ metadata:
component: "{{component}}"
logging-infra: "{{logging_component}}"
spec:
- replicas: {{replicas|default(0)}}
+ replicas: {{replicas|default(1)}}
selector:
provider: openshift
component: "{{component}}"
@@ -42,13 +42,13 @@ spec:
resources:
limits:
cpu: "{{curator_cpu_limit}}"
-{% if curator_memory_limit is defined and curator_memory_limit is not none %}
+{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
memory: "{{curator_memory_limit}}"
{% endif %}
env:
-
name: "K8S_HOST_URL"
- value: "{{openshift_logging_master_url}}"
+ value: "{{openshift_logging_curator_master_url}}"
-
name: "ES_HOST"
value: "{{es_host}}"
@@ -89,6 +89,9 @@ spec:
- name: config
mountPath: /etc/curator/settings
readOnly: true
+ - name: elasticsearch-storage
+ mountPath: /elasticsearch/persistent
+ readOnly: true
volumes:
- name: certs
secret:
@@ -96,3 +99,5 @@ spec:
- name: config
configMap:
name: logging-curator
+ - name: elasticsearch-storage
+ emptyDir: {}
diff --git a/roles/openshift_logging_curator/vars/main.yml b/roles/openshift_logging_curator/vars/main.yml
new file mode 100644
index 000000000..97525479e
--- /dev/null
+++ b/roles/openshift_logging_curator/vars/main.yml
@@ -0,0 +1,3 @@
+---
+__latest_curator_version: "3_5"
+__allowed_curator_versions: ["3_5", "3_6"]
diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml
new file mode 100644
index 000000000..c0b5d394e
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/defaults/main.yml
@@ -0,0 +1,57 @@
+---
+### Common settings
+openshift_logging_elasticsearch_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
+openshift_logging_elasticsearch_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_elasticsearch_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
+openshift_logging_elasticsearch_namespace: logging
+
+openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector | default('') }}"
+openshift_logging_elasticsearch_cpu_limit: 1000m
+openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_memory_limit | default('1Gi') }}"
+openshift_logging_elasticsearch_recover_after_time: "{{ openshift_logging_es_recover_after_time | default('5m') }}"
+
+openshift_logging_elasticsearch_replica_count: 1
+
+# ES deployment type
+openshift_logging_elasticsearch_deployment_type: "data-master"
+
+# ES deployment name
+openshift_logging_elasticsearch_deployment_name: ""
+
+# One of ['emptydir', 'pvc', 'hostmount']
+openshift_logging_elasticsearch_storage_type: "emptydir"
+
+# hostmount options
+openshift_logging_elasticsearch_hostmount_path: ""
+
+# pvc options
+# the name of the PVC we will bind to -- create it if it does not exist
+openshift_logging_elasticsearch_pvc_name: ""
+
+# required if the PVC does not already exist
+openshift_logging_elasticsearch_pvc_size: ""
+openshift_logging_elasticsearch_pvc_dynamic: false
+openshift_logging_elasticsearch_pvc_pv_selector: {}
+openshift_logging_elasticsearch_pvc_access_modes: ['ReadWriteOnce']
+openshift_logging_elasticsearch_storage_group: '65534'
+
+openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
+
+# determines whether this is an operations (ops) deployment or a non-ops deployment
+# used only for naming purposes
+openshift_logging_elasticsearch_ops_deployment: false
+
+openshift_logging_elasticsearch_ops_allow_cluster_reader: false
+
+# the following can be uncommented to provide values for configmaps -- take care when providing file contents, as incorrect contents may prevent your cluster from operating correctly
+#es_logging_contents:
+#es_config_contents:
+
+
+openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
+openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}"
+openshift_logging_es_host: logging-es
+openshift_logging_es_port: 9200
+openshift_logging_es_ca: /etc/fluent/keys/ca
+openshift_logging_es_client_cert: /etc/fluent/keys/cert
+openshift_logging_es_client_key: /etc/fluent/keys/key
diff --git a/roles/openshift_logging/files/es_migration.sh b/roles/openshift_logging_elasticsearch/files/es_migration.sh
index 339b5a1b2..339b5a1b2 100644
--- a/roles/openshift_logging/files/es_migration.sh
+++ b/roles/openshift_logging_elasticsearch/files/es_migration.sh
diff --git a/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml b/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml
new file mode 100644
index 000000000..567c9f289
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: ClusterRole
+metadata:
+ name: rolebinding-reader
+rules:
+- resources:
+ - clusterrolebindings
+ verbs:
+ - get
diff --git a/roles/openshift_logging_elasticsearch/meta/main.yaml b/roles/openshift_logging_elasticsearch/meta/main.yaml
new file mode 100644
index 000000000..097270772
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/meta/main.yaml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Aggregated Logging Elasticsearch Component
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml
new file mode 100644
index 000000000..1a952b5cf
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml
@@ -0,0 +1,19 @@
+---
+# TODO: consider making this a module instead
+- fail:
+ msg: Missing version to install provided by 'openshift_logging_image_version'
+ when: not openshift_logging_image_version or openshift_logging_image_version == ''
+
+- set_fact:
+ es_version: "{{ __latest_es_version }}"
+ when: openshift_logging_image_version == 'latest'
+
+- debug: var=openshift_logging_image_version
+
+# should we just assume that we will have the correct major version?
+- set_fact: es_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
+ when: openshift_logging_image_version != 'latest'
+
+- fail:
+ msg: Invalid version specified for Elasticsearch
+ when: es_version not in __allowed_es_versions
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
new file mode 100644
index 000000000..7e88a7498
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -0,0 +1,278 @@
+---
+- name: Validate Elasticsearch cluster size
+ fail: msg="The openshift_logging_es_cluster_size may only be scaled down manually. Please see official documentation on how to do this."
+ when: openshift_logging_facts.elasticsearch.deploymentconfigs | length > openshift_logging_es_cluster_size|int
+
+- name: Validate Elasticsearch Ops cluster size
+ fail: msg="The openshift_logging_es_ops_cluster_size may only be scaled down manually. Please see official documentation on how to do this."
+ when: openshift_logging_facts.elasticsearch_ops.deploymentconfigs | length > openshift_logging_es_ops_cluster_size|int
+
+- fail:
+ msg: Invalid deployment type, one of ['data-master', 'data-client', 'master', 'client'] allowed
+ when: not openshift_logging_elasticsearch_deployment_type in __allowed_es_types
+
+- set_fact:
+ elasticsearch_name: "{{ 'logging-elasticsearch' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}"
+ es_component: "{{ 'es' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}"
+
+- include: determine_version.yaml
+
+# allow passing in a tempdir
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+# This may not be necessary in this role
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+# we want to make sure we have all the necessary components here
+
+# service account
+- name: Create ES service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-elasticsearch"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ image_pull_secrets: "{{ openshift_logging_image_pull_secret }}"
+ when: openshift_logging_image_pull_secret != ''
+
+- name: Create ES service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-elasticsearch"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ when:
+ - openshift_logging_image_pull_secret == ''
+
+# rolebinding reader
+- copy:
+ src: rolebinding-reader.yml
+ dest: "{{ tempdir }}/rolebinding-reader.yml"
+
+- name: Create rolebinding-reader role
+ oc_obj:
+ state: present
+ name: "rolebinding-reader"
+ kind: clusterrole
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - "{{ tempdir }}/rolebinding-reader.yml"
+ delete_after: true
+
+# SA roles
+- name: Set rolebinding-reader permissions for ES
+ oc_adm_policy_user:
+ state: present
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ resource_kind: cluster-role
+ resource_name: rolebinding-reader
+ user: "system:serviceaccount:{{ openshift_logging_elasticsearch_namespace }}:aggregated-logging-elasticsearch"
+
+# View role and binding
+- name: Generate logging-elasticsearch-view-role
+ template:
+ src: rolebinding.j2
+ dest: "{{mktemp.stdout}}/logging-elasticsearch-view-role.yaml"
+ vars:
+ obj_name: logging-elasticsearch-view-role
+ roleRef:
+ name: view
+ subjects:
+ - kind: ServiceAccount
+ name: aggregated-logging-elasticsearch
+ changed_when: no
+
+- name: Set logging-elasticsearch-view-role role
+ oc_obj:
+ state: present
+ name: "logging-elasticsearch-view-role"
+ kind: rolebinding
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - "{{ tempdir }}/logging-elasticsearch-view-role.yaml"
+ delete_after: true
+
+# configmap
+- template:
+ src: elasticsearch-logging.yml.j2
+ dest: "{{ tempdir }}/elasticsearch-logging.yml"
+ when: es_logging_contents is undefined
+ changed_when: no
+
+- template:
+ src: elasticsearch.yml.j2
+ dest: "{{ tempdir }}/elasticsearch.yml"
+ vars:
+ allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower | default('false') }}"
+ es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
+ es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}"
+ when: es_config_contents is undefined
+ changed_when: no
+
+- copy:
+ content: "{{ es_logging_contents }}"
+ dest: "{{ tempdir }}/elasticsearch-logging.yml"
+ when: es_logging_contents is defined
+ changed_when: no
+
+- copy:
+ content: "{{ es_config_contents }}"
+ dest: "{{ tempdir }}/elasticsearch.yml"
+ when: es_config_contents is defined
+ changed_when: no
+
+- name: Set ES configmap
+ oc_configmap:
+ state: present
+ name: "{{ elasticsearch_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ from_file:
+ elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml"
+ logging.yml: "{{ tempdir }}/elasticsearch-logging.yml"
+
+
+# secret
+- name: Set ES secret
+ oc_secret:
+ state: present
+ name: "logging-elasticsearch"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - name: key
+ path: "{{ generated_certs_dir }}/logging-es.jks"
+ - name: truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: searchguard.key
+ path: "{{ generated_certs_dir }}/elasticsearch.jks"
+ - name: searchguard.truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: admin-key
+ path: "{{ generated_certs_dir }}/system.admin.key"
+ - name: admin-cert
+ path: "{{ generated_certs_dir }}/system.admin.crt"
+ - name: admin-ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: admin.jks
+ path: "{{ generated_certs_dir }}/system.admin.jks"
+
+# services
+- name: Set logging-{{ es_component }}-cluster service
+ oc_service:
+ state: present
+ name: "logging-{{ es_component }}-cluster"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ selector:
+ component: "{{ es_component }}"
+ provider: openshift
+ # pending #4091
+ #labels:
+ #- logging-infra: 'support'
+ ports:
+ - port: 9300
+
+- name: Set logging-{{ es_component }} service
+ oc_service:
+ state: present
+ name: "logging-{{ es_component }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ selector:
+ component: "{{ es_component }}"
+ provider: openshift
+ # pending #4091
+ #labels:
+ #- logging-infra: 'support'
+ ports:
+ - port: 9200
+ targetPort: "restapi"
+
+- name: Creating ES storage template
+ template:
+ src: pvc.j2
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ openshift_logging_elasticsearch_pvc_size }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ when:
+ - openshift_logging_elasticsearch_storage_type == "pvc"
+ - not openshift_logging_elasticsearch_pvc_dynamic
+
+- name: Creating ES storage template
+ template:
+ src: pvc.j2
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ openshift_logging_elasticsearch_pvc_size }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ annotations:
+ volume.alpha.kubernetes.io/storage-class: "dynamic"
+ when:
+ - openshift_logging_elasticsearch_storage_type == "pvc"
+ - openshift_logging_elasticsearch_pvc_dynamic
+
+- name: Set ES storage
+ oc_obj:
+ state: present
+ kind: pvc
+ name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - "{{ tempdir }}/templates/logging-es-pvc.yml"
+ delete_after: true
+ when:
+ - openshift_logging_elasticsearch_storage_type == "pvc"
+
+- set_fact:
+ es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 'abcdefghijklmnopqrstuvwxyz0123456789' | random_word(8) }}"
+ when: openshift_logging_elasticsearch_deployment_name == ""
+
+- set_fact:
+ es_deploy_name: "{{ openshift_logging_elasticsearch_deployment_name }}"
+ when: openshift_logging_elasticsearch_deployment_name != ""
+
+# DC
+- name: Set ES dc templates
+ template:
+ src: es.j2
+ dest: "{{ tempdir }}/templates/logging-es-dc.yml"
+ vars:
+ es_cluster_name: "{{ es_component }}"
+ component: "{{ es_component }}"
+ logging_component: elasticsearch
+ deploy_name: "{{ es_deploy_name }}"
+ image: "{{ openshift_logging_image_prefix }}logging-elasticsearch:{{ openshift_logging_image_version }}"
+ es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"
+ es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}"
+ deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}"
+ replicas: 1
+
+- name: Set ES dc
+ oc_obj:
+ state: present
+ name: "{{ es_deploy_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ kind: dc
+ files:
+ - "{{ tempdir }}/templates/logging-es-dc.yml"
+ delete_after: true
+
+## Placeholder for migration when necessary ##
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
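For orientation, a minimal inventory sketch that would drive the PVC branch of the storage tasks above; the variable names come from those tasks, while the values are illustrative assumptions rather than defaults shipped with this patch:

openshift_logging_elasticsearch_storage_type: pvc
openshift_logging_elasticsearch_pvc_name: logging-es-0
openshift_logging_elasticsearch_pvc_size: 10Gi
openshift_logging_elasticsearch_pvc_access_modes: ['ReadWriteOnce']
openshift_logging_elasticsearch_pvc_pv_selector: {}
openshift_logging_elasticsearch_pvc_dynamic: false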
diff --git a/roles/openshift_logging/templates/elasticsearch-logging.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2
index 499e77fb7..377abe21f 100644
--- a/roles/openshift_logging/templates/elasticsearch-logging.yml.j2
+++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2
@@ -1,25 +1,14 @@
# you can override this using by setting a system property, for example -Des.logger.level=DEBUG
es.logger.level: INFO
-rootLogger: ${es.logger.level}, {{root_logger}}
+rootLogger: ${es.logger.level}, console, file
logger:
# log action execution errors for easier debugging
action: WARN
-
- # deprecation logging, turn to DEBUG to see them
- deprecation: WARN, deprecation_log_file
-
# reduce the logging for aws, too much is logged under the default INFO
com.amazonaws: WARN
-
io.fabric8.elasticsearch: ${PLUGIN_LOGLEVEL}
io.fabric8.kubernetes: ${PLUGIN_LOGLEVEL}
- # aws will try to do some sketchy JMX stuff, but its not needed.
- com.amazonaws.jmx.SdkMBeanRegistrySupport: ERROR
- com.amazonaws.metrics.AwsSdkMetrics: ERROR
-
- org.apache.http: INFO
-
# gateway
#gateway: DEBUG
#index.gateway: DEBUG
@@ -39,14 +28,13 @@ logger:
additivity:
index.search.slowlog: false
index.indexing.slowlog: false
- deprecation: false
appender:
console:
type: console
layout:
type: consolePattern
- conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %.10000m%n"
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
file:
type: dailyRollingFile
@@ -56,13 +44,16 @@ appender:
type: pattern
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
- deprecation_log_file:
- type: dailyRollingFile
- file: ${path.logs}/${cluster.name}_deprecation.log
- datePattern: "'.'yyyy-MM-dd"
- layout:
- type: pattern
- conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+ # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files.
+ # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html
+ #file:
+ #type: extrasRollingFile
+ #file: ${path.logs}/${cluster.name}.log
+ #rollingPolicy: timeBased
+ #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz
+ #layout:
+ #type: pattern
+ #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
index_search_slow_log_file:
type: dailyRollingFile
diff --git a/roles/openshift_logging/templates/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
index 355642cb7..681f5a7e6 100644
--- a/roles/openshift_logging/templates/elasticsearch.yml.j2
+++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
@@ -14,8 +14,8 @@ index:
flush_threshold_period: 5m
node:
- master: true
- data: true
+ master: ${IS_MASTER}
+ data: ${HAS_DATA}
network:
host: 0.0.0.0
diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index 680c16cf4..e129205ca 100644
--- a/roles/openshift_logging/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -8,7 +8,7 @@ metadata:
deployment: "{{deploy_name}}"
logging-infra: "{{logging_component}}"
spec:
- replicas: {{replicas|default(0)}}
+ replicas: {{replicas|default(1)}}
selector:
provider: openshift
component: "{{component}}"
@@ -29,7 +29,7 @@ spec:
serviceAccountName: aggregated-logging-elasticsearch
securityContext:
supplementalGroups:
- - {{openshift_logging_es_storage_group}}
+ - {{openshift_logging_elasticsearch_storage_group}}
{% if es_node_selector is iterable and es_node_selector | length > 0 %}
nodeSelector:
{% for key, value in es_node_selector.iteritems() %}
@@ -73,7 +73,7 @@ spec:
value: "logging-{{es_cluster_name}}"
-
name: "INSTANCE_RAM"
- value: "{{openshift_logging_es_memory_limit}}"
+ value: "{{openshift_logging_elasticsearch_memory_limit}}"
-
name: "NODE_QUORUM"
value: "{{es_node_quorum | int}}"
@@ -82,7 +82,15 @@ spec:
value: "{{es_recover_expected_nodes}}"
-
name: "RECOVER_AFTER_TIME"
- value: "{{openshift_logging_es_recover_after_time}}"
+ value: "{{openshift_logging_elasticsearch_recover_after_time}}"
+ -
+ name: "IS_MASTER"
+ value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}"
+
+ -
+ name: "HAS_DATA"
+ value: "{% if deploy_type in ['data-master', 'data-client'] %}true{% else %}false{% endif %}"
+
volumeMounts:
- name: elasticsearch
mountPath: /etc/elasticsearch/secret
@@ -107,4 +115,12 @@ spec:
configMap:
name: logging-elasticsearch
- name: elasticsearch-storage
-{% include 'es-storage-'+ es_storage['kind'] + '.partial' %}
+{% if openshift_logging_elasticsearch_storage_type == 'pvc' %}
+ persistentVolumeClaim:
+ claimName: {{ openshift_logging_elasticsearch_pvc_name }}
+{% elif openshift_logging_elasticsearch_storage_type == 'hostmount' %}
+ hostPath:
+ path: {{ openshift_logging_elasticsearch_hostmount_path }}
+{% else %}
+ emptyDir: {}
+{% endif %}
diff --git a/roles/openshift_logging/templates/pvc.j2 b/roles/openshift_logging_elasticsearch/templates/pvc.j2
index 07d81afff..f19a3a750 100644
--- a/roles/openshift_logging/templates/pvc.j2
+++ b/roles/openshift_logging_elasticsearch/templates/pvc.j2
@@ -1,7 +1,7 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: "{{obj_name}}"
+ name: {{obj_name}}
labels:
logging-infra: support
{% if annotations is defined %}
diff --git a/roles/openshift_logging/templates/rolebinding.j2 b/roles/openshift_logging_elasticsearch/templates/rolebinding.j2
index fcd4e87cc..fcd4e87cc 100644
--- a/roles/openshift_logging/templates/rolebinding.j2
+++ b/roles/openshift_logging_elasticsearch/templates/rolebinding.j2
diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml
new file mode 100644
index 000000000..7a1f5048b
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/vars/main.yml
@@ -0,0 +1,12 @@
+---
+__latest_es_version: "3_5"
+__allowed_es_versions: ["3_5", "3_6"]
+__allowed_es_types: ["data-master", "data-client", "master", "client"]
+
+# TODO: integrate these
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+es_node_quorum: "{{ openshift_logging_elasticsearch_replica_count | int/2 + 1 }}"
+es_min_masters_default: "{{ (openshift_logging_elasticsearch_replica_count | int / 2 | round(0,'floor') + 1) | int }}"
+es_min_masters: "{{ (openshift_logging_elasticsearch_replica_count == 1) | ternary(1, es_min_masters_default) }}"
+es_recover_after_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}"
+es_recover_expected_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}"
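As a rough check of the expressions above, a hedged worked example for an assumed three-node cluster (the replica count is chosen for illustration only):

# assuming openshift_logging_elasticsearch_replica_count: 3
#   es_node_quorum            -> 3/2 + 1 = 2.5, cast to int where it is consumed -> 2
#   es_min_masters_default    -> int(3/2 + 1) -> 2
#   es_min_masters            -> 2 (replica count is not 1, so the default applies)
#   es_recover_after_nodes    -> 3
#   es_recover_expected_nodes -> 3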
diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml
new file mode 100644
index 000000000..228196d74
--- /dev/null
+++ b/roles/openshift_logging_fluentd/defaults/main.yml
@@ -0,0 +1,59 @@
+---
+### General logging settings
+openshift_logging_fluentd_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
+openshift_logging_fluentd_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_fluentd_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
+openshift_logging_fluentd_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
+openshift_logging_fluentd_namespace: logging
+
+### Common settings
+openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
+openshift_logging_fluentd_cpu_limit: 100m
+openshift_logging_fluentd_memory_limit: 512Mi
+openshift_logging_fluentd_hosts: ['--all']
+
+# time in seconds (float) to wait between node labelling
+openshift_logging_fluentd_label_delay: '0.5'
+
+# Fluentd deployment type
+openshift_logging_fluentd_deployment_type: "hosted"
+
+### Used by "hosted" and "secure-host" deployments
+
+# Destination for the application based logs
+openshift_logging_fluentd_app_host: "logging-es"
+openshift_logging_fluentd_app_port: 9200
+# Destination for the operations based logs
+openshift_logging_fluentd_ops_host: "{{ openshift_logging_fluentd_app_host }}"
+openshift_logging_fluentd_ops_port: "{{ openshift_logging_fluentd_app_port }}"
+
+### Used by "hosted" and "secure-aggregator" deployments
+#openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal }}"
+openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}"
+openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}"
+
+openshift_logging_fluentd_app_client_cert: /etc/fluent/keys/cert
+openshift_logging_fluentd_app_client_key: /etc/fluent/keys/key
+openshift_logging_fluentd_app_ca: /etc/fluent/keys/ca
+openshift_logging_fluentd_ops_client_cert: /etc/fluent/keys/cert
+openshift_logging_fluentd_ops_client_key: /etc/fluent/keys/key
+openshift_logging_fluentd_ops_ca: /etc/fluent/keys/ca
+
+
+# used by "secure-host" and "secure-aggregator" deployments
+openshift_logging_fluentd_shared_key: "{{ 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' | random_word(128) }}"
+openshift_logging_fluentd_aggregating_port: 24284
+openshift_logging_fluentd_aggregating_host: "${HOSTNAME}"
+openshift_logging_fluentd_aggregating_secure: "no"
+openshift_logging_fluentd_aggregating_strict: "no"
+openshift_logging_fluentd_aggregating_cert_path: none
+openshift_logging_fluentd_aggregating_key_path: none
+openshift_logging_fluentd_aggregating_passphrase: none
+
+### Deprecating in 3.6
+openshift_logging_fluentd_es_copy: false
+
+# the following can be uncommented to provide values for configmaps -- take care when providing file contents, as they may prevent your cluster from operating correctly
+#fluentd_config_contents:
+#fluentd_throttle_contents:
+#fluentd_secureforward_contents:
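A hypothetical sketch of how one of these configmap variables could be supplied from an inventory or vars file; the path and the use of the file lookup are assumptions for illustration, not part of the role:

# group_vars/oo_first_master.yml (hypothetical)
fluentd_config_contents: "{{ lookup('file', '/etc/origin/logging/custom-fluent.conf') }}"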
diff --git a/roles/openshift_logging/files/fluentd-throttle-config.yaml b/roles/openshift_logging_fluentd/files/fluentd-throttle-config.yaml
index 375621ff1..375621ff1 100644
--- a/roles/openshift_logging/files/fluentd-throttle-config.yaml
+++ b/roles/openshift_logging_fluentd/files/fluentd-throttle-config.yaml
diff --git a/roles/openshift_logging/files/secure-forward.conf b/roles/openshift_logging_fluentd/files/secure-forward.conf
index f4483df79..f4483df79 100644
--- a/roles/openshift_logging/files/secure-forward.conf
+++ b/roles/openshift_logging_fluentd/files/secure-forward.conf
diff --git a/roles/openshift_logging_fluentd/meta/main.yaml b/roles/openshift_logging_fluentd/meta/main.yaml
new file mode 100644
index 000000000..2003aacb2
--- /dev/null
+++ b/roles/openshift_logging_fluentd/meta/main.yaml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Aggregated Logging Fluentd Component
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_logging_fluentd/tasks/determine_version.yaml b/roles/openshift_logging_fluentd/tasks/determine_version.yaml
new file mode 100644
index 000000000..a1ba71b1b
--- /dev/null
+++ b/roles/openshift_logging_fluentd/tasks/determine_version.yaml
@@ -0,0 +1,17 @@
+---
+# debating making this a module instead?
+- fail:
+ msg: Missing version to install; provide it via 'openshift_logging_image_version'
+ when: not openshift_logging_image_version or openshift_logging_image_version == ''
+
+- set_fact:
+ fluentd_version: "{{ __latest_fluentd_version }}"
+ when: openshift_logging_image_version == 'latest'
+
+# should we just assume that we will have the correct major version?
+- set_fact: fluentd_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
+ when: openshift_logging_image_version != 'latest'
+
+- fail:
+ msg: Invalid version specified for Fluentd
+ when: fluentd_version not in __allowed_fluentd_versions
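For illustration, the regex above maps image tags to the internal version keys roughly as follows (example tags are assumptions, and single-digit major and minor components are assumed):

# openshift_logging_image_version: "v3.6.173.0.5" -> fluentd_version: "3_6"
# openshift_logging_image_version: "3.5.0"        -> fluentd_version: "3_5"
# openshift_logging_image_version: "latest"       -> fluentd_version: "{{ __latest_fluentd_version }}"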
diff --git a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml
new file mode 100644
index 000000000..e92a35f27
--- /dev/null
+++ b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml
@@ -0,0 +1,10 @@
+---
+- name: Label {{ node }} for Fluentd deployment
+ oc_label:
+ name: "{{ node }}"
+ kind: node
+ state: add
+ labels: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
+
+# wait half a second between labels
+- local_action: command sleep {{ openshift_logging_fluentd_label_delay | default('.5') }}
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
new file mode 100644
index 000000000..8194223e8
--- /dev/null
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -0,0 +1,206 @@
+---
+- fail:
+ msg: Only one Fluentd nodeselector key pair should be provided
+ when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1"
+
+- fail:
+ msg: Application logs destination is required
+ when: not openshift_logging_fluentd_app_host or openshift_logging_fluentd_app_host == ''
+
+- fail:
+ msg: Operations logs destination is required
+ when: not openshift_logging_fluentd_ops_host or openshift_logging_fluentd_ops_host == ''
+
+- fail:
+ msg: Invalid deployment type, one of ['hosted', 'secure-aggregator', 'secure-host'] allowed
+ when: not openshift_logging_fluentd_deployment_type in __allowed_fluentd_types
+
+- include: determine_version.yaml
+
+- set_fact:
+ openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal }}"
+ when:
+ - openshift_hosted_logging_use_journal is defined
+ - openshift_logging_fluentd_use_journal is not defined
+
+- set_fact:
+ openshift_logging_fluentd_use_journal: "{{ __fluentd_use_journal }}"
+ when:
+ - openshift_hosted_logging_use_journal is not defined
+ - openshift_logging_fluentd_use_journal is not defined
+
+# allow passing in a tempdir
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+# we want to make sure we have all the necessary components here
+
+# create service account
+- name: Create Fluentd service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-fluentd"
+ namespace: "{{ openshift_logging_fluentd_namespace }}"
+ image_pull_secrets: "{{ openshift_logging_image_pull_secret }}"
+ when: openshift_logging_image_pull_secret != ''
+
+- name: Create Fluentd service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-fluentd"
+ namespace: "{{ openshift_logging_fluentd_namespace }}"
+ when:
+ - openshift_logging_image_pull_secret == ''
+
+# set service account scc
+- name: Set privileged permissions for Fluentd
+ oc_adm_policy_user:
+ namespace: "{{ openshift_logging_fluentd_namespace }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+ user: "system:serviceaccount:{{ openshift_logging_fluentd_namespace }}:aggregated-logging-fluentd"
+
+# set service account permissions
+- name: Set cluster-reader permissions for Fluentd
+ oc_adm_policy_user:
+ namespace: "{{ openshift_logging_fluentd_namespace }}"
+ resource_kind: cluster-role
+ resource_name: cluster-reader
+ state: present
+ user: "system:serviceaccount:{{ openshift_logging_fluentd_namespace }}:aggregated-logging-fluentd"
+
+# create Fluentd configmap
+- template:
+ src: fluent.conf.j2
+ dest: "{{ tempdir }}/fluent.conf"
+ vars:
+ deploy_type: "{{ openshift_logging_fluentd_deployment_type }}"
+ when: fluentd_config_contents is undefined
+ changed_when: no
+
+- copy:
+ src: fluentd-throttle-config.yaml
+ dest: "{{ tempdir }}/fluentd-throttle-config.yaml"
+ when: fluentd_throttle_contents is undefined
+ changed_when: no
+
+- copy:
+ src: secure-forward.conf
+ dest: "{{ tempdir }}/secure-forward.conf"
+ when: fluentd_secureforward_contents is undefined
+ changed_when: no
+
+- copy:
+ content: "{{ fluentd_config_contents }}"
+ dest: "{{ tempdir }}/fluent.conf"
+ when: fluentd_config_contents is defined
+ changed_when: no
+
+- copy:
+ content: "{{ fluentd_throttle_contents }}"
+ dest: "{{ tempdir }}/fluentd-throttle-config.yaml"
+ when: fluentd_throttle_contents is defined
+ changed_when: no
+
+- copy:
+ content: "{{ fluentd_secureforward_contents }}"
+ dest: "{{ tempdir }}/secure-forward.conf"
+ when: fluentd_secureforward_contents is defined
+ changed_when: no
+
+- name: Set Fluentd configmap
+ oc_configmap:
+ state: present
+ name: "logging-fluentd"
+ namespace: "{{ openshift_logging_fluentd_namespace }}"
+ from_file:
+ fluent.conf: "{{ tempdir }}/fluent.conf"
+ throttle-config.yaml: "{{ tempdir }}/fluentd-throttle-config.yaml"
+ secure-forward.conf: "{{ tempdir }}/secure-forward.conf"
+
+# create Fluentd secret
+# TODO: add aggregation secrets if necessary
+- name: Set logging-fluentd secret
+ oc_secret:
+ state: present
+ name: logging-fluentd
+ namespace: "{{ openshift_logging_fluentd_namespace }}"
+ files:
+ - name: ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: key
+ path: "{{ generated_certs_dir }}/system.logging.fluentd.key"
+ - name: cert
+ path: "{{ generated_certs_dir }}/system.logging.fluentd.crt"
+
+# create Fluentd daemonset
+
+# this should change based on the type of fluentd deployment to be done...
+# TODO: pass in aggregation configurations
+- name: Generate logging-fluentd daemonset definition
+ template:
+ src: fluentd.j2
+ dest: "{{ tempdir }}/templates/logging-fluentd.yaml"
+ vars:
+ daemonset_name: logging-fluentd
+ daemonset_component: fluentd
+ daemonset_container_name: fluentd-elasticsearch
+ daemonset_serviceAccount: aggregated-logging-fluentd
+ app_host: "{{ openshift_logging_fluentd_app_host }}"
+ app_port: "{{ openshift_logging_fluentd_app_port }}"
+ ops_host: "{{ openshift_logging_fluentd_ops_host }}"
+ ops_port: "{{ openshift_logging_fluentd_ops_port }}"
+ fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}"
+ fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}"
+ check_mode: no
+ changed_when: no
+
+- name: Set logging-fluentd daemonset
+ oc_obj:
+ state: present
+ name: logging-fluentd
+ namespace: "{{ openshift_logging_fluentd_namespace }}"
+ kind: daemonset
+ files:
+ - "{{ tempdir }}/templates/logging-fluentd.yaml"
+ delete_after: true
+
+# Scale up Fluentd
+- name: Retrieve list of Fluentd hosts
+ oc_obj:
+ state: list
+ kind: node
+ when: "'--all' in openshift_logging_fluentd_hosts"
+ register: fluentd_hosts
+
+- name: Set openshift_logging_fluentd_hosts
+ set_fact:
+ openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
+ when: "'--all' in openshift_logging_fluentd_hosts"
+
+- include: label_and_wait.yaml
+ vars:
+ node: "{{ fluentd_host }}"
+ with_items: "{{ openshift_logging_fluentd_hosts }}"
+ loop_control:
+ loop_var: fluentd_host
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
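When labelling only a subset of nodes is desired, the '--all' default above can be replaced with an explicit list; a hedged inventory sketch with placeholder hostnames:

openshift_logging_fluentd_hosts:
- node1.example.com
- node2.example.com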
diff --git a/roles/openshift_logging_fluentd/templates/fluent.conf.j2 b/roles/openshift_logging_fluentd/templates/fluent.conf.j2
new file mode 100644
index 000000000..46de94d60
--- /dev/null
+++ b/roles/openshift_logging_fluentd/templates/fluent.conf.j2
@@ -0,0 +1,78 @@
+# This file is the fluentd configuration entrypoint. Edit with care.
+
+@include configs.d/openshift/system.conf
+
+# In each section below, pre- and post- includes don't include anything initially;
+# they exist to enable future additions to openshift conf as needed.
+
+## sources
+{% if deploy_type in ['hosted', 'secure-aggregator'] %}
+## ordered so that syslog always runs last...
+@include configs.d/openshift/input-pre-*.conf
+@include configs.d/dynamic/input-docker-*.conf
+@include configs.d/dynamic/input-syslog-*.conf
+@include configs.d/openshift/input-post-*.conf
+##
+{% else %}
+<source>
+ @type secure_forward
+ @label @INGRESS
+
+ self_hostname ${HOSTNAME}
+ bind 0.0.0.0
+ port {{openshift_logging_fluentd_aggregating_port}}
+
+ shared_key {{openshift_logging_fluentd_shared_key}}
+
+ secure {{openshift_logging_fluentd_aggregating_secure}}
+ enable_strict_verification {{openshift_logging_fluentd_aggregating_strict}}
+ ca_cert_path {{openshift_logging_fluentd_aggregating_cert_path}}
+ ca_private_key_path {{openshift_logging_fluentd_aggregating_key_path}}
+ ca_private_key_passphrase {{openshift_logging_fluentd_aggregating_passphrase}}
+
+ <client>
+ host {{openshift_logging_fluentd_aggregating_host}}
+ </client>
+</source>
+{% endif %}
+
+<label @INGRESS>
+{% if deploy_type in ['hosted', 'secure-host'] %}
+## filters
+ @include configs.d/openshift/filter-pre-*.conf
+ @include configs.d/openshift/filter-retag-journal.conf
+ @include configs.d/openshift/filter-k8s-meta.conf
+ @include configs.d/openshift/filter-kibana-transform.conf
+ @include configs.d/openshift/filter-k8s-flatten-hash.conf
+ @include configs.d/openshift/filter-k8s-record-transform.conf
+ @include configs.d/openshift/filter-syslog-record-transform.conf
+ @include configs.d/openshift/filter-viaq-data-model.conf
+ @include configs.d/openshift/filter-post-*.conf
+##
+
+## matches
+ @include configs.d/openshift/output-pre-*.conf
+ @include configs.d/openshift/output-operations.conf
+ @include configs.d/openshift/output-applications.conf
+ # no post - applications.conf matches everything left
+##
+{% else %}
+ <match **>
+ @type secure_forward
+
+ self_hostname ${HOSTNAME}
+ shared_key {{openshift_logging_fluentd_shared_key}}
+
+ secure {{openshift_logging_fluentd_aggregating_secure}}
+ enable_strict_verification {{openshift_logging_fluentd_aggregating_strict}}
+ ca_cert_path {{openshift_logging_fluentd_aggregating_cert_path}}
+ ca_private_key_path {{openshift_logging_fluentd_aggregating_key_path}}
+ ca_private_key_passphrase {{openshift_logging_fluentd_aggregating_passphrase}}
+
+ <server>
+ host {{openshift_logging_fluentd_aggregating_host}}
+ port {{openshift_logging_fluentd_aggregating_port}}
+ </server>
+ </match>
+{% endif %}
+</label>
diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2
new file mode 100644
index 000000000..e185938e3
--- /dev/null
+++ b/roles/openshift_logging_fluentd/templates/fluentd.j2
@@ -0,0 +1,123 @@
+apiVersion: extensions/v1beta1
+kind: "DaemonSet"
+metadata:
+ name: "{{ daemonset_name }}"
+ labels:
+ provider: openshift
+ component: "{{ daemonset_component }}"
+ logging-infra: "{{ daemonset_component }}"
+spec:
+ selector:
+ matchLabels:
+ provider: openshift
+ component: "{{ daemonset_component }}"
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ minReadySeconds: 600
+ template:
+ metadata:
+ name: "{{ daemonset_container_name }}"
+ labels:
+ logging-infra: "{{ daemonset_component }}"
+ provider: openshift
+ component: "{{ daemonset_component }}"
+ spec:
+ serviceAccountName: "{{ daemonset_serviceAccount }}"
+ nodeSelector:
+ {{ fluentd_nodeselector_key }}: "{{ fluentd_nodeselector_value }}"
+ containers:
+ - name: "{{ daemonset_container_name }}"
+ image: "{{ openshift_logging_image_prefix }}{{ daemonset_name }}:{{ openshift_logging_image_version }}"
+ imagePullPolicy: Always
+ securityContext:
+ privileged: true
+ resources:
+ limits:
+ cpu: {{ openshift_logging_fluentd_cpu_limit }}
+ memory: {{ openshift_logging_fluentd_memory_limit }}
+ volumeMounts:
+ - name: runlogjournal
+ mountPath: /run/log/journal
+ - name: varlog
+ mountPath: /var/log
+ - name: varlibdockercontainers
+ mountPath: /var/lib/docker/containers
+ readOnly: true
+ - name: config
+ mountPath: /etc/fluent/configs.d/user
+ readOnly: true
+ - name: certs
+ mountPath: /etc/fluent/keys
+ readOnly: true
+ - name: dockerhostname
+ mountPath: /etc/docker-hostname
+ readOnly: true
+ - name: localtime
+ mountPath: /etc/localtime
+ readOnly: true
+ - name: dockercfg
+ mountPath: /etc/sysconfig/docker
+ readOnly: true
+ - name: dockerdaemoncfg
+ mountPath: /etc/docker
+ readOnly: true
+ env:
+ - name: "K8S_HOST_URL"
+ value: "{{ openshift_logging_fluentd_master_url }}"
+ - name: "ES_HOST"
+ value: "{{ app_host }}"
+ - name: "ES_PORT"
+ value: "{{ app_port }}"
+ - name: "ES_CLIENT_CERT"
+ value: "{{ openshift_logging_fluentd_app_client_cert }}"
+ - name: "ES_CLIENT_KEY"
+ value: "{{ openshift_logging_fluentd_app_client_key }}"
+ - name: "ES_CA"
+ value: "{{ openshift_logging_fluentd_app_ca }}"
+ - name: "OPS_HOST"
+ value: "{{ ops_host }}"
+ - name: "OPS_PORT"
+ value: "{{ ops_port }}"
+ - name: "OPS_CLIENT_CERT"
+ value: "{{ openshift_logging_fluentd_ops_client_cert }}"
+ - name: "OPS_CLIENT_KEY"
+ value: "{{ openshift_logging_fluentd_ops_client_key }}"
+ - name: "OPS_CA"
+ value: "{{ openshift_logging_fluentd_ops_ca }}"
+ - name: "ES_COPY"
+ value: "false"
+ - name: "USE_JOURNAL"
+ value: "{{ openshift_logging_fluentd_use_journal | lower }}"
+ - name: "JOURNAL_SOURCE"
+ value: "{{ openshift_logging_fluentd_journal_source | default('') }}"
+ - name: "JOURNAL_READ_FROM_HEAD"
+ value: "{{ openshift_logging_fluentd_journal_read_from_head | lower }}"
+ volumes:
+ - name: runlogjournal
+ hostPath:
+ path: /run/log/journal
+ - name: varlog
+ hostPath:
+ path: /var/log
+ - name: varlibdockercontainers
+ hostPath:
+ path: /var/lib/docker/containers
+ - name: config
+ configMap:
+ name: logging-fluentd
+ - name: certs
+ secret:
+ secretName: logging-fluentd
+ - name: dockerhostname
+ hostPath:
+ path: /etc/hostname
+ - name: localtime
+ hostPath:
+ path: /etc/localtime
+ - name: dockercfg
+ hostPath:
+ path: /etc/sysconfig/docker
+ - name: dockerdaemoncfg
+ hostPath:
+ path: /etc/docker
diff --git a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml
new file mode 100644
index 000000000..f601b738e
--- /dev/null
+++ b/roles/openshift_logging_fluentd/vars/main.yml
@@ -0,0 +1,5 @@
+---
+__latest_fluentd_version: "3_5"
+__allowed_fluentd_versions: ["3_5", "3_6"]
+__allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"]
+__fluentd_use_journal: "{{ (docker_log_driver == 'journald') | ternary(True, False) if docker_log_driver is defined else (openshift.docker.log_driver == 'journald') | ternary(True, False) if openshift.docker.log_driver is defined else openshift.docker.options | search('--log-driver=journald') if openshift.docker.options is defined else omit }}"
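Since the chained inline conditionals above are hard to scan, a hedged restatement of the intended fallback order (comment pseudocode, not executed by the role):

# if docker_log_driver is defined             -> docker_log_driver == 'journald'
# elif openshift.docker.log_driver is defined -> openshift.docker.log_driver == 'journald'
# elif openshift.docker.options is defined    -> options string contains '--log-driver=journald'
# else                                        -> leave the journal setting unset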
diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml
new file mode 100644
index 000000000..23337bcd2
--- /dev/null
+++ b/roles/openshift_logging_kibana/defaults/main.yml
@@ -0,0 +1,41 @@
+---
+### Common settings
+openshift_logging_kibana_master_url: "https://kubernetes.default.svc.cluster.local"
+openshift_logging_kibana_master_public_url: "https://kubernetes.default.svc.cluster.local"
+openshift_logging_kibana_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
+openshift_logging_kibana_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_kibana_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
+openshift_logging_kibana_namespace: logging
+
+openshift_logging_kibana_nodeselector: ""
+openshift_logging_kibana_cpu_limit: null
+openshift_logging_kibana_memory_limit: 736Mi
+
+openshift_logging_kibana_hostname: "kibana.router.default.svc.cluster.local"
+
+openshift_logging_kibana_es_host: "logging-es"
+openshift_logging_kibana_es_port: 9200
+
+openshift_logging_kibana_replicas: 1
+openshift_logging_kibana_edge_term_policy: Redirect
+
+# this is used to determine if this is an operations deployment or a non-ops deployment
+# simply used for naming purposes
+openshift_logging_kibana_ops_deployment: false
+
+# Proxy settings
+openshift_logging_kibana_proxy_debug: false
+openshift_logging_kibana_proxy_cpu_limit: null
+openshift_logging_kibana_proxy_memory_limit: 96Mi
+
+# The absolute path on the control node to the cert file to use
+# for the public facing kibana certs
+openshift_logging_kibana_cert: ""
+
+# The absolute path on the control node to the key file to use
+# for the public facing kibana certs
+openshift_logging_kibana_key: ""
+
+# The absolute path on the control node to the CA file to use
+# for the public facing kibana certs
+openshift_logging_kibana_ca: ""
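A hedged example of supplying custom public-facing Kibana certificates through these variables; the paths are placeholders on the control node, not values from this patch:

openshift_logging_kibana_cert: /path/to/kibana.example.com.crt
openshift_logging_kibana_key: /path/to/kibana.example.com.key
openshift_logging_kibana_ca: /path/to/custom-ca.crt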
diff --git a/roles/openshift_logging_kibana/meta/main.yaml b/roles/openshift_logging_kibana/meta/main.yaml
new file mode 100644
index 000000000..89e08abc0
--- /dev/null
+++ b/roles/openshift_logging_kibana/meta/main.yaml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Aggregated Logging Kibana Component
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_logging_kibana/tasks/determine_version.yaml b/roles/openshift_logging_kibana/tasks/determine_version.yaml
new file mode 100644
index 000000000..53e15af5f
--- /dev/null
+++ b/roles/openshift_logging_kibana/tasks/determine_version.yaml
@@ -0,0 +1,17 @@
+---
+# debating making this a module instead?
+- fail:
+ msg: Missing version to install; provide it via 'openshift_logging_image_version'
+ when: not openshift_logging_image_version or openshift_logging_image_version == ''
+
+- set_fact:
+ kibana_version: "{{ __latest_kibana_version }}"
+ when: openshift_logging_image_version == 'latest'
+
+# should we just assume that we will have the correct major version?
+- set_fact: kibana_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
+ when: openshift_logging_image_version != 'latest'
+
+- fail:
+ msg: Invalid version specified for Kibana
+ when: kibana_version not in __allowed_kibana_versions
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
new file mode 100644
index 000000000..55b28ee24
--- /dev/null
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -0,0 +1,232 @@
+---
+# fail if we don't have an endpoint for ES to connect to?
+
+- include: determine_version.yaml
+
+# allow passing in a tempdir
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+# This may not be necessary in this role
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+# we want to make sure we have all the necessary components here
+
+# create service account
+- name: Create Kibana service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-kibana"
+ namespace: "{{ openshift_logging_namespace }}"
+ image_pull_secrets: "{{ openshift_logging_image_pull_secret }}"
+ when: openshift_logging_image_pull_secret != ''
+
+- name: Create Kibana service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-kibana"
+ namespace: "{{ openshift_logging_namespace }}"
+ when:
+ - openshift_logging_image_pull_secret == ''
+
+- set_fact:
+ kibana_name: "{{ 'logging-kibana' ~ ( (openshift_logging_kibana_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}"
+ kibana_component: "{{ 'kibana' ~ ( (openshift_logging_kibana_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}"
+
+- name: Retrieving the cert to use when generating secrets for the logging components
+ slurp:
+ src: "{{ generated_certs_dir }}/{{ item.file }}"
+ register: key_pairs
+ with_items:
+ - { name: "ca_file", file: "ca.crt" }
+ - { name: "kibana_internal_key", file: "kibana-internal.key"}
+ - { name: "kibana_internal_cert", file: "kibana-internal.crt"}
+ - { name: "server_tls", file: "server-tls.json"}
+
+# services
+- name: Set {{ kibana_name }} service
+ oc_service:
+ state: present
+ name: "{{ kibana_name }}"
+ namespace: "{{ openshift_logging_kibana_namespace }}"
+ selector:
+ component: "{{ kibana_component }}"
+ provider: openshift
+ # pending #4091
+ #labels:
+ #- logging-infra: 'support'
+ ports:
+ - port: 443
+ targetPort: "oaproxy"
+
+# create routes
+# TODO: set up these certs differently?
+- set_fact:
+ kibana_key: "{{ lookup('file', openshift_logging_kibana_key) | b64encode }}"
+ when: "{{ openshift_logging_kibana_key | trim | length > 0 }}"
+ changed_when: false
+
+- set_fact:
+ kibana_cert: "{{ lookup('file', openshift_logging_kibana_cert) | b64encode }}"
+ when: "{{ openshift_logging_kibana_cert | trim | length > 0 }}"
+ changed_when: false
+
+- set_fact:
+ kibana_ca: "{{ lookup('file', openshift_logging_kibana_ca) | b64encode }}"
+ when: "{{ openshift_logging_kibana_ca | trim | length > 0 }}"
+ changed_when: false
+
+- set_fact:
+ kibana_ca: "{{ key_pairs | entry_from_named_pair('ca_file') }}"
+ when: kibana_ca is not defined
+ changed_when: false
+
+- name: Generating Kibana route template
+ template:
+ src: route_reencrypt.j2
+ dest: "{{ tempdir }}/templates/kibana-route.yaml"
+ vars:
+ obj_name: "{{ kibana_name }}"
+ route_host: "{{ openshift_logging_kibana_hostname }}"
+ service_name: "{{ kibana_name }}"
+ tls_key: "{{ kibana_key | default('') | b64decode }}"
+ tls_cert: "{{ kibana_cert | default('') | b64decode }}"
+ tls_ca_cert: "{{ kibana_ca | b64decode }}"
+ tls_dest_ca_cert: "{{ key_pairs | entry_from_named_pair('ca_file') | b64decode }}"
+ edge_term_policy: "{{ openshift_logging_kibana_edge_term_policy | default('') }}"
+ labels:
+ component: support
+ logging-infra: support
+ provider: openshift
+ changed_when: no
+
+# This currently has an issue if the host name changes
+- name: Setting Kibana route
+ oc_obj:
+ state: present
+ name: "{{ kibana_name }}"
+ namespace: "{{ openshift_logging_namespace }}"
+ kind: route
+ files:
+ - "{{ tempdir }}/templates/kibana-route.yaml"
+
+# gen session_secret -- if necessary
+# TODO: make idempotent
+- name: Generate proxy session
+ set_fact:
+ session_secret: "{{ 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' | random_word(200) }}"
+ check_mode: no
+
+# gen oauth_secret -- if necessary
+# TODO: make idempotent
+- name: Generate oauth client secret
+ set_fact:
+ oauth_secret: "{{ 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' | random_word(64) }}"
+ check_mode: no
+
+# create oauth client
+- name: Create oauth-client template
+ template:
+ src: oauth-client.j2
+ dest: "{{ tempdir }}/templates/oauth-client.yml"
+ vars:
+ kibana_hostname: "{{ openshift_logging_kibana_hostname }}"
+ secret: "{{ oauth_secret }}"
+
+- name: Set kibana-proxy oauth-client
+ oc_obj:
+ state: present
+ name: "kibana-proxy"
+ namespace: "{{ openshift_logging_namespace }}"
+ kind: oauthclient
+ files:
+ - "{{ tempdir }}/templates/oauth-client.yml"
+ delete_after: true
+
+# create Kibana secret
+- name: Set Kibana secret
+ oc_secret:
+ state: present
+ name: "logging-kibana"
+ namespace: "{{ openshift_logging_namespace }}"
+ files:
+ - name: ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: key
+ path: "{{ generated_certs_dir }}/system.logging.kibana.key"
+ - name: cert
+ path: "{{ generated_certs_dir }}/system.logging.kibana.crt"
+
+# create Kibana-proxy secret
+- name: Set Kibana Proxy secret
+ oc_secret:
+ state: present
+ name: "logging-kibana-proxy"
+ namespace: "{{ openshift_logging_namespace }}"
+ # TODO: when possible to have both files and contents for oc_secret use this
+ #files:
+ #- name: server-key
+ # path: "{{ generated_certs_dir }}/kibana-internal.key"
+ #- name: server-cert
+ # path: "{{ generated_certs_dir }}/kibana-internal.crt"
+ #- name: server-tls
+ # path: "{{ generated_certs_dir }}/server-tls.json"
+ contents:
+ - path: oauth-secret
+ data: "{{ oauth_secret }}"
+ - path: session-secret
+ data: "{{ session_secret }}"
+ - path: server-key
+ data: "{{ key_pairs | entry_from_named_pair('kibana_internal_key') | b64decode }}"
+ - path: server-cert
+ data: "{{ key_pairs | entry_from_named_pair('kibana_internal_cert') | b64decode }}"
+ - path: server-tls
+ data: "{{ key_pairs | entry_from_named_pair('server_tls') | b64decode }}"
+
+# create Kibana DC
+- name: Generate Kibana DC template
+ template:
+ src: kibana.j2
+ dest: "{{ tempdir }}/templates/kibana-dc.yaml"
+ vars:
+ component: "{{ kibana_component }}"
+ logging_component: kibana
+ deploy_name: "{{ kibana_name }}"
+ image: "{{ openshift_logging_image_prefix }}logging-kibana:{{ openshift_logging_image_version }}"
+ proxy_image: "{{ openshift_logging_image_prefix }}logging-auth-proxy:{{ openshift_logging_image_version }}"
+ es_host: "{{ openshift_logging_kibana_es_host }}"
+ es_port: "{{ openshift_logging_kibana_es_port }}"
+ kibana_cpu_limit: "{{ openshift_logging_kibana_cpu_limit }}"
+ kibana_memory_limit: "{{ openshift_logging_kibana_memory_limit }}"
+ kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_proxy_cpu_limit }}"
+ kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}"
+ replicas: "{{ openshift_logging_kibana_replicas | default (1) }}"
+ kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}"
+
+- name: Set Kibana DC
+ oc_obj:
+ state: present
+ name: "{{ kibana_name }}"
+ namespace: "{{ openshift_logging_namespace }}"
+ kind: dc
+ files:
+ - "{{ tempdir }}/templates/kibana-dc.yaml"
+ delete_after: true
+
+# update master configs?
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
diff --git a/roles/openshift_logging/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2
index 25fab9ac4..f8043812b 100644
--- a/roles/openshift_logging/templates/kibana.j2
+++ b/roles/openshift_logging_kibana/templates/kibana.j2
@@ -1,17 +1,17 @@
apiVersion: "v1"
kind: "DeploymentConfig"
metadata:
- name: "{{deploy_name}}"
+ name: "{{ deploy_name }}"
labels:
provider: openshift
- component: "{{component}}"
- logging-infra: "{{logging_component}}"
+ component: "{{ component }}"
+ logging-infra: "{{ logging_component }}"
spec:
- replicas: {{replicas|default(0)}}
+ replicas: {{ replicas | default(1) }}
selector:
provider: openshift
- component: "{{component}}"
- logging-infra: "{{logging_component}}"
+ component: "{{ component }}"
+ logging-infra: "{{ logging_component }}"
strategy:
rollingParams:
intervalSeconds: 1
@@ -20,37 +20,39 @@ spec:
type: Rolling
template:
metadata:
- name: "{{deploy_name}}"
+ name: "{{ deploy_name }}"
labels:
- logging-infra: "{{logging_component}}"
+ logging-infra: "{{ logging_component }}"
provider: openshift
- component: "{{component}}"
+ component: "{{ component }}"
spec:
serviceAccountName: aggregated-logging-kibana
{% if kibana_node_selector is iterable and kibana_node_selector | length > 0 %}
nodeSelector:
{% for key, value in kibana_node_selector.iteritems() %}
- {{key}}: "{{value}}"
+ {{ key }}: "{{ value }}"
{% endfor %}
{% endif %}
containers:
-
name: "kibana"
- image: {{image}}
+ image: {{ image }}
imagePullPolicy: Always
-{% if (kibana_memory_limit is defined and kibana_memory_limit is not none) or (kibana_cpu_limit is defined and kibana_cpu_limit is not none) %}
+{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") %}
resources:
limits:
-{% if kibana_cpu_limit is not none %}
- cpu: "{{kibana_cpu_limit}}"
+{% if kibana_cpu_limit is not none and kibana_cpu_limit != "" %}
+ cpu: "{{ kibana_cpu_limit }}"
+{% endif %}
+{% if kibana_memory_limit is not none and kibana_memory_limit != "" %}
+ memory: "{{ kibana_memory_limit }}"
{% endif %}
- memory: "{{kibana_memory_limit | default('736Mi') }}"
{% endif %}
env:
- name: "ES_HOST"
- value: "{{es_host}}"
+ value: "{{ es_host }}"
- name: "ES_PORT"
- value: "{{es_port}}"
+ value: "{{ es_port }}"
-
name: "KIBANA_MEMORY_LIMIT"
valueFrom:
@@ -61,17 +63,26 @@ spec:
- name: kibana
mountPath: /etc/kibana/keys
readOnly: true
+ readinessProbe:
+ exec:
+ command:
+ - "/usr/share/kibana/probe/readiness.sh"
+ initialDelaySeconds: 5
+ timeoutSeconds: 4
+ periodSeconds: 5
-
name: "kibana-proxy"
- image: {{proxy_image}}
+ image: {{ proxy_image }}
imagePullPolicy: Always
-{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none) or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none) %}
+{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") %}
resources:
limits:
-{% if kibana_proxy_cpu_limit is not none %}
- cpu: "{{kibana_proxy_cpu_limit}}"
+{% if kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "" %}
+ cpu: "{{ kibana_proxy_cpu_limit }}"
+{% endif %}
+{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %}
+ memory: "{{ kibana_proxy_memory_limit }}"
{% endif %}
- memory: "{{kibana_proxy_memory_limit | default('96Mi') }}"
{% endif %}
ports:
-
@@ -92,19 +103,19 @@ spec:
value: kibana-proxy
-
name: "OAP_MASTER_URL"
- value: {{openshift_logging_master_url}}
+ value: {{ openshift_logging_kibana_master_url }}
-
name: "OAP_PUBLIC_MASTER_URL"
- value: {{openshift_logging_master_public_url}}
+ value: {{ openshift_logging_kibana_master_public_url }}
-
name: "OAP_LOGOUT_REDIRECT"
- value: {{openshift_logging_master_public_url}}/console/logout
+ value: {{ openshift_logging_kibana_master_public_url }}/console/logout
-
name: "OAP_MASTER_CA_FILE"
value: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
-
name: "OAP_DEBUG"
- value: "{{openshift_logging_kibana_proxy_debug}}"
+ value: "{{ openshift_logging_kibana_proxy_debug }}"
-
name: "OAP_OAUTH_SECRET_FILE"
value: "/secret/oauth-secret"
diff --git a/roles/openshift_logging/templates/oauth-client.j2 b/roles/openshift_logging_kibana/templates/oauth-client.j2
index 41d3123cb..6767f6d89 100644
--- a/roles/openshift_logging/templates/oauth-client.j2
+++ b/roles/openshift_logging_kibana/templates/oauth-client.j2
@@ -6,8 +6,7 @@ metadata:
logging-infra: support
secret: {{secret}}
redirectURIs:
-- https://{{openshift_logging_kibana_hostname}}
-- https://{{openshift_logging_kibana_ops_hostname}}
+- https://{{kibana_hostname}}
scopeRestrictions:
- literals:
- user:info
diff --git a/roles/openshift_logging/templates/route_reencrypt.j2 b/roles/openshift_logging_kibana/templates/route_reencrypt.j2
index cf8a9e65f..cf8a9e65f 100644
--- a/roles/openshift_logging/templates/route_reencrypt.j2
+++ b/roles/openshift_logging_kibana/templates/route_reencrypt.j2
diff --git a/roles/openshift_logging_kibana/vars/main.yml b/roles/openshift_logging_kibana/vars/main.yml
new file mode 100644
index 000000000..87b281c4b
--- /dev/null
+++ b/roles/openshift_logging_kibana/vars/main.yml
@@ -0,0 +1,3 @@
+---
+__latest_kibana_version: "3_5"
+__allowed_kibana_versions: ["3_5", "3_6"]
diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml
new file mode 100644
index 000000000..8aaa28706
--- /dev/null
+++ b/roles/openshift_logging_mux/defaults/main.yml
@@ -0,0 +1,43 @@
+---
+### General logging settings
+openshift_logging_mux_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
+openshift_logging_mux_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_mux_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
+openshift_logging_mux_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
+openshift_logging_mux_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}"
+openshift_logging_mux_namespace: logging
+
+### Common settings
+openshift_logging_mux_nodeselector: "{{ openshift_hosted_logging_mux_nodeselector_label | default('') | map_from_pairs }}"
+openshift_logging_mux_cpu_limit: 100m
+openshift_logging_mux_memory_limit: 512Mi
+
+openshift_logging_mux_replicas: 1
+
+# Destination for the application based logs
+openshift_logging_mux_app_host: "logging-es"
+openshift_logging_mux_app_port: 9200
+# Destination for the operations based logs
+openshift_logging_mux_ops_host: "{{ openshift_logging_mux_app_host }}"
+openshift_logging_mux_ops_port: "{{ openshift_logging_mux_app_port }}"
+
+### Used by "hosted" and "secure-aggregator" deployments
+openshift_logging_mux_use_journal: "{{ openshift_hosted_logging_use_journal | default('') }}"
+openshift_logging_mux_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}"
+openshift_logging_mux_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}"
+
+openshift_logging_mux_allow_external: false
+openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_mux_port: 24284
+
+openshift_logging_mux_app_client_cert: /etc/fluent/keys/cert
+openshift_logging_mux_app_client_key: /etc/fluent/keys/key
+openshift_logging_mux_app_ca: /etc/fluent/keys/ca
+openshift_logging_mux_ops_client_cert: /etc/fluent/keys/cert
+openshift_logging_mux_ops_client_key: /etc/fluent/keys/key
+openshift_logging_mux_ops_ca: /etc/fluent/keys/ca
+
+# the following can be uncommented to provide values for configmaps -- take care when providing file contents, as they may prevent your cluster from operating correctly
+#mux_config_contents:
+#mux_throttle_contents:
+#mux_secureforward_contents:
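A hedged sketch of exposing mux outside the cluster with these defaults; the hostname is a placeholder and the port simply restates the default above:

openshift_logging_mux_allow_external: true
openshift_logging_mux_hostname: mux.apps.example.com
openshift_logging_mux_port: 24284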
diff --git a/roles/openshift_logging/files/fluent.conf b/roles/openshift_logging_mux/files/fluent.conf
index aeaa705ee..aeaa705ee 100644
--- a/roles/openshift_logging/files/fluent.conf
+++ b/roles/openshift_logging_mux/files/fluent.conf
diff --git a/roles/openshift_logging_mux/files/secure-forward.conf b/roles/openshift_logging_mux/files/secure-forward.conf
new file mode 100644
index 000000000..f4483df79
--- /dev/null
+++ b/roles/openshift_logging_mux/files/secure-forward.conf
@@ -0,0 +1,24 @@
+# @type secure_forward
+
+# self_hostname ${HOSTNAME}
+# shared_key <SECRET_STRING>
+
+# secure yes
+# enable_strict_verification yes
+
+# ca_cert_path /etc/fluent/keys/your_ca_cert
+# ca_private_key_path /etc/fluent/keys/your_private_key
+ # for private CA secret key
+# ca_private_key_passphrase passphrase
+
+# <server>
+ # or IP
+# host server.fqdn.example.com
+# port 24284
+# </server>
+# <server>
+ # ip address to connect
+# host 203.0.113.8
+ # specify hostlabel for FQDN verification if ipaddress is used for host
+# hostlabel server.fqdn.example.com
+# </server>
diff --git a/roles/openshift_logging_mux/meta/main.yaml b/roles/openshift_logging_mux/meta/main.yaml
new file mode 100644
index 000000000..f40beb79d
--- /dev/null
+++ b/roles/openshift_logging_mux/meta/main.yaml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Aggregated Logging Mux Component
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_logging_mux/tasks/determine_version.yaml b/roles/openshift_logging_mux/tasks/determine_version.yaml
new file mode 100644
index 000000000..229bcf3d5
--- /dev/null
+++ b/roles/openshift_logging_mux/tasks/determine_version.yaml
@@ -0,0 +1,17 @@
+---
+# debating making this a module instead?
+- fail:
+ msg: Missing version to install; provide it via 'openshift_logging_image_version'
+ when: not openshift_logging_image_version or openshift_logging_image_version == ''
+
+- set_fact:
+ mux_version: "{{ __latest_mux_version }}"
+ when: openshift_logging_image_version == 'latest'
+
+# should we just assume that we will have the correct major version?
+- set_fact: mux_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
+ when: openshift_logging_image_version != 'latest'
+
+- fail:
+ msg: Invalid version specified for mux
+ when: mux_version not in __allowed_mux_versions
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
new file mode 100644
index 000000000..432cab9e9
--- /dev/null
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -0,0 +1,197 @@
+---
+- fail:
+ msg: Application logs destination is required
+ when: not openshift_logging_mux_app_host or openshift_logging_mux_app_host == ''
+
+- fail:
+ msg: Operations logs destination is required
+ when: not openshift_logging_mux_ops_host or openshift_logging_mux_ops_host == ''
+
+- include: determine_version.yaml
+
+# allow passing in a tempdir
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+# we want to make sure we have all the necessary components here
+
+# create service account
+- name: Create Mux service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-mux"
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ image_pull_secrets: "{{ openshift_logging_image_pull_secret }}"
+ when: openshift_logging_image_pull_secret != ''
+
+- name: Create Mux service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-mux"
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ when:
+ - openshift_logging_image_pull_secret == ''
+
+# set service account scc
+- name: Set privileged permissions for Mux
+ oc_adm_policy_user:
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+ user: "system:serviceaccount:{{ openshift_logging_mux_namespace }}:aggregated-logging-mux"
+
+# set service account permissions
+- name: Set cluster-reader permissions for Mux
+ oc_adm_policy_user:
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ resource_kind: cluster-role
+ resource_name: cluster-reader
+ state: present
+ user: "system:serviceaccount:{{ openshift_logging_mux_namespace }}:aggregated-logging-mux"
+
+# set hostmount-anyuid permissions
+- name: Set hostmount-anyuid permissions for Mux
+ oc_adm_policy_user:
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ resource_kind: scc
+ resource_name: hostmount-anyuid
+ state: present
+ user: "system:serviceaccount:{{ openshift_logging_mux_namespace }}:aggregated-logging-mux"
+
+# create Mux configmap
+- copy:
+ src: fluent.conf
+ dest: "{{mktemp.stdout}}/fluent-mux.conf"
+ when: fluentd_mux_config_contents is undefined
+ changed_when: no
+
+- copy:
+ src: secure-forward.conf
+ dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
+ when: fluentd_mux_secureforward_contents is undefined
+ changed_when: no
+
+- copy:
+ content: "{{fluentd_mux_config_contents}}"
+ dest: "{{mktemp.stdout}}/fluent-mux.conf"
+ when: fluentd_mux_config_contents is defined
+ changed_when: no
+
+- copy:
+ content: "{{fluentd_mux_secureforward_contents}}"
+ dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
+ when: fluentd_mux_secureforward_contents is defined
+ changed_when: no
+
+- name: Set Mux configmap
+ oc_configmap:
+ state: present
+ name: "logging-mux"
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ from_file:
+ fluent.conf: "{{ tempdir }}/fluent-mux.conf"
+ secure-forward.conf: "{{ tempdir }}/secure-forward-mux.conf"
+
+# create Mux secret
+- name: Set logging-mux secret
+ oc_secret:
+ state: present
+ name: logging-mux
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ files:
+ - name: ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: key
+ path: "{{ generated_certs_dir }}/system.logging.mux.key"
+ - name: cert
+ path: "{{ generated_certs_dir }}/system.logging.mux.crt"
+ - name: shared_key
+ path: "{{ generated_certs_dir }}/mux_shared_key"
+
+# services
+- name: Set logging-mux service for external communication
+ oc_service:
+ state: present
+ name: "logging-mux"
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ selector:
+ component: mux
+ provider: openshift
+ # pending #4091
+ #labels:
+ #- logging-infra: 'support'
+ ports:
+ - name: mux-forward
+ port: "{{ openshift_logging_mux_port }}"
+ targetPort: "mux-forward"
+ # pending #4091
+ # externalIPs:
+ # - "{{ ansible_eth0.ipv4.address }}"
+ when: openshift_logging_mux_allow_external | bool
+
+- name: Set logging-mux service for internal communication
+ oc_service:
+ state: present
+ name: "logging-mux"
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ selector:
+ component: mux
+ provider: openshift
+ # pending #4091
+ #labels:
+ #- logging-infra: 'support'
+ ports:
+ - name: mux-forward
+ port: "{{ openshift_logging_mux_port }}"
+ targetPort: "mux-forward"
+ when: not openshift_logging_mux_allow_external | bool
+
+# create Mux DC
+- name: Generating mux deploymentconfig
+ template:
+ src: mux.j2
+ dest: "{{mktemp.stdout}}/templates/logging-mux-dc.yaml"
+ vars:
+ component: mux
+ logging_component: mux
+ deploy_name: "logging-{{ component }}"
+ image: "{{ openshift_logging_image_prefix }}logging-fluentd:{{ openshift_logging_image_version }}"
+ es_host: "{{ openshift_logging_mux_app_host }}"
+ es_port: "{{ openshift_logging_mux_app_port }}"
+ ops_host: "{{ openshift_logging_mux_ops_host }}"
+ ops_port: "{{ openshift_logging_mux_ops_port }}"
+ mux_cpu_limit: "{{ openshift_logging_mux_cpu_limit }}"
+ mux_memory_limit: "{{ openshift_logging_mux_memory_limit }}"
+ replicas: "{{ openshift_logging_mux_replicas | default(1) }}"
+ mux_node_selector: "{{ openshift_logging_mux_nodeselector | default({}) }}"
+ check_mode: no
+ changed_when: no
+
+- name: Set logging-mux DC
+ oc_obj:
+ state: present
+ name: logging-mux
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ kind: dc
+ files:
+ - "{{ tempdir }}/templates/logging-mux-dc.yaml"
+ delete_after: true
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
diff --git a/roles/openshift_logging/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
index 41e6abd52..770a2bfbd 100644
--- a/roles/openshift_logging/templates/mux.j2
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -7,7 +7,7 @@ metadata:
component: "{{component}}"
logging-infra: "{{logging_component}}"
spec:
- replicas: {{replicas|default(0)}}
+ replicas: {{replicas|default(1)}}
selector:
provider: openshift
component: "{{component}}"
@@ -26,7 +26,7 @@ spec:
provider: openshift
component: "{{component}}"
spec:
- serviceAccountName: aggregated-logging-fluentd
+ serviceAccountName: aggregated-logging-mux
{% if mux_node_selector is iterable and mux_node_selector | length > 0 %}
nodeSelector:
{% for key, value in mux_node_selector.iteritems() %}
@@ -68,33 +68,33 @@ spec:
readOnly: true
env:
- name: "K8S_HOST_URL"
- value: "{{openshift_logging_master_url}}"
+ value: "{{openshift_logging_mux_master_url}}"
- name: "ES_HOST"
- value: "{{openshift_logging_es_host}}"
+ value: "{{openshift_logging_mux_app_host}}"
- name: "ES_PORT"
- value: "{{openshift_logging_es_port}}"
+ value: "{{openshift_logging_mux_app_port}}"
- name: "ES_CLIENT_CERT"
- value: "{{openshift_logging_es_client_cert}}"
+ value: "{{openshift_logging_mux_app_client_cert}}"
- name: "ES_CLIENT_KEY"
- value: "{{openshift_logging_es_client_key}}"
+ value: "{{openshift_logging_mux_app_client_key}}"
- name: "ES_CA"
- value: "{{openshift_logging_es_ca}}"
+ value: "{{openshift_logging_mux_app_ca}}"
- name: "OPS_HOST"
- value: "{{ops_host}}"
+ value: "{{openshift_logging_mux_ops_host}}"
- name: "OPS_PORT"
- value: "{{ops_port}}"
+ value: "{{openshift_logging_mux_ops_port}}"
- name: "OPS_CLIENT_CERT"
- value: "{{openshift_logging_es_ops_client_cert}}"
+ value: "{{openshift_logging_mux_ops_client_cert}}"
- name: "OPS_CLIENT_KEY"
- value: "{{openshift_logging_es_ops_client_key}}"
+ value: "{{openshift_logging_mux_ops_client_key}}"
- name: "OPS_CA"
- value: "{{openshift_logging_es_ops_ca}}"
+ value: "{{openshift_logging_mux_ops_ca}}"
- name: "USE_JOURNAL"
value: "false"
- name: "JOURNAL_SOURCE"
- value: "{{openshift_logging_fluentd_journal_source | default('')}}"
+ value: "{{openshift_logging_mux_journal_source | default('')}}"
- name: "JOURNAL_READ_FROM_HEAD"
- value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}"
+ value: "{{openshift_logging_mux_journal_read_from_head|lower}}"
- name: FORWARD_LISTEN_HOST
value: "{{ openshift_logging_mux_hostname }}"
- name: FORWARD_LISTEN_PORT
@@ -102,14 +102,14 @@ spec:
- name: USE_MUX
value: "true"
- name: MUX_ALLOW_EXTERNAL
- value: "{{ openshift_logging_mux_allow_external| default('false') }}"
+ value: "{{ openshift_logging_mux_allow_external | default('false') }}"
volumes:
- name: config
configMap:
name: logging-mux
- name: certs
secret:
- secretName: logging-fluentd
+ secretName: logging-mux
- name: dockerhostname
hostPath:
path: /etc/hostname
diff --git a/roles/openshift_logging_mux/vars/main.yml b/roles/openshift_logging_mux/vars/main.yml
new file mode 100644
index 000000000..4234b74e2
--- /dev/null
+++ b/roles/openshift_logging_mux/vars/main.yml
@@ -0,0 +1,3 @@
+---
+__latest_mux_version: "3_5"
+__allowed_mux_versions: ["3_5", "3_6"]
diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md
index c3300a7ef..e5362105c 100644
--- a/roles/openshift_master/README.md
+++ b/roles/openshift_master/README.md
@@ -15,17 +15,18 @@ Role Variables
From this role:
-| Name | Default value | |
-|-------------------------------------|-----------------------|--------------------------------------------------|
-| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for master |
+| Name | Default value | |
+|-------------------------------------|-----------------------|-------------------------------------------------------------------------------|
+| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for master |
| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when master starts up |
-| oreg_url | UNDEF | Default docker registry to use |
-| openshift_master_api_port | UNDEF | |
-| openshift_master_console_port | UNDEF | |
-| openshift_master_api_url | UNDEF | |
-| openshift_master_console_url | UNDEF | |
-| openshift_master_public_api_url | UNDEF | |
-| openshift_master_public_console_url | UNDEF | |
+| oreg_url | UNDEF | Default docker registry to use |
+| oreg_url_master | UNDEF | Default docker registry to use, specifically on the master |
+| openshift_master_api_port | UNDEF | |
+| openshift_master_console_port | UNDEF | |
+| openshift_master_api_url | UNDEF | |
+| openshift_master_console_url | UNDEF | |
+| openshift_master_public_api_url | UNDEF | |
+| openshift_master_public_console_url | UNDEF | |
From openshift_common:
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 938ac2a12..1935d9592 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -44,10 +44,10 @@ assetConfig:
- {{ cipher_suite }}
{% endfor %}
{% endif %}
-{% if openshift_master_ha | bool %}
{% if openshift.master.audit_config | default(none) is not none and openshift.common.version_gte_3_2_or_1_2 | bool %}
auditConfig:{{ openshift.master.audit_config | to_padded_yaml(level=1) }}
{% endif %}
+{% if openshift_master_ha | bool %}
controllerLeaseTTL: {{ openshift.master.controller_lease_ttl | default('30') }}
{% endif %}
{% if openshift.common.version_gte_3_3_or_1_3 | bool %}
@@ -274,5 +274,12 @@ servingInfo:
- {{ cipher_suite }}
{% endfor %}
{% endif %}
+{% if openshift_template_service_broker_namespaces is defined %}
+templateServiceBrokerConfig:
+ templateNamespaces:
+{% for namespace in openshift_template_service_broker_namespaces %}
+ - {{ namespace }}
+{% endfor %}
+{% endif %}
volumeConfig:
dynamicProvisioningEnabled: {{ openshift.master.dynamic_provisioning_enabled }}
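
For reference, a minimal sketch (not part of the patch) of what the new templateServiceBrokerConfig block renders to. It uses plain jinja2 with trim_blocks=True, which approximates how Ansible's template module treats block tags; the namespace list is a made-up example value.

    from jinja2 import Environment

    fragment = """\
    {% if openshift_template_service_broker_namespaces is defined %}
    templateServiceBrokerConfig:
      templateNamespaces:
    {% for namespace in openshift_template_service_broker_namespaces %}
      - {{ namespace }}
    {% endfor %}
    {% endif %}
    """

    # Rendered only when the variable is defined, mirroring the guard in the template.
    out = Environment(trim_blocks=True).from_string(fragment).render(
        openshift_template_service_broker_namespaces=["openshift", "my-templates"])
    print(out)
    # templateServiceBrokerConfig:
    #   templateNamespaces:
    #   - openshift
    #   - my-templates
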
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index b5be193d2..e767772ce 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -468,7 +468,8 @@ class GitHubIdentityProvider(IdentityProviderOauthBase):
"""
def __init__(self, api_version, idp):
IdentityProviderOauthBase.__init__(self, api_version, idp)
- self._optional += [['organizations']]
+ self._optional += [['organizations'],
+ ['teams']]
class FilterModule(object):
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 79f054b42..ef8dcd5fd 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -74,7 +74,7 @@
ldap_ca: "{{ openshift_master_ldap_ca | default(lookup('file', openshift_master_ldap_ca_file) if openshift_master_ldap_ca_file is defined else None) }}"
openid_ca: "{{ openshift_master_openid_ca | default(lookup('file', openshift_master_openid_ca_file) if openshift_master_openid_ca_file is defined else None) }}"
request_header_ca: "{{ openshift_master_request_header_ca | default(lookup('file', openshift_master_request_header_ca_file) if openshift_master_request_header_ca_file is defined else None) }}"
- registry_url: "{{ oreg_url | default(None) }}"
+ registry_url: "{{ oreg_url_master | default(oreg_url) | default(None) }}"
oauth_grant_method: "{{ openshift_master_oauth_grant_method | default(None) }}"
sdn_cluster_network_cidr: "{{ osm_cluster_network_cidr | default(None) }}"
sdn_host_subnet_length: "{{ osm_host_subnet_length | default(None) }}"
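
A minimal sketch (not part of the patch) of the fallback order introduced by the registry_url change: oreg_url_master wins when set, otherwise oreg_url is used. Plain jinja2 is close enough to Ansible's templating to show the precedence; the registry values are made-up examples and '' stands in for the final default(None).

    from jinja2 import Template

    expr = "{{ oreg_url_master | default(oreg_url) | default('') }}"

    # Only the generic registry is set: it is used as the fallback.
    print(Template(expr).render(
        oreg_url="registry.example.com/openshift3/ose-${component}:${version}"))

    # Both are set: the master-specific registry takes precedence.
    print(Template(expr).render(
        oreg_url="registry.example.com/openshift3/ose-${component}:${version}",
        oreg_url_master="master-registry.example.com/openshift3/ose-${component}:${version}"))
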
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index e8b7bea5c..5d8506a73 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -1,4 +1,12 @@
---
+- local_action: shell python -c 'import passlib' 2>/dev/null || echo not installed
+ register: passlib_result
+
+- name: Check that python-passlib is available on the control host
+ assert:
+ that:
+ - "'not installed' not in passlib_result.stdout"
+ msg: "python-passlib rpm must be installed on control host"
- name: Set default image variables based on deployment_type
include_vars: "{{ item }}"
@@ -25,6 +33,7 @@
local_action: command mktemp -d
register: local_tmp
changed_when: False
+ become: false
- name: Copy the admin client config(s)
command: >
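
The passlib probe added above shells out to python on the control host; a minimal local equivalent (an assumption, not how the role does it) is an importability check with the standard library:

    import importlib.util

    # Fail fast with the same intent as the assert task above.
    if importlib.util.find_spec("passlib") is None:
        raise SystemExit("python-passlib must be installed on the control host")
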
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index b69b60c1d..fb0b494da 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -15,10 +15,11 @@ Role Variables
--------------
From this role:
-| Name | Default value | |
-|------------------------------------------|-----------------------|--------------------------------------------------------|
-| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for node |
-| oreg_url | UNDEF (Optional) | Default docker registry to use |
+| Name | Default value | |
+|----------------------------|-----------------------|----------------------------------------------------------|
+| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for node |
+| oreg_url | UNDEF (Optional) | Default docker registry to use |
+| oreg_url_node | UNDEF (Optional) | Default docker registry to use, specifically on the node |
From openshift_common:
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index bf66ef1d6..5904ca9bc 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -9,3 +9,6 @@ os_firewall_allow:
- service: OpenShift OVS sdn
port: 4789/udp
when: openshift.common.use_openshift_sdn | bool
+- service: Calico BGP Port
+ port: 179/tcp
+ when: openshift.common.use_calico | bool
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index cb51416d4..4dcf1eef8 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -12,3 +12,6 @@
- name: restart node
systemd: name={{ openshift.common.service_type }}-node state=restarted
when: (not skip_node_svc_handlers | default(False) | bool) and not (node_service_status_changed | default(false) | bool)
+
+- name: reload sysctl.conf
+ command: /sbin/sysctl -p
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 0da41d0c1..3b7e8126a 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -33,6 +33,12 @@ dependencies:
when: openshift.common.use_openshift_sdn | bool
- role: os_firewall
os_firewall_allow:
+ - service: Calico BGP Port
+ port: 179/tcp
+ when: openshift.common.use_calico | bool
+
+- role: os_firewall
+ os_firewall_allow:
- service: Kubernetes service NodePort TCP
port: "{{ openshift_node_port_range | default('') }}/tcp"
- service: Kubernetes service NodePort UDP
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 656874f56..a8beaa060 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -22,7 +22,7 @@
iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
- registry_url: "{{ oreg_url | default(none) }}"
+ registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}"
schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
@@ -104,8 +104,14 @@
# The atomic-openshift-node service will set this parameter on
# startup, but if the network service is restarted this setting is
# lost. Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1372388
+#
+# Use lineinfile w/ a handler for this task until
+# https://github.com/ansible/ansible/pull/24277 is included in an
+# ansible release and we can use the sysctl module.
- name: Persist net.ipv4.ip_forward sysctl entry
- sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes state=present reload=yes
+ lineinfile: dest=/etc/sysctl.conf regexp='^net.ipv4.ip_forward' line='net.ipv4.ip_forward=1'
+ notify:
+ - reload sysctl.conf
- name: Start and enable openvswitch service
systemd:
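
For context, a rough Python sketch (an assumption, not part of the role) of what the lineinfile task plus the new "reload sysctl.conf" handler accomplish together: ensure /etc/sysctl.conf carries net.ipv4.ip_forward=1, then re-apply the file so the setting takes effect and survives a network service restart.

    import re
    import subprocess

    def persist_ip_forward(path="/etc/sysctl.conf"):
        with open(path) as f:
            lines = f.readlines()
        pattern = re.compile(r"^net\.ipv4\.ip_forward")
        desired = "net.ipv4.ip_forward=1\n"
        if any(pattern.match(line) for line in lines):
            lines = [desired if pattern.match(line) else line for line in lines]
        else:
            lines.append(desired)
        with open(path, "w") as f:
            f.writelines(lines)
        # The handler's job: re-read /etc/sysctl.conf so the value applies now.
        subprocess.check_call(["/sbin/sysctl", "-p"])
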
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index a41a97e01..d44839d69 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -92,8 +92,8 @@
yedit:
src: "{{ openshift.common.config_base }}/node/node-config.yaml"
key: 'imageConfig.format'
- value: "{{ oreg_url }}"
- when: oreg_url is defined
+ value: "{{ oreg_url | default(oreg_url_node) }}"
+ when: oreg_url is defined or oreg_url_node is defined
# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
- name: Check for swap usage
@@ -143,7 +143,7 @@
name: "{{ openshift.common.hostname | lower }}"
register: node_output
delegate_to: "{{ groups.oo_first_master.0 }}"
- until: node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True
+ until: node_output.results.returncode == 0 and node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True
# Give the node two minutes to come back online.
retries: 24
delay: 5
diff --git a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
index 88801e487..a86c96df7 100644
--- a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
+++ b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
@@ -8,7 +8,7 @@ def map_from_pairs(source, delim="="):
if source == '':
return dict()
- return dict(source.split(delim) for item in source.split(","))
+ return dict(item.split(delim) for item in source.split(","))
# pylint: disable=too-few-public-methods
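
The one-character filter fix above is easier to see with a quick example (not part of the patch): the old generator split the whole source string once per item instead of splitting each item, so any input with more than one pair blew up.

    def map_from_pairs_fixed(source, delim="="):
        if source == '':
            return dict()
        return dict(item.split(delim) for item in source.split(","))

    print(map_from_pairs_fixed("glusterfs=storage,zone=east"))
    # {'glusterfs': 'storage', 'zone': 'east'}

    # With the old body, source.split(delim) yields ['glusterfs', 'storage,zone', 'east']
    # for the same input, and dict() raises:
    #   ValueError: dictionary update sequence element #0 has length 3; 2 is required
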
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index f2f4d16f0..b35a3fa3c 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -117,7 +117,7 @@
debug:
msg: >
openshift_image_tag is used for containerized installs. If you are trying to
- specify an image for a non-container install see oreg_url.
+ specify an image for a non-container install, see oreg_url, oreg_url_master, or oreg_url_node.
when:
- not is_containerized | bool
- openshift_image_tag is defined