Diffstat (limited to 'roles/openshift_logging_elasticsearch')
-rw-r--r--  roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml |   9
-rw-r--r--  roles/openshift_logging_elasticsearch/handlers/main.yml |  13
-rw-r--r--  roles/openshift_logging_elasticsearch/meta/main.yaml |   1
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/determine_version.yaml |  10
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/get_es_version.yml |  42
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 356
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml | 113
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml |  37
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch-logging.yml.j2 (renamed from roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2) |   0
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch.yml.j2 (renamed from roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2) |   0
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/2.x/es.j2 (renamed from roles/openshift_logging_elasticsearch/templates/es.j2) |  74
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/2.x/logging-metrics-role.j2 (renamed from roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2) |   0
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/2.x/pvc.j2 (renamed from roles/openshift_logging_elasticsearch/templates/pvc.j2) |   0
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/2.x/rolebinding.j2 (renamed from roles/openshift_logging_elasticsearch/templates/rolebinding.j2) |   0
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/2.x/route_reencrypt.j2 (renamed from roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2) |   0
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/5.x/elasticsearch.yml.j2 |  74
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/5.x/es.j2 | 194
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/5.x/log4j2.properties.j2 |  78
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/5.x/logging-metrics-role.j2 |  31
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/5.x/pvc.j2 |  30
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/5.x/rolebinding.j2 |  14
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/5.x/route_reencrypt.j2 |  36
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/passwd.j2 |   2
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/main.yml |   7
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml |   4
25 files changed, 935 insertions, 190 deletions
diff --git a/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml b/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml
deleted file mode 100644
index 567c9f289..000000000
--- a/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-apiVersion: v1
-kind: ClusterRole
-metadata:
- name: rolebinding-reader
-rules:
-- resources:
- - clusterrolebindings
- verbs:
- - get
diff --git a/roles/openshift_logging_elasticsearch/handlers/main.yml b/roles/openshift_logging_elasticsearch/handlers/main.yml
new file mode 100644
index 000000000..fa56897d0
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/handlers/main.yml
@@ -0,0 +1,13 @@
+---
+- name: "Restarting logging-{{ _cluster_component }} cluster"
+ listen: "restart elasticsearch"
+ include_tasks: restart_cluster.yml
+ with_items: "{{ _restart_logging_components }}"
+ loop_control:
+ loop_var: _cluster_component
+ when: not logging_elasticsearch_rollout_override | bool
+
+## Stop this from running more than once
+- set_fact:
+ logging_elasticsearch_rollout_override: True
+ listen: "restart elasticsearch"
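
Both handlers added above subscribe to the "restart elasticsearch" topic via `listen`, so a single `notify: "restart elasticsearch"` from any task in the role fires them once at the end of the play; the second handler then flips `logging_elasticsearch_rollout_override` so repeated notifications cannot roll the cluster twice. A minimal sketch of that notify/listen wiring, with purely illustrative task names and paths:

# illustrative handler -- handlers/main.yml
- name: React to a restart request
  listen: "restart elasticsearch"
  debug:
    msg: "would restart logging-{{ _cluster_component | default('es') }}"

# illustrative task -- tasks/main.yaml
- name: Apply a config change that requires a rolling restart
  copy:
    content: "example setting"
    dest: /tmp/example-elasticsearch.yml
  notify: "restart elasticsearch"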
diff --git a/roles/openshift_logging_elasticsearch/meta/main.yaml b/roles/openshift_logging_elasticsearch/meta/main.yaml
index 6a9a6539c..e93d6b73e 100644
--- a/roles/openshift_logging_elasticsearch/meta/main.yaml
+++ b/roles/openshift_logging_elasticsearch/meta/main.yaml
@@ -14,3 +14,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml
index c53a06019..a7cc8f0ec 100644
--- a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml
@@ -10,8 +10,14 @@
# should we just assume that we will have the correct major version?
- set_fact: es_version="{{ openshift_logging_elasticsearch_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
- when: openshift_logging_elasticsearch_image_version != 'latest'
+ when:
+ - openshift_logging_elasticsearch_image_version != 'latest'
+ - not openshift_logging_es5_techpreview | default(false) | bool
- fail:
msg: Invalid version specified for Elasticsearch
- when: es_version not in __allowed_es_versions
+ when:
+ - es_version not in __allowed_es_versions
+ - not openshift_logging_es5_techpreview | default(false) | bool
+
+- include_tasks: get_es_version.yml
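
As a side note on the mapping performed by `regex_replace` above: an image tag such as `v3.9.2` collapses to the internal version key `3_9`, which is then checked against `__allowed_es_versions`. A hypothetical one-off task (not part of this change) showing the same filter expression:

- set_fact: example_es_version="{{ 'v3.9.2' | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
  # example_es_version evaluates to "3_9"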
diff --git a/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml b/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml
new file mode 100644
index 000000000..7c4ecaa5b
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml
@@ -0,0 +1,42 @@
+---
+- command: >
+ {{ openshift_client_binary }} get pod -l component=es,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
+ register: _cluster_pods
+
+- name: "Getting ES version for logging-es cluster"
+ command: >
+ {{ openshift_client_binary }} exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XGET 'https://localhost:9200/'
+ register: _curl_output
+ when: _cluster_pods.stdout_lines | count > 0
+
+- command: >
+ {{ openshift_client_binary }} get pod -l component=es-ops,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
+ register: _ops_cluster_pods
+
+- name: "Getting ES version for logging-es-ops cluster"
+ command: >
+ {{ openshift_client_binary }} exec {{ _ops_cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XGET 'https://localhost:9200/'
+ register: _ops_curl_output
+ when: _ops_cluster_pods.stdout_lines | count > 0
+
+- set_fact:
+ _es_output: "{{ _curl_output.stdout | from_json }}"
+ when: _curl_output.stdout is defined
+
+- set_fact:
+ _es_ops_output: "{{ _ops_curl_output.stdout | from_json }}"
+ when: _ops_curl_output.stdout is defined
+
+- set_fact:
+ _es_installed_version: "{{ _es_output.version.number }}"
+ when:
+ - _es_output is defined
+ - _es_output.version is defined
+ - _es_output.version.number is defined
+
+- set_fact:
+ _es_ops_installed_version: "{{ _es_ops_output.version.number }}"
+ when:
+ - _es_ops_output is defined
+ - _es_ops_output.version is defined
+ - _es_ops_output.version.number is defined
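
For context, the `from_json` handling above relies on the small JSON document Elasticsearch returns for `GET /` on port 9200, whose `version.number` field carries the running release. An abridged response (field values are illustrative only) looks roughly like:

{
  "name": "logging-es-data-master-abc123",
  "cluster_name": "logging-es",
  "version": { "number": "2.4.4" },
  "tagline": "You Know, for Search"
}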
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 8f2050043..64e5a3a1f 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -1,4 +1,11 @@
---
+- name: Ensure that ElasticSearch has nodes to run on
+ fail:
+ msg: |-
+ No schedulable nodes found matching node selector for Elasticsearch - '{{ openshift_logging_es_nodeselector }}'
+ when:
+ - openshift_schedulable_node_labels | lib_utils_oo_has_no_matching_selector(openshift_logging_es_nodeselector)
+
- name: Validate Elasticsearch cluster size
fail: msg="The openshift_logging_es_cluster_size may only be scaled down manually. Please see official documentation on how to do this."
when: openshift_logging_facts.elasticsearch.deploymentconfigs | length > openshift_logging_es_cluster_size|int
@@ -15,11 +22,11 @@
elasticsearch_name: "{{ 'logging-elasticsearch' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}"
es_component: "{{ 'es' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}"
-- name: Set default image variables based on deployment_type
+- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ var_file_name }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
- - "default_images.yml"
+ - "{{ openshift_deployment_type }}.yml"
+ - "default_images.yml"
loop_control:
loop_var: var_file_name
@@ -32,6 +39,18 @@
- include_tasks: determine_version.yaml
+- set_fact:
+ full_restart_cluster: True
+ when:
+ - _es_installed_version is defined
+ - _es_installed_version.split('.')[0] | int < __es_version.split('.')[0] | int
+
+- set_fact:
+ full_restart_cluster: True
+ when:
+ - _es_ops_installed_version is defined
+ - _es_ops_installed_version.split('.')[0] | int < __es_version.split('.')[0] | int
+
# allow passing in a tempdir
- name: Create temp directory for doing work in
command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
@@ -52,7 +71,6 @@
# we want to make sure we have all the necessary components here
# service account
-
- name: Create ES service account
oc_serviceaccount:
state: present
@@ -67,22 +85,17 @@
name: "aggregated-logging-elasticsearch"
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
when:
- - openshift_logging_image_pull_secret == ''
+ - openshift_logging_image_pull_secret == ''
# rolebinding reader
-- copy:
- src: rolebinding-reader.yml
- dest: "{{ tempdir }}/rolebinding-reader.yml"
-
- name: Create rolebinding-reader role
- oc_obj:
+ oc_clusterrole:
state: present
- name: "rolebinding-reader"
- kind: clusterrole
- namespace: "{{ openshift_logging_elasticsearch_namespace }}"
- files:
- - "{{ tempdir }}/rolebinding-reader.yml"
- delete_after: true
+ name: rolebinding-reader
+ rules:
+ - apiGroups: [""]
+ resources: ["clusterrolebindings"]
+ verbs: ["get"]
# SA roles
- name: Set rolebinding-reader permissions for ES
@@ -102,7 +115,7 @@
# logging-metrics-reader role
- template:
- src: logging-metrics-role.j2
+ src: "{{ __base_file_dir }}/logging-metrics-role.j2"
dest: "{{mktemp.stdout}}/templates/logging-metrics-role.yml"
vars:
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
@@ -111,7 +124,7 @@
- name: Create logging-metrics-reader-role
command: >
- {{ openshift.common.client_binary }}
+ {{ openshift_client_binary }}
--config={{ openshift.common.config_base }}/master/admin.kubeconfig
-n "{{ openshift_logging_elasticsearch_namespace }}"
create -f "{{mktemp.stdout}}/templates/logging-metrics-role.yml"
@@ -122,21 +135,31 @@
- fail:
msg: "There was an error creating the logging-metrics-role and binding: {{prometheus_out}}"
when:
- - "prometheus_out.stderr | length > 0"
- - "'already exists' not in prometheus_out.stderr"
+ - "prometheus_out.stderr | length > 0"
+ - "'already exists' not in prometheus_out.stderr"
+
+- set_fact:
+ _logging_metrics_proxy_passwd: "{{ 16 | lib_utils_oo_random_word | b64encode }}"
+
+- template:
+ src: passwd.j2
+ dest: "{{mktemp.stdout}}/passwd.yml"
+ vars:
+ logging_user_name: "{{ openshift_logging_elasticsearch_prometheus_sa }}"
+ logging_user_passwd: "{{ _logging_metrics_proxy_passwd }}"
# View role and binding
- name: Generate logging-elasticsearch-view-role
template:
- src: rolebinding.j2
+ src: "{{ __base_file_dir }}/rolebinding.j2"
dest: "{{mktemp.stdout}}/logging-elasticsearch-view-role.yaml"
vars:
obj_name: logging-elasticsearch-view-role
roleRef:
name: view
subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
+ - kind: ServiceAccount
+ name: aggregated-logging-elasticsearch
changed_when: no
- name: Set logging-elasticsearch-view-role role
@@ -146,65 +169,108 @@
kind: rolebinding
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
files:
- - "{{ tempdir }}/logging-elasticsearch-view-role.yaml"
+ - "{{ tempdir }}/logging-elasticsearch-view-role.yaml"
delete_after: true
# configmap
- assert:
that:
- - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes
+ - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes
msg: "The openshift_logging_elasticsearch_kibana_index_mode '{{ openshift_logging_elasticsearch_kibana_index_mode }}' only supports one of: {{ __kibana_index_modes | join(', ') }}"
- assert:
that:
- - "{{ openshift_logging_es_log_appenders | length > 0 }}"
+ - "{{ openshift_logging_es_log_appenders | length > 0 }}"
msg: "The openshift_logging_es_log_appenders '{{ openshift_logging_es_log_appenders }}' has an unrecognized option and only supports the following as a list: {{ __es_log_appenders | join(', ') }}"
- template:
- src: elasticsearch-logging.yml.j2
- dest: "{{ tempdir }}/elasticsearch-logging.yml"
- vars:
- root_logger: "{{openshift_logging_es_log_appenders | join(', ')}}"
- when: es_logging_contents is undefined
- changed_when: no
-
-- set_fact:
- __es_num_of_shards: "{{ _es_configmap | default({}) | walk('index.number_of_shards', '1') }}"
- __es_num_of_replicas: "{{ _es_configmap | default({}) | walk('index.number_of_replicas', '0') }}"
-
-- template:
- src: elasticsearch.yml.j2
+ src: "{{ __base_file_dir }}/elasticsearch.yml.j2"
dest: "{{ tempdir }}/elasticsearch.yml"
vars:
allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower | default('false') }}"
- es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(None) or __es_num_of_shards }}"
- es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(None) or __es_num_of_replicas }}"
+ es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
+ es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas| default(0) }}"
es_kibana_index_mode: "{{ openshift_logging_elasticsearch_kibana_index_mode | default('unique') }}"
-
- when: es_config_contents is undefined
- changed_when: no
-
-- copy:
- content: "{{ es_logging_contents }}"
- dest: "{{ tempdir }}/elasticsearch-logging.yml"
- when: es_logging_contents is defined
changed_when: no
-- copy:
- content: "{{ es_config_contents }}"
- dest: "{{ tempdir }}/elasticsearch.yml"
- when: es_config_contents is defined
- changed_when: no
+# create diff between current configmap files and our current files
+- when: not openshift_logging_es5_techpreview
+ block:
+ - template:
+ src: "{{ __base_file_dir }}/elasticsearch-logging.yml.j2"
+ dest: "{{ tempdir }}/elasticsearch-logging.yml"
+ vars:
+ root_logger: "{{openshift_logging_es_log_appenders | join(', ')}}"
+ changed_when: no
+
+ - include_role:
+ name: openshift_logging
+ tasks_from: patch_configmap_files.yaml
+ vars:
+ configmap_name: "logging-elasticsearch"
+ configmap_namespace: "logging"
+ configmap_file_names:
+ - current_file: "elasticsearch.yml"
+ new_file: "{{ tempdir }}/elasticsearch.yml"
+ protected_lines: ["number_of_shards", "number_of_replicas"]
+ - current_file: "logging.yml"
+ new_file: "{{ tempdir }}/elasticsearch-logging.yml"
+
+ - name: Set ES configmap
+ oc_configmap:
+ state: present
+ name: "{{ elasticsearch_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ from_file:
+ elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml"
+ logging.yml: "{{ tempdir }}/elasticsearch-logging.yml"
+ register: es_config_creation
+ notify: "restart elasticsearch"
+
+- when: openshift_logging_es5_techpreview | bool
+ block:
+ - template:
+ src: "{{ __base_file_dir }}/log4j2.properties.j2"
+ dest: "{{ tempdir }}/log4j2.properties"
+ vars:
+ root_logger: "{{ openshift_logging_es_log_appenders | list }}"
+ changed_when: no
+
+ - include_role:
+ name: openshift_logging
+ tasks_from: patch_configmap_files.yaml
+ vars:
+ configmap_name: "logging-elasticsearch"
+ configmap_namespace: "logging"
+ configmap_file_names:
+ - current_file: "elasticsearch.yml"
+ new_file: "{{ tempdir }}/elasticsearch.yml"
+ - current_file: "log4j2.properties"
+ new_file: "{{ tempdir }}/log4j2.properties"
+
+ - name: Set ES configmap
+ oc_configmap:
+ state: present
+ name: "{{ elasticsearch_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ from_file:
+ elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml"
+ log4j2.properties: "{{ tempdir }}/log4j2.properties"
+ register: es_config_creation
+ notify: "restart elasticsearch"
+
+- when: es_config_creation.changed | bool
+ block:
+ - set_fact:
+ _restart_logging_components: "{{ _restart_logging_components | default([]) + [es_component] | unique }}"
-- name: Set ES configmap
- oc_configmap:
- state: present
- name: "{{ elasticsearch_name }}"
- namespace: "{{ openshift_logging_elasticsearch_namespace }}"
- from_file:
- elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml"
- logging.yml: "{{ tempdir }}/elasticsearch-logging.yml"
+ - shell: >
+ {{ openshift_client_binary }} get dc -l component="{{ es_component }}" -n "{{ openshift_logging_elasticsearch_namespace }}" -o name | cut -d'/' -f2
+ register: _es_dcs
+ - set_fact:
+ _restart_logging_nodes: "{{ _restart_logging_nodes | default([]) + [_es_dcs.stdout] | unique }}"
+ when: _es_dcs.stdout != ""
# secret
- name: Set ES secret
@@ -213,22 +279,24 @@
name: "logging-elasticsearch"
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
files:
- - name: key
- path: "{{ generated_certs_dir }}/logging-es.jks"
- - name: truststore
- path: "{{ generated_certs_dir }}/truststore.jks"
- - name: searchguard.key
- path: "{{ generated_certs_dir }}/elasticsearch.jks"
- - name: searchguard.truststore
- path: "{{ generated_certs_dir }}/truststore.jks"
- - name: admin-key
- path: "{{ generated_certs_dir }}/system.admin.key"
- - name: admin-cert
- path: "{{ generated_certs_dir }}/system.admin.crt"
- - name: admin-ca
- path: "{{ generated_certs_dir }}/ca.crt"
- - name: admin.jks
- path: "{{ generated_certs_dir }}/system.admin.jks"
+ - name: key
+ path: "{{ generated_certs_dir }}/logging-es.jks"
+ - name: truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: searchguard.key
+ path: "{{ generated_certs_dir }}/elasticsearch.jks"
+ - name: searchguard.truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: admin-key
+ path: "{{ generated_certs_dir }}/system.admin.key"
+ - name: admin-cert
+ path: "{{ generated_certs_dir }}/system.admin.crt"
+ - name: admin-ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: admin.jks
+ path: "{{ generated_certs_dir }}/system.admin.jks"
+ - name: passwd.yml
+ path: "{{mktemp.stdout}}/passwd.yml"
# services
- name: Set logging-{{ es_component }}-cluster service
@@ -242,7 +310,7 @@
labels:
logging-infra: 'support'
ports:
- - port: 9300
+ - port: 9300
- name: Set logging-{{ es_component }} service
oc_service:
@@ -255,8 +323,8 @@
labels:
logging-infra: 'support'
ports:
- - port: 9200
- targetPort: "restapi"
+ - port: 9200
+ targetPort: "restapi"
- name: Set logging-{{ es_component}}-prometheus service
oc_service:
@@ -266,9 +334,9 @@
labels:
logging-infra: 'support'
ports:
- - name: proxy
- port: 443
- targetPort: 4443
+ - name: proxy
+ port: 443
+ targetPort: 4443
selector:
component: "{{ es_component }}"
provider: openshift
@@ -296,49 +364,49 @@
# so we check for the presence of 'stderr' to determine if the obj exists or not
# the RC for existing and not existing is both 0
- when:
- - logging_elasticsearch_pvc.results.stderr is defined
- - openshift_logging_elasticsearch_storage_type == "pvc"
+ - logging_elasticsearch_pvc.results.stderr is defined
+ - openshift_logging_elasticsearch_storage_type == "pvc"
block:
- # storageclasses are used by default but if static then disable
- # storageclasses with the storageClassName set to "" in pvc.j2
- - name: Creating ES storage template - static
- template:
- src: pvc.j2
- dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
- vars:
- obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
- access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
- pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
- storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}"
- when:
- - not openshift_logging_elasticsearch_pvc_dynamic | bool
-
- # Storageclasses are used by default if configured
- - name: Creating ES storage template - dynamic
- template:
- src: pvc.j2
- dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
- vars:
- obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
- access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
- pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
- when:
- - openshift_logging_elasticsearch_pvc_dynamic | bool
-
- - name: Set ES storage
- oc_obj:
- state: present
- kind: pvc
- name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- namespace: "{{ openshift_logging_elasticsearch_namespace }}"
- files:
- - "{{ tempdir }}/templates/logging-es-pvc.yml"
- delete_after: true
+ # storageclasses are used by default but if static then disable
+ # storageclasses with the storageClassName set to "" in pvc.j2
+ - name: Creating ES storage template - static
+ template:
+ src: "{{ __base_file_dir }}/pvc.j2"
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}"
+ when:
+ - not openshift_logging_elasticsearch_pvc_dynamic | bool
+
+ # Storageclasses are used by default if configured
+ - name: Creating ES storage template - dynamic
+ template:
+ src: "{{ __base_file_dir }}/pvc.j2"
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ when:
+ - openshift_logging_elasticsearch_pvc_dynamic | bool
+
+ - name: Set ES storage
+ oc_obj:
+ state: present
+ kind: pvc
+ name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - "{{ tempdir }}/templates/logging-es-pvc.yml"
+ delete_after: true
- set_fact:
- es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}"
+ es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | lib_utils_oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}"
when: openshift_logging_elasticsearch_deployment_name == ""
- set_fact:
@@ -348,7 +416,7 @@
# DC
- name: Set ES dc templates
template:
- src: es.j2
+ src: "{{ __base_file_dir }}/es.j2"
dest: "{{ tempdir }}/templates/logging-es-dc.yml"
vars:
es_cluster_name: "{{ es_component }}"
@@ -365,6 +433,9 @@
es_container_security_context: "{{ _es_containers.elasticsearch.securityContext if _es_containers is defined and 'elasticsearch' in _es_containers and 'securityContext' in _es_containers.elasticsearch else None }}"
deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}"
es_replicas: 1
+ basic_auth_passwd: "{{ _logging_metrics_proxy_passwd | b64decode }}"
+ es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
+ es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas| default(0) }}"
- name: Set ES dc
oc_obj:
@@ -373,50 +444,57 @@
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
kind: dc
files:
- - "{{ tempdir }}/templates/logging-es-dc.yml"
+ - "{{ tempdir }}/templates/logging-es-dc.yml"
delete_after: true
+ register: es_dc_creation
+ notify: "restart elasticsearch"
+
+- set_fact:
+ _restart_logging_components: "{{ _restart_logging_components | default([]) + [es_component] | unique }}"
+ _restart_logging_nodes: "{{ _restart_logging_nodes | default([]) + [es_deploy_name] | unique }}"
+ when: es_dc_creation.changed | bool
- name: Retrieving the cert to use when generating secrets for the {{ es_component }} component
slurp:
src: "{{ generated_certs_dir }}/{{ item.file }}"
register: key_pairs
with_items:
- - { name: "ca_file", file: "ca.crt" }
- - { name: "es_key", file: "system.logging.es.key" }
- - { name: "es_cert", file: "system.logging.es.crt" }
+ - { name: "ca_file", file: "ca.crt" }
+ - { name: "es_key", file: "system.logging.es.key" }
+ - { name: "es_cert", file: "system.logging.es.crt" }
when: openshift_logging_es_allow_external | bool
- set_fact:
es_key: "{{ lookup('file', openshift_logging_es_key) | b64encode }}"
when:
- - openshift_logging_es_key | trim | length > 0
- - openshift_logging_es_allow_external | bool
+ - openshift_logging_es_key | trim | length > 0
+ - openshift_logging_es_allow_external | bool
changed_when: false
- set_fact:
es_cert: "{{ lookup('file', openshift_logging_es_cert) | b64encode }}"
when:
- - openshift_logging_es_cert | trim | length > 0
- - openshift_logging_es_allow_external | bool
+ - openshift_logging_es_cert | trim | length > 0
+ - openshift_logging_es_allow_external | bool
changed_when: false
- set_fact:
es_ca: "{{ lookup('file', openshift_logging_es_ca_ext) | b64encode }}"
when:
- - openshift_logging_es_ca_ext | trim | length > 0
- - openshift_logging_es_allow_external | bool
+ - openshift_logging_es_ca_ext | trim | length > 0
+ - openshift_logging_es_allow_external | bool
changed_when: false
- set_fact:
es_ca: "{{ key_pairs | entry_from_named_pair('ca_file') }}"
when:
- - es_ca is not defined
- - openshift_logging_es_allow_external | bool
+ - es_ca is not defined
+ - openshift_logging_es_allow_external | bool
changed_when: false
- name: Generating Elasticsearch {{ es_component }} route template
template:
- src: route_reencrypt.j2
+ src: "{{ __base_file_dir }}/route_reencrypt.j2"
dest: "{{mktemp.stdout}}/templates/logging-{{ es_component }}-route.yaml"
vars:
obj_name: "logging-{{ es_component }}"
@@ -442,7 +520,7 @@
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
kind: route
files:
- - "{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml"
+ - "{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml"
when: openshift_logging_es_allow_external | bool
## Placeholder for migration when necessary ##
diff --git a/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
new file mode 100644
index 000000000..14f2313e1
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
@@ -0,0 +1,113 @@
+---
+## get all pods for the cluster
+- command: >
+ {{ openshift_client_binary }} get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
+ register: _cluster_pods
+
+### Check for cluster state before making changes -- if its red then we don't want to continue
+- name: "Checking current health for {{ _es_node }} cluster"
+ shell: >
+ {{ openshift_client_binary }} exec "{{ _cluster_pods.stdout.split(' ')[0] }}" -c elasticsearch -n "{{ openshift_logging_elasticsearch_namespace }}" -- es_cluster_health
+ register: _pod_status
+ when: _cluster_pods.stdout_lines | count > 0
+
+- when:
+ - _pod_status.stdout is defined
+ - (_pod_status.stdout | from_json)['status'] in ['red']
+ block:
+ - name: Set Logging message to manually restart
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_logging:
+ message: "Cluster logging-{{ _cluster_component }} was in a red state and will not be automatically restarted. Please see documentation regarding doing a {{ 'full' if full_restart_cluster | bool else 'rolling'}} cluster restart."
+
+ - debug: msg="Cluster logging-{{ _cluster_component }} was in a red state and will not be automatically restarted. Please see documentation regarding doing a {{ 'full' if full_restart_cluster | bool else 'rolling'}} cluster restart."
+
+- when: _pod_status.stdout is undefined or (_pod_status.stdout | from_json)['status'] in ['green', 'yellow']
+ block:
+ # Disable external communication for {{ _cluster_component }}
+ - name: Disable external communication for logging-{{ _cluster_component }}
+ oc_service:
+ state: present
+ name: "logging-{{ _cluster_component }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ selector:
+ component: "{{ _cluster_component }}"
+ provider: openshift
+ connection: blocked
+ labels:
+ logging-infra: 'support'
+ ports:
+ - port: 9200
+ targetPort: "restapi"
+ when:
+ - full_restart_cluster | bool
+
+ - name: "Disable shard balancing for logging-{{ _cluster_component }} cluster"
+ command: >
+ {{ openshift_client_binary }} exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "none" } }'
+ register: _disable_output
+ changed_when: "'\"acknowledged\":true' in _disable_output.stdout"
+ when: _cluster_pods.stdout_lines | count > 0
+
+ # Flush ES
+ - name: "Flushing for logging-{{ _cluster_component }} cluster"
+ command: >
+ {{ openshift_client_binary }} exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_flush/synced'
+ register: _flush_output
+ changed_when: "'\"acknowledged\":true' in _flush_output.stdout"
+ when:
+ - _cluster_pods.stdout_lines | count > 0
+ - full_restart_cluster | bool
+
+ - command: >
+ {{ openshift_client_binary }} get dc -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+ register: _cluster_dcs
+
+ ## restart all dcs for full restart
+ - name: "Restart ES node {{ _es_node }}"
+ include_tasks: restart_es_node.yml
+ with_items: "{{ _cluster_dcs }}"
+ loop_control:
+ loop_var: _es_node
+ when:
+ - full_restart_cluster | bool
+
+ ## restart the node if its dc is in the list of nodes to restart?
+ - name: "Restart ES node {{ _es_node }}"
+ include_tasks: restart_es_node.yml
+ with_items: "{{ _restart_logging_nodes }}"
+ loop_control:
+ loop_var: _es_node
+ when:
+ - not full_restart_cluster | bool
+ - _es_node in _cluster_dcs.stdout
+
+ ## we may need a new first pod to run against -- fetch them all again
+ - command: >
+ {{ openshift_client_binary }} get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
+ register: _cluster_pods
+
+ - name: "Enable shard balancing for logging-{{ _cluster_component }} cluster"
+ command: >
+ {{ openshift_client_binary }} exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "all" } }'
+ register: _enable_output
+ changed_when: "'\"acknowledged\":true' in _enable_output.stdout"
+
+ # Reenable external communication for {{ _cluster_component }}
+ - name: Reenable external communication for logging-{{ _cluster_component }}
+ oc_service:
+ state: present
+ name: "logging-{{ _cluster_component }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ selector:
+ component: "{{ _cluster_component }}"
+ provider: openshift
+ labels:
+ logging-infra: 'support'
+ ports:
+ - port: 9200
+ targetPort: "restapi"
+ when:
+ - full_restart_cluster | bool
diff --git a/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml b/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml
new file mode 100644
index 000000000..a1e172168
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml
@@ -0,0 +1,37 @@
+---
+- name: "Rolling out new pod(s) for {{ _es_node }}"
+ command: >
+ {{ openshift_client_binary }} rollout latest {{ _es_node }} -n {{ openshift_logging_elasticsearch_namespace }}
+
+- name: "Waiting for {{ _es_node }} to finish scaling up"
+ oc_obj:
+ state: list
+ name: "{{ _es_node }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ kind: dc
+ register: _dc_output
+ until:
+ - _dc_output.results.results[0].status is defined
+ - _dc_output.results.results[0].status.readyReplicas is defined
+ - _dc_output.results.results[0].status.readyReplicas > 0
+ - _dc_output.results.results[0].status.updatedReplicas is defined
+ - _dc_output.results.results[0].status.updatedReplicas > 0
+ retries: 60
+ delay: 30
+
+- name: Getting name(s) of replica pod(s)
+ command: >
+ {{ openshift_client_binary }} get pods -l deploymentconfig={{ _es_node }} -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+ register: _pods
+
+- name: "Waiting for ES to be ready for {{ _es_node }}"
+ shell: >
+ {{ openshift_client_binary }} exec "{{ _pod }}" -c elasticsearch -n "{{ openshift_logging_elasticsearch_namespace }}" -- es_cluster_health
+ with_items: "{{ _pods.stdout.split(' ') }}"
+ loop_control:
+ loop_var: _pod
+ register: _pod_status
+ until: (_pod_status.stdout | from_json)['status'] in ['green', 'yellow']
+ retries: 60
+ delay: 5
+ changed_when: false
diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2 b/roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch-logging.yml.j2
index c7b2b2721..c7b2b2721 100644
--- a/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2
+++ b/roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch-logging.yml.j2
diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch.yml.j2
index 65b08d970..65b08d970 100644
--- a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
+++ b/roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch.yml.j2
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/2.x/es.j2
index bf04094a3..e3315adc8 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/2.x/es.j2
@@ -17,6 +17,7 @@ spec:
logging-infra: "{{logging_component}}"
strategy:
type: Recreate
+ triggers: []
template:
metadata:
name: "{{deploy_name}}"
@@ -40,42 +41,7 @@ spec:
{% endfor %}
{% endif %}
containers:
- - name: proxy
- image: {{ proxy_image }}
- imagePullPolicy: IfNotPresent
- args:
- - --upstream-ca=/etc/elasticsearch/secret/admin-ca
- - --https-address=:4443
- - -provider=openshift
- - -client-id={{openshift_logging_elasticsearch_prometheus_sa}}
- - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
- - -cookie-secret={{ 16 | oo_random_word | b64encode }}
- - -upstream=https://localhost:9200
- - '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}'
- - '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}'
- - --tls-cert=/etc/tls/private/tls.crt
- - --tls-key=/etc/tls/private/tls.key
- - -pass-access-token
- - -pass-user-headers
- ports:
- - containerPort: 4443
- name: proxy
- protocol: TCP
- volumeMounts:
- - mountPath: /etc/tls/private
- name: proxy-tls
- readOnly: true
- - mountPath: /etc/elasticsearch/secret
- name: elasticsearch
- readOnly: true
- resources:
- limits:
- memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}"
- requests:
- cpu: "{{openshift_logging_elasticsearch_proxy_cpu_request }}"
- memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}"
- -
- name: "elasticsearch"
+ - name: "elasticsearch"
image: {{image}}
imagePullPolicy: IfNotPresent
resources:
@@ -163,6 +129,42 @@ spec:
initialDelaySeconds: 10
timeoutSeconds: 30
periodSeconds: 5
+ -
+ name: proxy
+ image: {{ proxy_image }}
+ imagePullPolicy: IfNotPresent
+ args:
+ - --upstream-ca=/etc/elasticsearch/secret/admin-ca
+ - --https-address=:4443
+ - -provider=openshift
+ - -client-id={{openshift_logging_elasticsearch_prometheus_sa}}
+ - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
+ - -cookie-secret={{ 16 | lib_utils_oo_random_word | b64encode }}
+ - -basic-auth-password={{ basic_auth_passwd }}
+ - -upstream=https://localhost:9200
+ - '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}'
+ - '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}'
+ - --tls-cert=/etc/tls/private/tls.crt
+ - --tls-key=/etc/tls/private/tls.key
+ - -pass-access-token
+ - -pass-user-headers
+ ports:
+ - containerPort: 4443
+ name: proxy
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /etc/tls/private
+ name: proxy-tls
+ readOnly: true
+ - mountPath: /etc/elasticsearch/secret
+ name: elasticsearch
+ readOnly: true
+ resources:
+ limits:
+ memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}"
+ requests:
+ cpu: "{{openshift_logging_elasticsearch_proxy_cpu_request }}"
+ memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}"
volumes:
- name: proxy-tls
secret:
diff --git a/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2 b/roles/openshift_logging_elasticsearch/templates/2.x/logging-metrics-role.j2
index d9800e5a5..d9800e5a5 100644
--- a/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2
+++ b/roles/openshift_logging_elasticsearch/templates/2.x/logging-metrics-role.j2
diff --git a/roles/openshift_logging_elasticsearch/templates/pvc.j2 b/roles/openshift_logging_elasticsearch/templates/2.x/pvc.j2
index 3c6896df4..3c6896df4 100644
--- a/roles/openshift_logging_elasticsearch/templates/pvc.j2
+++ b/roles/openshift_logging_elasticsearch/templates/2.x/pvc.j2
diff --git a/roles/openshift_logging_elasticsearch/templates/rolebinding.j2 b/roles/openshift_logging_elasticsearch/templates/2.x/rolebinding.j2
index fcd4e87cc..fcd4e87cc 100644
--- a/roles/openshift_logging_elasticsearch/templates/rolebinding.j2
+++ b/roles/openshift_logging_elasticsearch/templates/2.x/rolebinding.j2
diff --git a/roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2 b/roles/openshift_logging_elasticsearch/templates/2.x/route_reencrypt.j2
index d2e8b8bcb..d2e8b8bcb 100644
--- a/roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2
+++ b/roles/openshift_logging_elasticsearch/templates/2.x/route_reencrypt.j2
diff --git a/roles/openshift_logging_elasticsearch/templates/5.x/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/5.x/elasticsearch.yml.j2
new file mode 100644
index 000000000..009471d2c
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/templates/5.x/elasticsearch.yml.j2
@@ -0,0 +1,74 @@
+cluster:
+ name: ${CLUSTER_NAME}
+
+script:
+ inline: true
+ stored: true
+
+node:
+ name: ${DC_NAME}
+ master: ${IS_MASTER}
+ data: ${HAS_DATA}
+ max_local_storage_nodes: 1
+
+network:
+ host: 0.0.0.0
+
+cloud:
+ kubernetes:
+ service: ${SERVICE_DNS}
+ namespace: ${NAMESPACE}
+
+discovery.zen:
+ hosts_provider: kubernetes
+ minimum_master_nodes: ${NODE_QUORUM}
+
+gateway:
+ recover_after_nodes: ${NODE_QUORUM}
+ expected_nodes: ${RECOVER_EXPECTED_NODES}
+ recover_after_time: ${RECOVER_AFTER_TIME}
+
+io.fabric8.elasticsearch.kibana.mapping.app: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
+io.fabric8.elasticsearch.kibana.mapping.ops: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
+io.fabric8.elasticsearch.kibana.mapping.empty: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
+
+openshift.config:
+ use_common_data_model: true
+ project_index_prefix: "project"
+ time_field_name: "@timestamp"
+
+openshift.searchguard:
+ keystore.path: /etc/elasticsearch/secret/admin.jks
+ truststore.path: /etc/elasticsearch/secret/searchguard.truststore
+
+openshift.operations.allow_cluster_reader: {{allow_cluster_reader | default (false)}}
+
+openshift.kibana.index.mode: {{es_kibana_index_mode | default('unique')}}
+
+path:
+ data: /elasticsearch/persistent/${CLUSTER_NAME}/data
+ logs: /elasticsearch/${CLUSTER_NAME}/logs
+
+searchguard:
+ authcz.admin_dn:
+ - CN=system.admin,OU=OpenShift,O=Logging
+ config_index_name: ".searchguard.${DC_NAME}"
+ ssl:
+ transport:
+ enabled: true
+ enforce_hostname_verification: false
+ keystore_type: JKS
+ keystore_filepath: /etc/elasticsearch/secret/searchguard.key
+ keystore_password: kspass
+ truststore_type: JKS
+ truststore_filepath: /etc/elasticsearch/secret/searchguard.truststore
+ truststore_password: tspass
+ http:
+ enabled: true
+ keystore_type: JKS
+ keystore_filepath: /etc/elasticsearch/secret/key
+ keystore_password: kspass
+ clientauth_mode: OPTIONAL
+ truststore_type: JKS
+ truststore_filepath: /etc/elasticsearch/secret/truststore
+ truststore_password: tspass
diff --git a/roles/openshift_logging_elasticsearch/templates/5.x/es.j2 b/roles/openshift_logging_elasticsearch/templates/5.x/es.j2
new file mode 100644
index 000000000..8685b7849
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/templates/5.x/es.j2
@@ -0,0 +1,194 @@
+apiVersion: "v1"
+kind: "DeploymentConfig"
+metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provider: openshift
+ component: "{{component}}"
+ deployment: "{{deploy_name}}"
+ logging-infra: "{{logging_component}}"
+spec:
+ replicas: {{es_replicas|default(1)}}
+ revisionHistoryLimit: 0
+ selector:
+ provider: openshift
+ component: "{{component}}"
+ deployment: "{{deploy_name}}"
+ logging-infra: "{{logging_component}}"
+ strategy:
+ type: Recreate
+ triggers: []
+ template:
+ metadata:
+ name: "{{deploy_name}}"
+ labels:
+ logging-infra: "{{logging_component}}"
+ provider: openshift
+ component: "{{component}}"
+ deployment: "{{deploy_name}}"
+ spec:
+ terminationGracePeriod: 600
+ serviceAccountName: aggregated-logging-elasticsearch
+ securityContext:
+ supplementalGroups:
+{% for group in es_storage_groups %}
+ - {{group}}
+{% endfor %}
+{% if es_node_selector is iterable and es_node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in es_node_selector.items() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+ containers:
+ - name: "elasticsearch"
+ image: {{image}}
+ imagePullPolicy: IfNotPresent
+ resources:
+ limits:
+{% if es_cpu_limit is defined and es_cpu_limit is not none and es_cpu_limit != '' %}
+ cpu: "{{es_cpu_limit}}"
+{% endif %}
+ memory: "{{es_memory_limit}}"
+ requests:
+ cpu: "{{es_cpu_request}}"
+ memory: "{{es_memory_limit}}"
+{% if es_container_security_context %}
+ securityContext: {{ es_container_security_context | to_yaml }}
+{% endif %}
+ ports:
+ -
+ containerPort: 9200
+ name: "restapi"
+ -
+ containerPort: 9300
+ name: "cluster"
+ env:
+ -
+ name: "DC_NAME"
+ value: "{{deploy_name}}"
+ -
+ name: "NAMESPACE"
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ -
+ name: "KUBERNETES_TRUST_CERT"
+ value: "true"
+ -
+ name: "SERVICE_DNS"
+ value: "logging-{{es_cluster_name}}-cluster"
+ -
+ name: "CLUSTER_NAME"
+ value: "logging-{{es_cluster_name}}"
+ -
+ name: "INSTANCE_RAM"
+ value: "{{openshift_logging_elasticsearch_memory_limit}}"
+ -
+ name: "HEAP_DUMP_LOCATION"
+ value: "/elasticsearch/persistent/heapdump.hprof"
+ -
+ name: "NODE_QUORUM"
+ value: "{{es_node_quorum | int}}"
+ -
+ name: "RECOVER_EXPECTED_NODES"
+ value: "{{es_recover_expected_nodes}}"
+ -
+ name: "RECOVER_AFTER_TIME"
+ value: "{{openshift_logging_elasticsearch_recover_after_time}}"
+ -
+ name: "READINESS_PROBE_TIMEOUT"
+ value: "30"
+ -
+ name: "POD_LABEL"
+ value: "component={{component}}"
+ -
+ name: "IS_MASTER"
+ value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}"
+
+ -
+ name: "HAS_DATA"
+ value: "{% if deploy_type in ['data-master', 'data-client'] %}true{% else %}false{% endif %}"
+ -
+ name: "PROMETHEUS_USER"
+ value: "{{openshift_logging_elasticsearch_prometheus_sa}}"
+
+ -
+ name: "PRIMARY_SHARDS"
+ value: "{{ es_number_of_shards | default ('1') }}"
+
+ -
+ name: "REPLICA_SHARDS"
+ value: "{{ es_number_of_replicas | default ('0') }}"
+
+ volumeMounts:
+ - name: elasticsearch
+ mountPath: /etc/elasticsearch/secret
+ readOnly: true
+ - name: elasticsearch-config
+ mountPath: /usr/share/java/elasticsearch/config
+ readOnly: true
+ - name: elasticsearch-storage
+ mountPath: /elasticsearch/persistent
+ readinessProbe:
+ exec:
+ command:
+ - "/usr/share/elasticsearch/probe/readiness.sh"
+ initialDelaySeconds: 10
+ timeoutSeconds: 30
+ periodSeconds: 5
+ -
+ name: proxy
+ image: {{ proxy_image }}
+ imagePullPolicy: IfNotPresent
+ args:
+ - --upstream-ca=/etc/elasticsearch/secret/admin-ca
+ - --https-address=:4443
+ - -provider=openshift
+ - -client-id={{openshift_logging_elasticsearch_prometheus_sa}}
+ - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
+ - -cookie-secret={{ 16 | lib_utils_oo_random_word | b64encode }}
+ - -upstream=https://localhost:9200
+ - '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}'
+ - '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}'
+ - --tls-cert=/etc/tls/private/tls.crt
+ - --tls-key=/etc/tls/private/tls.key
+ - -pass-access-token
+ - -pass-user-headers
+ ports:
+ - containerPort: 4443
+ name: proxy
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /etc/tls/private
+ name: proxy-tls
+ readOnly: true
+ - mountPath: /etc/elasticsearch/secret
+ name: elasticsearch
+ readOnly: true
+ resources:
+ limits:
+ memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}"
+ requests:
+ cpu: "{{openshift_logging_elasticsearch_proxy_cpu_request }}"
+ memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}"
+ volumes:
+ - name: proxy-tls
+ secret:
+ secretName: prometheus-tls
+ - name: elasticsearch
+ secret:
+ secretName: logging-elasticsearch
+ - name: elasticsearch-config
+ configMap:
+ name: logging-elasticsearch
+ - name: elasticsearch-storage
+{% if openshift_logging_elasticsearch_storage_type == 'pvc' %}
+ persistentVolumeClaim:
+ claimName: {{ openshift_logging_elasticsearch_pvc_name }}
+{% elif openshift_logging_elasticsearch_storage_type == 'hostmount' %}
+ hostPath:
+ path: {{ openshift_logging_elasticsearch_hostmount_path }}
+{% else %}
+ emptydir: {}
+{% endif %}
diff --git a/roles/openshift_logging_elasticsearch/templates/5.x/log4j2.properties.j2 b/roles/openshift_logging_elasticsearch/templates/5.x/log4j2.properties.j2
new file mode 100644
index 000000000..1e78e4ea0
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/templates/5.x/log4j2.properties.j2
@@ -0,0 +1,78 @@
+status = error
+
+# log action execution errors for easier debugging
+logger.action.name = org.elasticsearch.action
+logger.action.level = debug
+
+appender.console.type = Console
+appender.console.name = console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
+
+appender.rolling.type = RollingFile
+appender.rolling.name = rolling
+appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
+appender.rolling.layout.type = PatternLayout
+appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n
+appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.log
+appender.rolling.policies.type = Policies
+appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.rolling.policies.time.interval = 1
+appender.rolling.policies.time.modulate = true
+
+rootLogger.level = info
+{% if 'console' in root_logger %}
+rootLogger.appenderRef.console.ref = console
+{% endif %}
+{% if 'file' in root_logger %}
+rootLogger.appenderRef.rolling.ref = rolling
+{% endif %}
+
+appender.deprecation_rolling.type = RollingFile
+appender.deprecation_rolling.name = deprecation_rolling
+appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
+appender.deprecation_rolling.layout.type = PatternLayout
+appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n
+appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz
+appender.deprecation_rolling.policies.type = Policies
+appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.deprecation_rolling.policies.size.size = 1GB
+appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
+appender.deprecation_rolling.strategy.max = 4
+
+logger.deprecation.name = org.elasticsearch.deprecation
+logger.deprecation.level = warn
+logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
+logger.deprecation.additivity = false
+
+appender.index_search_slowlog_rolling.type = RollingFile
+appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
+appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log
+appender.index_search_slowlog_rolling.layout.type = PatternLayout
+appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
+appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%d{yyyy-MM-dd}.log
+appender.index_search_slowlog_rolling.policies.type = Policies
+appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.index_search_slowlog_rolling.policies.time.interval = 1
+appender.index_search_slowlog_rolling.policies.time.modulate = true
+
+logger.index_search_slowlog_rolling.name = index.search.slowlog
+logger.index_search_slowlog_rolling.level = trace
+logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
+logger.index_search_slowlog_rolling.additivity = false
+
+appender.index_indexing_slowlog_rolling.type = RollingFile
+appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
+appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log
+appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
+appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
+appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
+appender.index_indexing_slowlog_rolling.policies.type = Policies
+appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.index_indexing_slowlog_rolling.policies.time.interval = 1
+appender.index_indexing_slowlog_rolling.policies.time.modulate = true
+
+logger.index_indexing_slowlog.name = index.indexing.slowlog.index
+logger.index_indexing_slowlog.level = trace
+logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
+logger.index_indexing_slowlog.additivity = false
diff --git a/roles/openshift_logging_elasticsearch/templates/5.x/logging-metrics-role.j2 b/roles/openshift_logging_elasticsearch/templates/5.x/logging-metrics-role.j2
new file mode 100644
index 000000000..d9800e5a5
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/templates/5.x/logging-metrics-role.j2
@@ -0,0 +1,31 @@
+---
+apiVersion: v1
+kind: List
+items:
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: Role
+ metadata:
+ annotations:
+ rbac.authorization.kubernetes.io/autoupdate: "true"
+ name: prometheus-metrics-viewer
+ namespace: {{ namespace }}
+ rules:
+ - apiGroups:
+ - metrics.openshift.io
+ resources:
+ - prometheus
+ verbs:
+ - view
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: RoleBinding
+ metadata:
+ name: prometheus-metrics-viewer
+ namespace: {{ namespace }}
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: prometheus-metrics-viewer
+ subjects:
+ - kind: ServiceAccount
+ namespace: {{ role_namespace }}
+ name: {{ role_user }}
diff --git a/roles/openshift_logging_elasticsearch/templates/5.x/pvc.j2 b/roles/openshift_logging_elasticsearch/templates/5.x/pvc.j2
new file mode 100644
index 000000000..3c6896df4
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/templates/5.x/pvc.j2
@@ -0,0 +1,30 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{obj_name}}
+ labels:
+ logging-infra: support
+{% if annotations is defined %}
+ annotations:
+{% for key,value in annotations.items() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+{% if pv_selector is defined and pv_selector is mapping %}
+ selector:
+ matchLabels:
+{% for key,value in pv_selector.items() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+ accessModes:
+{% for mode in access_modes %}
+ - {{ mode }}
+{% endfor %}
+ resources:
+ requests:
+ storage: {{size}}
+{% if storage_class_name is defined %}
+ storageClassName: {{ storage_class_name }}
+{% endif %}
diff --git a/roles/openshift_logging_elasticsearch/templates/5.x/rolebinding.j2 b/roles/openshift_logging_elasticsearch/templates/5.x/rolebinding.j2
new file mode 100644
index 000000000..fcd4e87cc
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/templates/5.x/rolebinding.j2
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: RoleBinding
+metadata:
+ name: {{obj_name}}
+roleRef:
+{% if roleRef.kind is defined %}
+ kind: {{ roleRef.kind }}
+{% endif %}
+ name: {{ roleRef.name }}
+subjects:
+{% for sub in subjects %}
+ - kind: {{ sub.kind }}
+ name: {{ sub.name }}
+{% endfor %}
diff --git a/roles/openshift_logging_elasticsearch/templates/5.x/route_reencrypt.j2 b/roles/openshift_logging_elasticsearch/templates/5.x/route_reencrypt.j2
new file mode 100644
index 000000000..d2e8b8bcb
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/templates/5.x/route_reencrypt.j2
@@ -0,0 +1,36 @@
+apiVersion: "v1"
+kind: "Route"
+metadata:
+ name: "{{obj_name}}"
+{% if labels is defined%}
+ labels:
+{% for key, value in labels.items() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+ host: {{ route_host }}
+ tls:
+{% if tls_key is defined and tls_key | length > 0 %}
+ key: |
+{{ tls_key|indent(6, true) }}
+{% if tls_cert is defined and tls_cert | length > 0 %}
+ certificate: |
+{{ tls_cert|indent(6, true) }}
+{% endif %}
+{% endif %}
+ caCertificate: |
+{% for line in tls_ca_cert.split('\n') %}
+ {{ line }}
+{% endfor %}
+ destinationCACertificate: |
+{% for line in tls_dest_ca_cert.split('\n') %}
+ {{ line }}
+{% endfor %}
+ termination: reencrypt
+{% if edge_term_policy is defined and edge_term_policy | length > 0 %}
+ insecureEdgeTerminationPolicy: {{ edge_term_policy }}
+{% endif %}
+ to:
+ kind: Service
+ name: {{ service_name }}
diff --git a/roles/openshift_logging_elasticsearch/templates/passwd.j2 b/roles/openshift_logging_elasticsearch/templates/passwd.j2
new file mode 100644
index 000000000..a22151eef
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/templates/passwd.j2
@@ -0,0 +1,2 @@
+"{{logging_user_name}}":
+ passwd: "{{logging_user_passwd}}"
diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml
index 09e2ee4d0..41c1c748d 100644
--- a/roles/openshift_logging_elasticsearch/vars/main.yml
+++ b/roles/openshift_logging_elasticsearch/vars/main.yml
@@ -1,10 +1,12 @@
---
-__latest_es_version: "3_6"
-__allowed_es_versions: ["3_5", "3_6", "3_7"]
+__latest_es_version: "3_10"
+__allowed_es_versions: ["3_5", "3_6", "3_7", "3_8", "3_9", "3_10"]
__allowed_es_types: ["data-master", "data-client", "master", "client"]
__es_log_appenders: ['file', 'console']
__kibana_index_modes: ["unique", "shared_ops"]
+__es_local_curl: "curl -s --cacert /etc/elasticsearch/secret/admin-ca --cert /etc/elasticsearch/secret/admin-cert --key /etc/elasticsearch/secret/admin-key"
+
# TODO: integrate these
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
es_node_quorum: "{{ openshift_logging_elasticsearch_replica_count | int/2 + 1 }}"
@@ -12,3 +14,4 @@ es_min_masters_default: "{{ (openshift_logging_elasticsearch_replica_count | int
es_min_masters: "{{ (openshift_logging_elasticsearch_replica_count == 1) | ternary(1, es_min_masters_default) }}"
es_recover_after_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}"
es_recover_expected_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}"
+full_restart_cluster: False
diff --git a/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml
index 07d92896f..0cf48a66b 100644
--- a/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml
+++ b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml
@@ -1,5 +1,5 @@
---
__openshift_logging_elasticsearch_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_logging_elasticsearch_image_version: "{{ openshift_logging_image_version | default ('v3.7') }}"
+__openshift_logging_elasticsearch_image_version: "{{ openshift_logging_image_version | default (openshift_image_tag) }}"
__openshift_logging_elasticsearch_proxy_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_logging_elasticsearch_proxy_image_version: "{{ openshift_logging_image_version | default ('v3.7') }}"
+__openshift_logging_elasticsearch_proxy_image_version: "{{ openshift_logging_image_version | default (openshift_image_tag) }}"