From d5879135f077e4aaaa09c8e4ebf5d9ef2a063a78 Mon Sep 17 00:00:00 2001
From: Eric Wolinetz
Date: Fri, 5 Jan 2018 11:06:43 -0600
Subject: Adding support for ES 5.x tech preview opt in

---
 playbooks/openshift-logging/private/config.yml      |  41 +++-
 roles/openshift_logging/defaults/main.yml           |   2 +
 roles/openshift_logging/tasks/install_logging.yaml  |   8 +
 roles/openshift_logging/tasks/main.yaml             |   5 +
 .../files/2.x/curator.yml                           |  18 ++
 .../files/5.x/curator.yml                           |  18 ++
 roles/openshift_logging_curator/files/curator.yml   |  18 --
 roles/openshift_logging_curator/tasks/main.yaml     |   4 +-
 .../templates/2.x/curator.j2                        | 113 ++++++++++
 .../templates/5.x/curator.j2                        | 113 ++++++++++
 .../openshift_logging_curator/templates/curator.j2  | 113 ----------
 .../files/rolebinding-reader.yml                    |   9 -
 .../tasks/determine_version.yaml                    |   8 +-
 .../tasks/main.yaml                                 | 133 ++++++-----
 .../templates/2.x/elasticsearch-logging.yml.j2      | 105 +++++++++
 .../templates/2.x/elasticsearch.yml.j2              |  87 +++++++
 .../templates/2.x/es.j2                             | 187 ++++++++++++++++
 .../templates/2.x/logging-metrics-role.j2           |  31 +++
 .../templates/2.x/pvc.j2                            |  30 +++
 .../templates/2.x/rolebinding.j2                    |  14 ++
 .../templates/2.x/route_reencrypt.j2                |  36 +++
 .../templates/5.x/elasticsearch.yml.j2              |  74 ++++++
 .../templates/5.x/es.j2                             | 194 ++++++++++++++++
 .../templates/5.x/log4j2.properties.j2              |  78 +++++++
 .../templates/5.x/logging-metrics-role.j2           |  31 +++
 .../templates/5.x/pvc.j2                            |  30 +++
 .../templates/5.x/rolebinding.j2                    |  14 ++
 .../templates/5.x/route_reencrypt.j2                |  36 +++
 .../templates/elasticsearch-logging.yml.j2          | 105 ---------
 .../templates/elasticsearch.yml.j2                  |  87 -------
 .../templates/es.j2                                 | 187 ----------------
 .../templates/logging-metrics-role.j2               |  31 ---
 .../templates/pvc.j2                                |  30 ---
 .../templates/rolebinding.j2                        |  14 --
 .../templates/route_reencrypt.j2                    |  36 ---
 .../openshift_logging_elasticsearch/vars/main.yml   |   1 -
 .../files/2.x/eventrouter-template.yaml             | 103 +++++++++
 .../files/5.x/eventrouter-template.yaml             | 103 +++++++++
 .../files/eventrouter-template.yaml                 | 103 ---------
 .../tasks/install_eventrouter.yaml                  |   2 +-
 .../templates/5.x/eventrouter-template.j2           | 109 +++++++++
 .../templates/eventrouter-template.j2               | 109 ---------
 .../files/2.x/fluentd-throttle-config.yaml          |   7 +
 .../files/2.x/secure-forward.conf                   |  26 +++
 .../files/5.x/fluentd-throttle-config.yaml          |   7 +
 .../files/5.x/secure-forward.conf                   |  26 +++
 .../files/fluentd-throttle-config.yaml              |   7 -
 .../files/secure-forward.conf                       |  26 ---
 roles/openshift_logging_fluentd/tasks/main.yaml     |   8 +-
 .../templates/2.x/fluent.conf.j2                    |  80 +++++++
 .../templates/2.x/fluentd.j2                        | 249 +++++++++++++++++++++
 .../templates/5.x/fluent.conf.j2                    |  80 +++++++
 .../templates/5.x/fluentd.j2                        | 249 +++++++++++++++++++++
 .../templates/fluent.conf.j2                        |  80 -------
 .../openshift_logging_fluentd/templates/fluentd.j2  | 249 ---------------------
 roles/openshift_logging_kibana/tasks/main.yaml      |   6 +-
 .../templates/2.x/kibana.j2                         | 176 +++++++++++++++
 .../templates/2.x/oauth-client.j2                   |  16 ++
 .../templates/2.x/route_reencrypt.j2                |  36 +++
 .../templates/5.x/kibana.j2                         | 170 ++++++++++++++
 .../templates/5.x/oauth-client.j2                   |  16 ++
 .../templates/5.x/route_reencrypt.j2                |  36 +++
 roles/openshift_logging_kibana/templates/kibana.j2  | 176 ---------------
 .../templates/oauth-client.j2                       |  16 --
 .../templates/route_reencrypt.j2                    |  36 ---
 roles/openshift_logging_mux/files/2.x/fluent.conf   |  37 +++
 .../files/2.x/secure-forward.conf                   |  26 +++
 roles/openshift_logging_mux/files/5.x/fluent.conf   |  37 +++
 .../files/5.x/secure-forward.conf                   |  26 +++
 roles/openshift_logging_mux/files/fluent.conf       |  37 ---
 .../files/secure-forward.conf                       |  26 ---
 roles/openshift_logging_mux/tasks/main.yaml         |   6 +-
 roles/openshift_logging_mux/templates/2.x/mux.j2    | 202 +++++++++++++++++
 roles/openshift_logging_mux/templates/5.x/mux.j2    | 202 +++++++++++++++++
 roles/openshift_logging_mux/templates/mux.j2        | 202 -----------------
 75 files changed, 3377 insertions(+), 1772 deletions(-)
 create mode 100644 roles/openshift_logging_curator/files/2.x/curator.yml
 create mode 100644 roles/openshift_logging_curator/files/5.x/curator.yml
 delete mode 100644 roles/openshift_logging_curator/files/curator.yml
 create mode 100644 roles/openshift_logging_curator/templates/2.x/curator.j2
 create mode 100644 roles/openshift_logging_curator/templates/5.x/curator.j2
 delete mode 100644 roles/openshift_logging_curator/templates/curator.j2
 delete mode 100644 roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml
 create mode 100644 roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch-logging.yml.j2
 create mode 100644 roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch.yml.j2
 create mode 100644 roles/openshift_logging_elasticsearch/templates/2.x/es.j2
 create mode 100644 roles/openshift_logging_elasticsearch/templates/2.x/logging-metrics-role.j2
 create mode 100644 roles/openshift_logging_elasticsearch/templates/2.x/pvc.j2
 create mode 100644 roles/openshift_logging_elasticsearch/templates/2.x/rolebinding.j2
 create mode 100644 roles/openshift_logging_elasticsearch/templates/2.x/route_reencrypt.j2
 create mode 100644 roles/openshift_logging_elasticsearch/templates/5.x/elasticsearch.yml.j2
 create mode 100644 roles/openshift_logging_elasticsearch/templates/5.x/es.j2
 create mode 100644 roles/openshift_logging_elasticsearch/templates/5.x/log4j2.properties.j2
 create mode 100644 roles/openshift_logging_elasticsearch/templates/5.x/logging-metrics-role.j2
 create mode 100644 roles/openshift_logging_elasticsearch/templates/5.x/pvc.j2
 create mode 100644 roles/openshift_logging_elasticsearch/templates/5.x/rolebinding.j2
 create mode 100644 roles/openshift_logging_elasticsearch/templates/5.x/route_reencrypt.j2
 delete mode 100644 roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2
 delete mode 100644 roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
 delete mode 100644 roles/openshift_logging_elasticsearch/templates/es.j2
 delete mode 100644 roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2
 delete mode 100644 roles/openshift_logging_elasticsearch/templates/pvc.j2
 delete mode 100644 roles/openshift_logging_elasticsearch/templates/rolebinding.j2
 delete mode 100644 roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2
 create mode 100644 roles/openshift_logging_eventrouter/files/2.x/eventrouter-template.yaml
 create mode 100644 roles/openshift_logging_eventrouter/files/5.x/eventrouter-template.yaml
 delete mode 100644 roles/openshift_logging_eventrouter/files/eventrouter-template.yaml
 create mode 100644 roles/openshift_logging_eventrouter/templates/5.x/eventrouter-template.j2
 delete mode 100644 roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
 create mode 100644 roles/openshift_logging_fluentd/files/2.x/fluentd-throttle-config.yaml
 create mode 100644 roles/openshift_logging_fluentd/files/2.x/secure-forward.conf
 create mode 100644 roles/openshift_logging_fluentd/files/5.x/fluentd-throttle-config.yaml
 create mode 100644 roles/openshift_logging_fluentd/files/5.x/secure-forward.conf
 delete mode 100644 roles/openshift_logging_fluentd/files/fluentd-throttle-config.yaml
 delete mode 100644 roles/openshift_logging_fluentd/files/secure-forward.conf
 create mode 100644 roles/openshift_logging_fluentd/templates/2.x/fluent.conf.j2
 create mode 100644 roles/openshift_logging_fluentd/templates/2.x/fluentd.j2
 create mode 100644 roles/openshift_logging_fluentd/templates/5.x/fluent.conf.j2
 create mode 100644 roles/openshift_logging_fluentd/templates/5.x/fluentd.j2
 delete mode 100644 roles/openshift_logging_fluentd/templates/fluent.conf.j2
 delete mode 100644 roles/openshift_logging_fluentd/templates/fluentd.j2
 create mode 100644 roles/openshift_logging_kibana/templates/2.x/kibana.j2
 create mode 100644 roles/openshift_logging_kibana/templates/2.x/oauth-client.j2
 create mode 100644 roles/openshift_logging_kibana/templates/2.x/route_reencrypt.j2
 create mode 100644 roles/openshift_logging_kibana/templates/5.x/kibana.j2
 create mode 100644 roles/openshift_logging_kibana/templates/5.x/oauth-client.j2
 create mode 100644 roles/openshift_logging_kibana/templates/5.x/route_reencrypt.j2
 delete mode 100644 roles/openshift_logging_kibana/templates/kibana.j2
 delete mode 100644 roles/openshift_logging_kibana/templates/oauth-client.j2
 delete mode 100644 roles/openshift_logging_kibana/templates/route_reencrypt.j2
 create mode 100644 roles/openshift_logging_mux/files/2.x/fluent.conf
 create mode 100644 roles/openshift_logging_mux/files/2.x/secure-forward.conf
 create mode 100644 roles/openshift_logging_mux/files/5.x/fluent.conf
 create mode 100644 roles/openshift_logging_mux/files/5.x/secure-forward.conf
 delete mode 100644 roles/openshift_logging_mux/files/fluent.conf
 delete mode 100644 roles/openshift_logging_mux/files/secure-forward.conf
 create mode 100644 roles/openshift_logging_mux/templates/2.x/mux.j2
 create mode 100644 roles/openshift_logging_mux/templates/5.x/mux.j2
 delete mode 100644 roles/openshift_logging_mux/templates/mux.j2

diff --git a/playbooks/openshift-logging/private/config.yml b/playbooks/openshift-logging/private/config.yml
index 07aa8bfde..f2a57f9f8 100644
--- a/playbooks/openshift-logging/private/config.yml
+++ b/playbooks/openshift-logging/private/config.yml
@@ -11,6 +11,38 @@
           status: "In Progress"
           start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
 
+- name: Update vm.max_map_count for ES 5.x
+  hosts: all
+  gather_facts: false
+  tasks:
+  - when:
+    - openshift_logging_es5_techpreview | default(false) | bool
+    - openshift_deployment_type in ['origin']
+    block:
+    - name: Checking vm max_map_count value
+      command:
+        cat /proc/sys/vm/max_map_count
+      register: _vm_max_map_count
+
+    - stat:
+        path: /etc/sysctl.d/99-elasticsearch.conf
+      register: _99_es_conf
+
+    - name: Check for current value of vm.max_map_count in 99-elasticsearch.conf
+      command: >
+        sed /etc/sysctl.d/99-elasticsearch.conf -e 's/vm.max_map_count=\(.*\)/\1/'
+      register: _curr_vm_max_map_count
+      when: _99_es_conf.stat.exists
+
+    - name: Updating vm.max_map_count value
+      sysctl:
+        name: vm.max_map_count
+        value: 262144
+        sysctl_file: "/etc/sysctl.d/99-elasticsearch.conf"
+        reload: yes
+      when:
+      - _vm_max_map_count.stdout | default(0) | int < 262144 | int or _curr_vm_max_map_count.stdout | default(0) | int < 262144
+
 - name: OpenShift Aggregated Logging
   hosts: oo_first_master
   roles:
@@ -20,11 +52,10 @@
 - name: Update Master configs
   hosts: oo_masters:!oo_first_master
   tasks:
-  - block:
-    - import_role:
-        name: openshift_logging
-        tasks_from: update_master_config
-      when: not openshift.common.version_gte_3_9
+  - include_role:
+
name: openshift_logging + tasks_from: update_master_config + when: not openshift.common.version_gte_3_9 - name: Logging Install Checkpoint End hosts: all diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index 2f1aa061f..e887fd691 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -12,6 +12,8 @@ openshift_logging_install_logging: False openshift_logging_purge_logging: False openshift_logging_image_pull_secret: "" +openshift_logging_es5_techpreview: False + openshift_logging_curator_default_days: 30 openshift_logging_curator_run_hour: 0 openshift_logging_curator_run_minute: 0 diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index c905502ac..9fabc5826 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -59,6 +59,14 @@ vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" +- set_fact: + __base_file_dir: "{{ '5.x' if openshift_logging_es5_techpreview | bool else '2.x' }}" + __es_version: "{{ '5.x' if openshift_logging_es5_techpreview | bool else '2.x' }}" + +- set_fact: + openshift_logging_image_version: "techpreview" + when: openshift_logging_es5_techpreview | bool + ## Elasticsearch - set_fact: es_indices={{ es_indices | default([]) + [item | int - 1] }} diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index 60cc399fa..57426bc77 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -3,6 +3,11 @@ msg: Only one Fluentd nodeselector key pair should be provided when: openshift_logging_fluentd_nodeselector.keys() | count > 1 +- assert: + that: openshift_deployment_type in ['origin'] + msg: "Only 'origin' deployments are allowed with openshift_logging_es5_techpreview set to true" + when: openshift_logging_es5_techpreview | bool + - name: Create temp directory for doing work in command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX register: mktemp diff --git a/roles/openshift_logging_curator/files/2.x/curator.yml b/roles/openshift_logging_curator/files/2.x/curator.yml new file mode 100644 index 000000000..8d62d8e7d --- /dev/null +++ b/roles/openshift_logging_curator/files/2.x/curator.yml @@ -0,0 +1,18 @@ +# Logging example curator config file + +# uncomment and use this to override the defaults from env vars +#.defaults: +# delete: +# days: 30 +# runhour: 0 +# runminute: 0 + +# to keep ops logs for a different duration: +#.operations: +# delete: +# weeks: 8 + +# example for a normal project +#myapp: +# delete: +# weeks: 1 diff --git a/roles/openshift_logging_curator/files/5.x/curator.yml b/roles/openshift_logging_curator/files/5.x/curator.yml new file mode 100644 index 000000000..8d62d8e7d --- /dev/null +++ b/roles/openshift_logging_curator/files/5.x/curator.yml @@ -0,0 +1,18 @@ +# Logging example curator config file + +# uncomment and use this to override the defaults from env vars +#.defaults: +# delete: +# days: 30 +# runhour: 0 +# runminute: 0 + +# to keep ops logs for a different duration: +#.operations: +# delete: +# weeks: 8 + +# example for a normal project +#myapp: +# delete: +# weeks: 1 diff --git a/roles/openshift_logging_curator/files/curator.yml b/roles/openshift_logging_curator/files/curator.yml deleted file mode 100644 index 8d62d8e7d..000000000 --- a/roles/openshift_logging_curator/files/curator.yml +++ /dev/null @@ -1,18 +0,0 @@ -# 
Logging example curator config file - -# uncomment and use this to override the defaults from env vars -#.defaults: -# delete: -# days: 30 -# runhour: 0 -# runminute: 0 - -# to keep ops logs for a different duration: -#.operations: -# delete: -# weeks: 8 - -# example for a normal project -#myapp: -# delete: -# weeks: 1 diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml index cc68998f5..6e8605d28 100644 --- a/roles/openshift_logging_curator/tasks/main.yaml +++ b/roles/openshift_logging_curator/tasks/main.yaml @@ -52,7 +52,7 @@ # configmap - copy: - src: curator.yml + src: "{{ __base_file_dir }}/curator.yml" dest: "{{ tempdir }}/curator.yml" changed_when: no @@ -96,7 +96,7 @@ # TODO: scale should not exceed 1 - name: Generate Curator deploymentconfig template: - src: curator.j2 + src: "{{ __base_file_dir }}/curator.j2" dest: "{{ tempdir }}/templates/curator-dc.yaml" vars: component: "{{ curator_component }}" diff --git a/roles/openshift_logging_curator/templates/2.x/curator.j2 b/roles/openshift_logging_curator/templates/2.x/curator.j2 new file mode 100644 index 000000000..8acff8141 --- /dev/null +++ b/roles/openshift_logging_curator/templates/2.x/curator.j2 @@ -0,0 +1,113 @@ +apiVersion: "v1" +kind: "DeploymentConfig" +metadata: + name: "{{deploy_name}}" + labels: + provider: openshift + component: "{{component}}" + logging-infra: "{{logging_component}}" +spec: + replicas: {{curator_replicas|default(1)}} + selector: + provider: openshift + component: "{{component}}" + logging-infra: "{{logging_component}}" + strategy: + rollingParams: + intervalSeconds: 1 + timeoutSeconds: 600 + updatePeriodSeconds: 1 + type: Recreate + template: + metadata: + name: "{{deploy_name}}" + labels: + logging-infra: "{{logging_component}}" + provider: openshift + component: "{{component}}" + spec: + terminationGracePeriod: 600 + serviceAccountName: aggregated-logging-curator +{% if curator_node_selector is iterable and curator_node_selector | length > 0 %} + nodeSelector: +{% for key, value in curator_node_selector.items() %} + {{key}}: "{{value}}" +{% endfor %} +{% endif %} + containers: + - + name: "curator" + image: {{image}} + imagePullPolicy: IfNotPresent +{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "") %} + resources: +{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") %} + limits: +{% if curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "" %} + cpu: "{{curator_cpu_limit}}" +{% endif %} +{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %} + memory: "{{curator_memory_limit}}" +{% endif %} +{% endif %} +{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "") %} + requests: +{% if curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "" %} + cpu: "{{curator_cpu_request}}" +{% endif %} +{% if curator_memory_limit is defined and curator_memory_limit is not none and 
curator_memory_limit != "" %} + memory: "{{curator_memory_limit}}" +{% endif %} +{% endif %} +{% endif %} + env: + - + name: "K8S_HOST_URL" + value: "{{openshift_logging_curator_master_url}}" + - + name: "ES_HOST" + value: "{{es_host}}" + - + name: "ES_PORT" + value: "{{es_port}}" + - + name: "ES_CLIENT_CERT" + value: "/etc/curator/keys/cert" + - + name: "ES_CLIENT_KEY" + value: "/etc/curator/keys/key" + - + name: "ES_CA" + value: "/etc/curator/keys/ca" + - + name: "CURATOR_DEFAULT_DAYS" + value: "{{openshift_logging_curator_default_days}}" + - + name: "CURATOR_RUN_HOUR" + value: "{{openshift_logging_curator_run_hour}}" + - + name: "CURATOR_RUN_MINUTE" + value: "{{openshift_logging_curator_run_minute}}" + - + name: "CURATOR_RUN_TIMEZONE" + value: "{{openshift_logging_curator_run_timezone}}" + - + name: "CURATOR_SCRIPT_LOG_LEVEL" + value: "{{openshift_logging_curator_script_log_level}}" + - + name: "CURATOR_LOG_LEVEL" + value: "{{openshift_logging_curator_log_level}}" + volumeMounts: + - name: certs + mountPath: /etc/curator/keys + readOnly: true + - name: config + mountPath: /etc/curator/settings + readOnly: true + volumes: + - name: certs + secret: + secretName: logging-curator + - name: config + configMap: + name: logging-curator diff --git a/roles/openshift_logging_curator/templates/5.x/curator.j2 b/roles/openshift_logging_curator/templates/5.x/curator.j2 new file mode 100644 index 000000000..8acff8141 --- /dev/null +++ b/roles/openshift_logging_curator/templates/5.x/curator.j2 @@ -0,0 +1,113 @@ +apiVersion: "v1" +kind: "DeploymentConfig" +metadata: + name: "{{deploy_name}}" + labels: + provider: openshift + component: "{{component}}" + logging-infra: "{{logging_component}}" +spec: + replicas: {{curator_replicas|default(1)}} + selector: + provider: openshift + component: "{{component}}" + logging-infra: "{{logging_component}}" + strategy: + rollingParams: + intervalSeconds: 1 + timeoutSeconds: 600 + updatePeriodSeconds: 1 + type: Recreate + template: + metadata: + name: "{{deploy_name}}" + labels: + logging-infra: "{{logging_component}}" + provider: openshift + component: "{{component}}" + spec: + terminationGracePeriod: 600 + serviceAccountName: aggregated-logging-curator +{% if curator_node_selector is iterable and curator_node_selector | length > 0 %} + nodeSelector: +{% for key, value in curator_node_selector.items() %} + {{key}}: "{{value}}" +{% endfor %} +{% endif %} + containers: + - + name: "curator" + image: {{image}} + imagePullPolicy: IfNotPresent +{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "") %} + resources: +{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") %} + limits: +{% if curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "" %} + cpu: "{{curator_cpu_limit}}" +{% endif %} +{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %} + memory: "{{curator_memory_limit}}" +{% endif %} +{% endif %} +{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and 
curator_cpu_request != "") %} + requests: +{% if curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "" %} + cpu: "{{curator_cpu_request}}" +{% endif %} +{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %} + memory: "{{curator_memory_limit}}" +{% endif %} +{% endif %} +{% endif %} + env: + - + name: "K8S_HOST_URL" + value: "{{openshift_logging_curator_master_url}}" + - + name: "ES_HOST" + value: "{{es_host}}" + - + name: "ES_PORT" + value: "{{es_port}}" + - + name: "ES_CLIENT_CERT" + value: "/etc/curator/keys/cert" + - + name: "ES_CLIENT_KEY" + value: "/etc/curator/keys/key" + - + name: "ES_CA" + value: "/etc/curator/keys/ca" + - + name: "CURATOR_DEFAULT_DAYS" + value: "{{openshift_logging_curator_default_days}}" + - + name: "CURATOR_RUN_HOUR" + value: "{{openshift_logging_curator_run_hour}}" + - + name: "CURATOR_RUN_MINUTE" + value: "{{openshift_logging_curator_run_minute}}" + - + name: "CURATOR_RUN_TIMEZONE" + value: "{{openshift_logging_curator_run_timezone}}" + - + name: "CURATOR_SCRIPT_LOG_LEVEL" + value: "{{openshift_logging_curator_script_log_level}}" + - + name: "CURATOR_LOG_LEVEL" + value: "{{openshift_logging_curator_log_level}}" + volumeMounts: + - name: certs + mountPath: /etc/curator/keys + readOnly: true + - name: config + mountPath: /etc/curator/settings + readOnly: true + volumes: + - name: certs + secret: + secretName: logging-curator + - name: config + configMap: + name: logging-curator diff --git a/roles/openshift_logging_curator/templates/curator.j2 b/roles/openshift_logging_curator/templates/curator.j2 deleted file mode 100644 index 8acff8141..000000000 --- a/roles/openshift_logging_curator/templates/curator.j2 +++ /dev/null @@ -1,113 +0,0 @@ -apiVersion: "v1" -kind: "DeploymentConfig" -metadata: - name: "{{deploy_name}}" - labels: - provider: openshift - component: "{{component}}" - logging-infra: "{{logging_component}}" -spec: - replicas: {{curator_replicas|default(1)}} - selector: - provider: openshift - component: "{{component}}" - logging-infra: "{{logging_component}}" - strategy: - rollingParams: - intervalSeconds: 1 - timeoutSeconds: 600 - updatePeriodSeconds: 1 - type: Recreate - template: - metadata: - name: "{{deploy_name}}" - labels: - logging-infra: "{{logging_component}}" - provider: openshift - component: "{{component}}" - spec: - terminationGracePeriod: 600 - serviceAccountName: aggregated-logging-curator -{% if curator_node_selector is iterable and curator_node_selector | length > 0 %} - nodeSelector: -{% for key, value in curator_node_selector.items() %} - {{key}}: "{{value}}" -{% endfor %} -{% endif %} - containers: - - - name: "curator" - image: {{image}} - imagePullPolicy: IfNotPresent -{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "") %} - resources: -{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") %} - limits: -{% if curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "" %} - cpu: "{{curator_cpu_limit}}" -{% endif %} -{% if curator_memory_limit is defined and curator_memory_limit is not none and 
curator_memory_limit != "" %} - memory: "{{curator_memory_limit}}" -{% endif %} -{% endif %} -{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "") %} - requests: -{% if curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "" %} - cpu: "{{curator_cpu_request}}" -{% endif %} -{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %} - memory: "{{curator_memory_limit}}" -{% endif %} -{% endif %} -{% endif %} - env: - - - name: "K8S_HOST_URL" - value: "{{openshift_logging_curator_master_url}}" - - - name: "ES_HOST" - value: "{{es_host}}" - - - name: "ES_PORT" - value: "{{es_port}}" - - - name: "ES_CLIENT_CERT" - value: "/etc/curator/keys/cert" - - - name: "ES_CLIENT_KEY" - value: "/etc/curator/keys/key" - - - name: "ES_CA" - value: "/etc/curator/keys/ca" - - - name: "CURATOR_DEFAULT_DAYS" - value: "{{openshift_logging_curator_default_days}}" - - - name: "CURATOR_RUN_HOUR" - value: "{{openshift_logging_curator_run_hour}}" - - - name: "CURATOR_RUN_MINUTE" - value: "{{openshift_logging_curator_run_minute}}" - - - name: "CURATOR_RUN_TIMEZONE" - value: "{{openshift_logging_curator_run_timezone}}" - - - name: "CURATOR_SCRIPT_LOG_LEVEL" - value: "{{openshift_logging_curator_script_log_level}}" - - - name: "CURATOR_LOG_LEVEL" - value: "{{openshift_logging_curator_log_level}}" - volumeMounts: - - name: certs - mountPath: /etc/curator/keys - readOnly: true - - name: config - mountPath: /etc/curator/settings - readOnly: true - volumes: - - name: certs - secret: - secretName: logging-curator - - name: config - configMap: - name: logging-curator diff --git a/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml b/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml deleted file mode 100644 index 567c9f289..000000000 --- a/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: ClusterRole -metadata: - name: rolebinding-reader -rules: -- resources: - - clusterrolebindings - verbs: - - get diff --git a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml index c55e7c5ea..a7cc8f0ec 100644 --- a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml @@ -10,10 +10,14 @@ # should we just assume that we will have the correct major version? 
- set_fact: es_version="{{ openshift_logging_elasticsearch_image_version | regex_replace('^v?(?P\d)\.(?P\d).*$', '3_\\g') }}" - when: openshift_logging_elasticsearch_image_version != 'latest' + when: + - openshift_logging_elasticsearch_image_version != 'latest' + - not openshift_logging_es5_techpreview | default(false) | bool - fail: msg: Invalid version specified for Elasticsearch - when: es_version not in __allowed_es_versions + when: + - es_version not in __allowed_es_versions + - not openshift_logging_es5_techpreview | default(false) | bool - include_tasks: get_es_version.yml diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index b731d93a0..8a174f0d5 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -64,7 +64,6 @@ # we want to make sure we have all the necessary components here # service account - - name: Create ES service account oc_serviceaccount: state: present @@ -82,19 +81,14 @@ - openshift_logging_image_pull_secret == '' # rolebinding reader -- copy: - src: rolebinding-reader.yml - dest: "{{ tempdir }}/rolebinding-reader.yml" - - name: Create rolebinding-reader role - oc_obj: + oc_clusterrole: state: present - name: "rolebinding-reader" - kind: clusterrole - namespace: "{{ openshift_logging_elasticsearch_namespace }}" - files: - - "{{ tempdir }}/rolebinding-reader.yml" - delete_after: true + name: rolebinding-reader + rules: + - apiGroups: [""] + resources: ["clusterrolebindings"] + verbs: ["get"] # SA roles - name: Set rolebinding-reader permissions for ES @@ -114,7 +108,7 @@ # logging-metrics-reader role - template: - src: logging-metrics-role.j2 + src: "{{ __base_file_dir }}/logging-metrics-role.j2" dest: "{{mktemp.stdout}}/templates/logging-metrics-role.yml" vars: namespace: "{{ openshift_logging_elasticsearch_namespace }}" @@ -150,7 +144,7 @@ # View role and binding - name: Generate logging-elasticsearch-view-role template: - src: rolebinding.j2 + src: "{{ __base_file_dir }}/rolebinding.j2" dest: "{{mktemp.stdout}}/logging-elasticsearch-view-role.yaml" vars: obj_name: logging-elasticsearch-view-role @@ -183,51 +177,80 @@ msg: "The openshift_logging_es_log_appenders '{{ openshift_logging_es_log_appenders }}' has an unrecognized option and only supports the following as a list: {{ __es_log_appenders | join(', ') }}" - template: - src: elasticsearch-logging.yml.j2 - dest: "{{ tempdir }}/elasticsearch-logging.yml" - vars: - root_logger: "{{openshift_logging_es_log_appenders | join(', ')}}" - when: es_logging_contents is undefined - changed_when: no - -- template: - src: elasticsearch.yml.j2 + src: "{{ __base_file_dir }}/elasticsearch.yml.j2" dest: "{{ tempdir }}/elasticsearch.yml" vars: allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower | default('false') }}" es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}" es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas| default(0) }}" es_kibana_index_mode: "{{ openshift_logging_elasticsearch_kibana_index_mode | default('unique') }}" - - when: es_config_contents is undefined changed_when: no # create diff between current configmap files and our current files -# NOTE: include_role must be used instead of import_role because -# this task file is looped over from another role. 
-- include_role: - name: openshift_logging - tasks_from: patch_configmap_files.yaml - vars: - configmap_name: "logging-elasticsearch" - configmap_namespace: "logging" - configmap_file_names: - - current_file: "elasticsearch.yml" - new_file: "{{ tempdir }}/elasticsearch.yml" - protected_lines: ["number_of_shards", "number_of_replicas"] - - current_file: "logging.yml" - new_file: "{{ tempdir }}/elasticsearch-logging.yml" - -- name: Set ES configmap - oc_configmap: - state: present - name: "{{ elasticsearch_name }}" - namespace: "{{ openshift_logging_elasticsearch_namespace }}" - from_file: - elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml" - logging.yml: "{{ tempdir }}/elasticsearch-logging.yml" - register: es_config_creation - notify: "restart elasticsearch" +- when: not openshift_logging_es5_techpreview + block: + - template: + src: "{{ __base_file_dir }}/elasticsearch-logging.yml.j2" + dest: "{{ tempdir }}/elasticsearch-logging.yml" + vars: + root_logger: "{{openshift_logging_es_log_appenders | join(', ')}}" + changed_when: no + + - include_role: + name: openshift_logging + tasks_from: patch_configmap_files.yaml + vars: + configmap_name: "logging-elasticsearch" + configmap_namespace: "logging" + configmap_file_names: + - current_file: "elasticsearch.yml" + new_file: "{{ tempdir }}/elasticsearch.yml" + protected_lines: ["number_of_shards", "number_of_replicas"] + - current_file: "logging.yml" + new_file: "{{ tempdir }}/elasticsearch-logging.yml" + + - name: Set ES configmap + oc_configmap: + state: present + name: "{{ elasticsearch_name }}" + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + from_file: + elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml" + logging.yml: "{{ tempdir }}/elasticsearch-logging.yml" + register: es_config_creation + notify: "restart elasticsearch" + +- when: openshift_logging_es5_techpreview | bool + block: + - template: + src: "{{ __base_file_dir }}/log4j2.properties.j2" + dest: "{{ tempdir }}/log4j2.properties" + vars: + root_logger: "{{ openshift_logging_es_log_appenders | list }}" + changed_when: no + + - include_role: + name: openshift_logging + tasks_from: patch_configmap_files.yaml + vars: + configmap_name: "logging-elasticsearch" + configmap_namespace: "logging" + configmap_file_names: + - current_file: "elasticsearch.yml" + new_file: "{{ tempdir }}/elasticsearch.yml" + - current_file: "log4j2.properties" + new_file: "{{ tempdir }}/log4j2.properties" + + - name: Set ES configmap + oc_configmap: + state: present + name: "{{ elasticsearch_name }}" + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + from_file: + elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml" + log4j2.properties: "{{ tempdir }}/log4j2.properties" + register: es_config_creation + notify: "restart elasticsearch" - when: es_config_creation.changed | bool block: @@ -341,7 +364,7 @@ # storageclasses with the storageClassName set to "" in pvc.j2 - name: Creating ES storage template - static template: - src: pvc.j2 + src: "{{ __base_file_dir }}/pvc.j2" dest: "{{ tempdir }}/templates/logging-es-pvc.yml" vars: obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}" @@ -355,7 +378,7 @@ # Storageclasses are used by default if configured - name: Creating ES storage template - dynamic template: - src: pvc.j2 + src: "{{ __base_file_dir }}/pvc.j2" dest: "{{ tempdir }}/templates/logging-es-pvc.yml" vars: obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}" @@ -386,7 +409,7 @@ # DC - name: Set ES dc templates template: - src: es.j2 + src: "{{ __base_file_dir 
}}/es.j2" dest: "{{ tempdir }}/templates/logging-es-dc.yml" vars: es_cluster_name: "{{ es_component }}" @@ -404,6 +427,8 @@ deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}" es_replicas: 1 basic_auth_passwd: "{{ _logging_metrics_proxy_passwd | b64decode }}" + es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}" + es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas| default(0) }}" - name: Set ES dc oc_obj: @@ -462,7 +487,7 @@ - name: Generating Elasticsearch {{ es_component }} route template template: - src: route_reencrypt.j2 + src: "{{ __base_file_dir }}/route_reencrypt.j2" dest: "{{mktemp.stdout}}/templates/logging-{{ es_component }}-route.yaml" vars: obj_name: "logging-{{ es_component }}" diff --git a/roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch-logging.yml.j2 b/roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch-logging.yml.j2 new file mode 100644 index 000000000..c7b2b2721 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch-logging.yml.j2 @@ -0,0 +1,105 @@ +# you can override this using by setting a system property, for example -Des.logger.level=DEBUG +es.logger.level: INFO +rootLogger: ${es.logger.level}, {{root_logger}} +logger: + # log action execution errors for easier debugging + action: WARN + + # + # deprecation logging, turn to DEBUG to see them + deprecation: WARN, deprecation_log_file + + # reduce the logging for aws, too much is logged under the default INFO + com.amazonaws: WARN + + io.fabric8.elasticsearch: ${PLUGIN_LOGLEVEL} + io.fabric8.kubernetes: ${PLUGIN_LOGLEVEL} + + # aws will try to do some sketchy JMX stuff, but its not needed. + com.amazonaws.jmx.SdkMBeanRegistrySupport: ERROR + com.amazonaws.metrics.AwsSdkMetrics: ERROR + + org.apache.http: INFO + + # gateway + #gateway: DEBUG + #index.gateway: DEBUG + + # peer shard recovery + #indices.recovery: DEBUG + + # discovery + #discovery: TRACE + + index.search.slowlog: TRACE, index_search_slow_log_file + index.indexing.slowlog: TRACE, index_indexing_slow_log_file + + # search-guard + com.floragunn.searchguard: WARN + +additivity: + index.search.slowlog: false + index.indexing.slowlog: false + deprecation: false + +appender: + console: + type: console + layout: + type: consolePattern + conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %.1000m%n" + # need this filter until https://github.com/openshift/origin/issues/14515 is fixed + filter: + 1: + type: org.apache.log4j.varia.StringMatchFilter + StringToMatch: "SSL Problem illegal change cipher spec msg, conn state = 6, handshake state = 1" + AcceptOnMatch: false + + file: + type: dailyRollingFile + file: ${path.logs}/${cluster.name}.log + datePattern: "'.'yyyy-MM-dd" + layout: + type: pattern + conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" + # need this filter until https://github.com/openshift/origin/issues/14515 is fixed + filter: + 1: + type: org.apache.log4j.varia.StringMatchFilter + StringToMatch: "SSL Problem illegal change cipher spec msg, conn state = 6, handshake state = 1" + AcceptOnMatch: false + + # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files. 
+ # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html + #file: + #type: extrasRollingFile + #file: ${path.logs}/${cluster.name}.log + #rollingPolicy: timeBased + #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz + #layout: + #type: pattern + #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" + + deprecation_log_file: + type: dailyRollingFile + file: ${path.logs}/${cluster.name}_deprecation.log + datePattern: "'.'yyyy-MM-dd" + layout: + type: pattern + conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" + + index_search_slow_log_file: + type: dailyRollingFile + file: ${path.logs}/${cluster.name}_index_search_slowlog.log + datePattern: "'.'yyyy-MM-dd" + layout: + type: pattern + conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" + + index_indexing_slow_log_file: + type: dailyRollingFile + file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log + datePattern: "'.'yyyy-MM-dd" + layout: + type: pattern + conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" diff --git a/roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch.yml.j2 new file mode 100644 index 000000000..65b08d970 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch.yml.j2 @@ -0,0 +1,87 @@ +cluster: + name: ${CLUSTER_NAME} + +script: + inline: on + indexed: on + +index: + number_of_shards: {{ es_number_of_shards | default ('1') }} + number_of_replicas: {{ es_number_of_replicas | default ('0') }} + unassigned.node_left.delayed_timeout: 2m + translog: + flush_threshold_size: 256mb + flush_threshold_period: 5m + +node: + name: ${DC_NAME} + master: ${IS_MASTER} + data: ${HAS_DATA} + max_local_storage_nodes: 1 + +network: + host: 0.0.0.0 + +cloud: + kubernetes: + pod_label: ${POD_LABEL} + pod_port: 9300 + namespace: ${NAMESPACE} + +discovery: + type: kubernetes + zen.ping.multicast.enabled: false + zen.minimum_master_nodes: ${NODE_QUORUM} + +gateway: + recover_after_nodes: ${NODE_QUORUM} + expected_nodes: ${RECOVER_EXPECTED_NODES} + recover_after_time: ${RECOVER_AFTER_TIME} + +io.fabric8.elasticsearch.authentication.users: ["system.logging.kibana", "system.logging.fluentd", "system.logging.curator", "system.admin"] +io.fabric8.elasticsearch.kibana.mapping.app: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json +io.fabric8.elasticsearch.kibana.mapping.ops: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json +io.fabric8.elasticsearch.kibana.mapping.empty: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json + +openshift.config: + use_common_data_model: true + project_index_prefix: "project" + time_field_name: "@timestamp" + +openshift.searchguard: + keystore.path: /etc/elasticsearch/secret/admin.jks + truststore.path: /etc/elasticsearch/secret/searchguard.truststore + +openshift.operations.allow_cluster_reader: {{allow_cluster_reader | default (false)}} + +openshift.kibana.index.mode: {{es_kibana_index_mode | default('unique')}} + +path: + data: /elasticsearch/persistent/${CLUSTER_NAME}/data + logs: /elasticsearch/${CLUSTER_NAME}/logs + work: /elasticsearch/${CLUSTER_NAME}/work + scripts: /elasticsearch/${CLUSTER_NAME}/scripts + +searchguard: + authcz.admin_dn: + - CN=system.admin,OU=OpenShift,O=Logging + config_index_name: ".searchguard.${DC_NAME}" + ssl: + transport: + enabled: true + 
enforce_hostname_verification: false + keystore_type: JKS + keystore_filepath: /etc/elasticsearch/secret/searchguard.key + keystore_password: kspass + truststore_type: JKS + truststore_filepath: /etc/elasticsearch/secret/searchguard.truststore + truststore_password: tspass + http: + enabled: true + keystore_type: JKS + keystore_filepath: /etc/elasticsearch/secret/key + keystore_password: kspass + clientauth_mode: OPTIONAL + truststore_type: JKS + truststore_filepath: /etc/elasticsearch/secret/truststore + truststore_password: tspass diff --git a/roles/openshift_logging_elasticsearch/templates/2.x/es.j2 b/roles/openshift_logging_elasticsearch/templates/2.x/es.j2 new file mode 100644 index 000000000..b1d6a4489 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/2.x/es.j2 @@ -0,0 +1,187 @@ +apiVersion: "v1" +kind: "DeploymentConfig" +metadata: + name: "{{deploy_name}}" + labels: + provider: openshift + component: "{{component}}" + deployment: "{{deploy_name}}" + logging-infra: "{{logging_component}}" +spec: + replicas: {{es_replicas|default(1)}} + revisionHistoryLimit: 0 + selector: + provider: openshift + component: "{{component}}" + deployment: "{{deploy_name}}" + logging-infra: "{{logging_component}}" + strategy: + type: Recreate + triggers: [] + template: + metadata: + name: "{{deploy_name}}" + labels: + logging-infra: "{{logging_component}}" + provider: openshift + component: "{{component}}" + deployment: "{{deploy_name}}" + spec: + terminationGracePeriod: 600 + serviceAccountName: aggregated-logging-elasticsearch + securityContext: + supplementalGroups: +{% for group in es_storage_groups %} + - {{group}} +{% endfor %} +{% if es_node_selector is iterable and es_node_selector | length > 0 %} + nodeSelector: +{% for key, value in es_node_selector.items() %} + {{key}}: "{{value}}" +{% endfor %} +{% endif %} + containers: + - name: proxy + image: {{ proxy_image }} + imagePullPolicy: IfNotPresent + args: + - --upstream-ca=/etc/elasticsearch/secret/admin-ca + - --https-address=:4443 + - -provider=openshift + - -client-id={{openshift_logging_elasticsearch_prometheus_sa}} + - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token + - -cookie-secret={{ 16 | lib_utils_oo_random_word | b64encode }} + - -basic-auth-password={{ basic_auth_passwd }} + - -upstream=https://localhost:9200 + - '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}' + - '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}' + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - -pass-access-token + - -pass-user-headers + ports: + - containerPort: 4443 + name: proxy + protocol: TCP + volumeMounts: + - mountPath: /etc/tls/private + name: proxy-tls + readOnly: true + - mountPath: /etc/elasticsearch/secret + name: elasticsearch + readOnly: true + resources: + limits: + memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}" + requests: + cpu: "{{openshift_logging_elasticsearch_proxy_cpu_request }}" + memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}" + - + name: "elasticsearch" + image: {{image}} + imagePullPolicy: IfNotPresent + resources: + limits: +{% if es_cpu_limit is defined and es_cpu_limit is not none and es_cpu_limit != '' %} + cpu: "{{es_cpu_limit}}" +{% endif %} + memory: "{{es_memory_limit}}" + requests: + cpu: 
"{{es_cpu_request}}" + memory: "{{es_memory_limit}}" +{% if es_container_security_context %} + securityContext: {{ es_container_security_context | to_yaml }} +{% endif %} + ports: + - + containerPort: 9200 + name: "restapi" + - + containerPort: 9300 + name: "cluster" + env: + - + name: "DC_NAME" + value: "{{deploy_name}}" + - + name: "NAMESPACE" + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - + name: "KUBERNETES_TRUST_CERT" + value: "true" + - + name: "SERVICE_DNS" + value: "logging-{{es_cluster_name}}-cluster" + - + name: "CLUSTER_NAME" + value: "logging-{{es_cluster_name}}" + - + name: "INSTANCE_RAM" + value: "{{openshift_logging_elasticsearch_memory_limit}}" + - + name: "HEAP_DUMP_LOCATION" + value: "/elasticsearch/persistent/heapdump.hprof" + - + name: "NODE_QUORUM" + value: "{{es_node_quorum | int}}" + - + name: "RECOVER_EXPECTED_NODES" + value: "{{es_recover_expected_nodes}}" + - + name: "RECOVER_AFTER_TIME" + value: "{{openshift_logging_elasticsearch_recover_after_time}}" + - + name: "READINESS_PROBE_TIMEOUT" + value: "30" + - + name: "POD_LABEL" + value: "component={{component}}" + - + name: "IS_MASTER" + value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}" + + - + name: "HAS_DATA" + value: "{% if deploy_type in ['data-master', 'data-client'] %}true{% else %}false{% endif %}" + - + name: "PROMETHEUS_USER" + value: "{{openshift_logging_elasticsearch_prometheus_sa}}" + + volumeMounts: + - name: elasticsearch + mountPath: /etc/elasticsearch/secret + readOnly: true + - name: elasticsearch-config + mountPath: /usr/share/java/elasticsearch/config + readOnly: true + - name: elasticsearch-storage + mountPath: /elasticsearch/persistent + readinessProbe: + exec: + command: + - "/usr/share/java/elasticsearch/probe/readiness.sh" + initialDelaySeconds: 10 + timeoutSeconds: 30 + periodSeconds: 5 + volumes: + - name: proxy-tls + secret: + secretName: prometheus-tls + - name: elasticsearch + secret: + secretName: logging-elasticsearch + - name: elasticsearch-config + configMap: + name: logging-elasticsearch + - name: elasticsearch-storage +{% if openshift_logging_elasticsearch_storage_type == 'pvc' %} + persistentVolumeClaim: + claimName: {{ openshift_logging_elasticsearch_pvc_name }} +{% elif openshift_logging_elasticsearch_storage_type == 'hostmount' %} + hostPath: + path: {{ openshift_logging_elasticsearch_hostmount_path }} +{% else %} + emptydir: {} +{% endif %} diff --git a/roles/openshift_logging_elasticsearch/templates/2.x/logging-metrics-role.j2 b/roles/openshift_logging_elasticsearch/templates/2.x/logging-metrics-role.j2 new file mode 100644 index 000000000..d9800e5a5 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/2.x/logging-metrics-role.j2 @@ -0,0 +1,31 @@ +--- +apiVersion: v1 +kind: List +items: +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: Role + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: prometheus-metrics-viewer + namespace: {{ namespace }} + rules: + - apiGroups: + - metrics.openshift.io + resources: + - prometheus + verbs: + - view +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: RoleBinding + metadata: + name: prometheus-metrics-viewer + namespace: {{ namespace }} + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-metrics-viewer + subjects: + - kind: ServiceAccount + namespace: {{ role_namespace }} + name: {{ role_user }} diff --git a/roles/openshift_logging_elasticsearch/templates/2.x/pvc.j2 
b/roles/openshift_logging_elasticsearch/templates/2.x/pvc.j2 new file mode 100644 index 000000000..3c6896df4 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/2.x/pvc.j2 @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{obj_name}} + labels: + logging-infra: support +{% if annotations is defined %} + annotations: +{% for key,value in annotations.items() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} +spec: +{% if pv_selector is defined and pv_selector is mapping %} + selector: + matchLabels: +{% for key,value in pv_selector.items() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} + accessModes: +{% for mode in access_modes %} + - {{ mode }} +{% endfor %} + resources: + requests: + storage: {{size}} +{% if storage_class_name is defined %} + storageClassName: {{ storage_class_name }} +{% endif %} diff --git a/roles/openshift_logging_elasticsearch/templates/2.x/rolebinding.j2 b/roles/openshift_logging_elasticsearch/templates/2.x/rolebinding.j2 new file mode 100644 index 000000000..fcd4e87cc --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/2.x/rolebinding.j2 @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: RoleBinding +metadata: + name: {{obj_name}} +roleRef: +{% if roleRef.kind is defined %} + kind: {{ roleRef.kind }} +{% endif %} + name: {{ roleRef.name }} +subjects: +{% for sub in subjects %} + - kind: {{ sub.kind }} + name: {{ sub.name }} +{% endfor %} diff --git a/roles/openshift_logging_elasticsearch/templates/2.x/route_reencrypt.j2 b/roles/openshift_logging_elasticsearch/templates/2.x/route_reencrypt.j2 new file mode 100644 index 000000000..d2e8b8bcb --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/2.x/route_reencrypt.j2 @@ -0,0 +1,36 @@ +apiVersion: "v1" +kind: "Route" +metadata: + name: "{{obj_name}}" +{% if labels is defined%} + labels: +{% for key, value in labels.items() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} +spec: + host: {{ route_host }} + tls: +{% if tls_key is defined and tls_key | length > 0 %} + key: | +{{ tls_key|indent(6, true) }} +{% if tls_cert is defined and tls_cert | length > 0 %} + certificate: | +{{ tls_cert|indent(6, true) }} +{% endif %} +{% endif %} + caCertificate: | +{% for line in tls_ca_cert.split('\n') %} + {{ line }} +{% endfor %} + destinationCACertificate: | +{% for line in tls_dest_ca_cert.split('\n') %} + {{ line }} +{% endfor %} + termination: reencrypt +{% if edge_term_policy is defined and edge_term_policy | length > 0 %} + insecureEdgeTerminationPolicy: {{ edge_term_policy }} +{% endif %} + to: + kind: Service + name: {{ service_name }} diff --git a/roles/openshift_logging_elasticsearch/templates/5.x/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/5.x/elasticsearch.yml.j2 new file mode 100644 index 000000000..009471d2c --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/5.x/elasticsearch.yml.j2 @@ -0,0 +1,74 @@ +cluster: + name: ${CLUSTER_NAME} + +script: + inline: true + stored: true + +node: + name: ${DC_NAME} + master: ${IS_MASTER} + data: ${HAS_DATA} + max_local_storage_nodes: 1 + +network: + host: 0.0.0.0 + +cloud: + kubernetes: + service: ${SERVICE_DNS} + namespace: ${NAMESPACE} + +discovery.zen: + hosts_provider: kubernetes + minimum_master_nodes: ${NODE_QUORUM} + +gateway: + recover_after_nodes: ${NODE_QUORUM} + expected_nodes: ${RECOVER_EXPECTED_NODES} + recover_after_time: ${RECOVER_AFTER_TIME} + +io.fabric8.elasticsearch.kibana.mapping.app: 
/usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json +io.fabric8.elasticsearch.kibana.mapping.ops: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json +io.fabric8.elasticsearch.kibana.mapping.empty: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json + +openshift.config: + use_common_data_model: true + project_index_prefix: "project" + time_field_name: "@timestamp" + +openshift.searchguard: + keystore.path: /etc/elasticsearch/secret/admin.jks + truststore.path: /etc/elasticsearch/secret/searchguard.truststore + +openshift.operations.allow_cluster_reader: {{allow_cluster_reader | default (false)}} + +openshift.kibana.index.mode: {{es_kibana_index_mode | default('unique')}} + +path: + data: /elasticsearch/persistent/${CLUSTER_NAME}/data + logs: /elasticsearch/${CLUSTER_NAME}/logs + +searchguard: + authcz.admin_dn: + - CN=system.admin,OU=OpenShift,O=Logging + config_index_name: ".searchguard.${DC_NAME}" + ssl: + transport: + enabled: true + enforce_hostname_verification: false + keystore_type: JKS + keystore_filepath: /etc/elasticsearch/secret/searchguard.key + keystore_password: kspass + truststore_type: JKS + truststore_filepath: /etc/elasticsearch/secret/searchguard.truststore + truststore_password: tspass + http: + enabled: true + keystore_type: JKS + keystore_filepath: /etc/elasticsearch/secret/key + keystore_password: kspass + clientauth_mode: OPTIONAL + truststore_type: JKS + truststore_filepath: /etc/elasticsearch/secret/truststore + truststore_password: tspass diff --git a/roles/openshift_logging_elasticsearch/templates/5.x/es.j2 b/roles/openshift_logging_elasticsearch/templates/5.x/es.j2 new file mode 100644 index 000000000..bcfd6f273 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/5.x/es.j2 @@ -0,0 +1,194 @@ +apiVersion: "v1" +kind: "DeploymentConfig" +metadata: + name: "{{deploy_name}}" + labels: + provider: openshift + component: "{{component}}" + deployment: "{{deploy_name}}" + logging-infra: "{{logging_component}}" +spec: + replicas: {{es_replicas|default(1)}} + revisionHistoryLimit: 0 + selector: + provider: openshift + component: "{{component}}" + deployment: "{{deploy_name}}" + logging-infra: "{{logging_component}}" + strategy: + type: Recreate + triggers: [] + template: + metadata: + name: "{{deploy_name}}" + labels: + logging-infra: "{{logging_component}}" + provider: openshift + component: "{{component}}" + deployment: "{{deploy_name}}" + spec: + terminationGracePeriod: 600 + serviceAccountName: aggregated-logging-elasticsearch + securityContext: + supplementalGroups: +{% for group in es_storage_groups %} + - {{group}} +{% endfor %} +{% if es_node_selector is iterable and es_node_selector | length > 0 %} + nodeSelector: +{% for key, value in es_node_selector.items() %} + {{key}}: "{{value}}" +{% endfor %} +{% endif %} + containers: + - name: proxy + image: {{ proxy_image }} + imagePullPolicy: IfNotPresent + args: + - --upstream-ca=/etc/elasticsearch/secret/admin-ca + - --https-address=:4443 + - -provider=openshift + - -client-id={{openshift_logging_elasticsearch_prometheus_sa}} + - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token + - -cookie-secret={{ 16 | lib_utils_oo_random_word | b64encode }} + - -upstream=https://localhost:9200 + - '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}' + - '-openshift-delegate-urls={"/": 
{"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}' + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - -pass-access-token + - -pass-user-headers + ports: + - containerPort: 4443 + name: proxy + protocol: TCP + volumeMounts: + - mountPath: /etc/tls/private + name: proxy-tls + readOnly: true + - mountPath: /etc/elasticsearch/secret + name: elasticsearch + readOnly: true + resources: + limits: + memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}" + requests: + cpu: "{{openshift_logging_elasticsearch_proxy_cpu_request }}" + memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}" + - + name: "elasticsearch" + image: {{image}} + imagePullPolicy: IfNotPresent + resources: + limits: +{% if es_cpu_limit is defined and es_cpu_limit is not none and es_cpu_limit != '' %} + cpu: "{{es_cpu_limit}}" +{% endif %} + memory: "{{es_memory_limit}}" + requests: + cpu: "{{es_cpu_request}}" + memory: "{{es_memory_limit}}" +{% if es_container_security_context %} + securityContext: {{ es_container_security_context | to_yaml }} +{% endif %} + ports: + - + containerPort: 9200 + name: "restapi" + - + containerPort: 9300 + name: "cluster" + env: + - + name: "DC_NAME" + value: "{{deploy_name}}" + - + name: "NAMESPACE" + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - + name: "KUBERNETES_TRUST_CERT" + value: "true" + - + name: "SERVICE_DNS" + value: "logging-{{es_cluster_name}}-cluster" + - + name: "CLUSTER_NAME" + value: "logging-{{es_cluster_name}}" + - + name: "INSTANCE_RAM" + value: "{{openshift_logging_elasticsearch_memory_limit}}" + - + name: "HEAP_DUMP_LOCATION" + value: "/elasticsearch/persistent/heapdump.hprof" + - + name: "NODE_QUORUM" + value: "{{es_node_quorum | int}}" + - + name: "RECOVER_EXPECTED_NODES" + value: "{{es_recover_expected_nodes}}" + - + name: "RECOVER_AFTER_TIME" + value: "{{openshift_logging_elasticsearch_recover_after_time}}" + - + name: "READINESS_PROBE_TIMEOUT" + value: "30" + - + name: "POD_LABEL" + value: "component={{component}}" + - + name: "IS_MASTER" + value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}" + + - + name: "HAS_DATA" + value: "{% if deploy_type in ['data-master', 'data-client'] %}true{% else %}false{% endif %}" + - + name: "PROMETHEUS_USER" + value: "{{openshift_logging_elasticsearch_prometheus_sa}}" + + - + name: "PRIMARY_SHARDS" + value: "{{ es_number_of_shards | default ('1') }}" + + - + name: "REPLICA_SHARDS" + value: "{{ es_number_of_replicas | default ('0') }}" + + volumeMounts: + - name: elasticsearch + mountPath: /etc/elasticsearch/secret + readOnly: true + - name: elasticsearch-config + mountPath: /usr/share/java/elasticsearch/config + readOnly: true + - name: elasticsearch-storage + mountPath: /elasticsearch/persistent + readinessProbe: + exec: + command: + - "/usr/share/elasticsearch/probe/readiness.sh" + initialDelaySeconds: 10 + timeoutSeconds: 30 + periodSeconds: 5 + volumes: + - name: proxy-tls + secret: + secretName: prometheus-tls + - name: elasticsearch + secret: + secretName: logging-elasticsearch + - name: elasticsearch-config + configMap: + name: logging-elasticsearch + - name: elasticsearch-storage +{% if openshift_logging_elasticsearch_storage_type == 'pvc' %} + persistentVolumeClaim: + claimName: {{ openshift_logging_elasticsearch_pvc_name }} +{% elif openshift_logging_elasticsearch_storage_type == 'hostmount' %} + hostPath: + path: {{ 
openshift_logging_elasticsearch_hostmount_path }} +{% else %} + emptydir: {} +{% endif %} diff --git a/roles/openshift_logging_elasticsearch/templates/5.x/log4j2.properties.j2 b/roles/openshift_logging_elasticsearch/templates/5.x/log4j2.properties.j2 new file mode 100644 index 000000000..1e78e4ea0 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/5.x/log4j2.properties.j2 @@ -0,0 +1,78 @@ +status = error + +# log action execution errors for easier debugging +logger.action.name = org.elasticsearch.action +logger.action.level = debug + +appender.console.type = Console +appender.console.name = console +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n + +appender.rolling.type = RollingFile +appender.rolling.name = rolling +appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log +appender.rolling.layout.type = PatternLayout +appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n +appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.log +appender.rolling.policies.type = Policies +appender.rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling.policies.time.interval = 1 +appender.rolling.policies.time.modulate = true + +rootLogger.level = info +{% if 'console' in root_logger %} +rootLogger.appenderRef.console.ref = console +{% endif %} +{% if 'file' in root_logger %} +rootLogger.appenderRef.rolling.ref = rolling +{% endif %} + +appender.deprecation_rolling.type = RollingFile +appender.deprecation_rolling.name = deprecation_rolling +appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log +appender.deprecation_rolling.layout.type = PatternLayout +appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n +appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz +appender.deprecation_rolling.policies.type = Policies +appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.deprecation_rolling.policies.size.size = 1GB +appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy +appender.deprecation_rolling.strategy.max = 4 + +logger.deprecation.name = org.elasticsearch.deprecation +logger.deprecation.level = warn +logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling +logger.deprecation.additivity = false + +appender.index_search_slowlog_rolling.type = RollingFile +appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling +appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log +appender.index_search_slowlog_rolling.layout.type = PatternLayout +appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n +appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%d{yyyy-MM-dd}.log +appender.index_search_slowlog_rolling.policies.type = Policies +appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.index_search_slowlog_rolling.policies.time.interval = 1 +appender.index_search_slowlog_rolling.policies.time.modulate = true + 
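Note on the rootLogger wiring in this 5.x log4j2 template: the appender references are gated on the role's `root_logger` list (the `{% if 'console' in root_logger %}` and `{% if 'file' in root_logger %}` guards above), which lines up with the `__es_log_appenders: ['file', 'console']` values kept in the role vars later in this patch. A minimal inventory sketch that would drive console-only output could look like the following; the public variable name is an assumption, since the defaults file is not part of this hunk:

    # hypothetical inventory fragment; the variable name is assumed, and only
    # 'file' and 'console' are meaningful entries
    openshift_logging_es_log_appenders:
      - console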
+logger.index_search_slowlog_rolling.name = index.search.slowlog +logger.index_search_slowlog_rolling.level = trace +logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling +logger.index_search_slowlog_rolling.additivity = false + +appender.index_indexing_slowlog_rolling.type = RollingFile +appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling +appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log +appender.index_indexing_slowlog_rolling.layout.type = PatternLayout +appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n +appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%d{yyyy-MM-dd}.log +appender.index_indexing_slowlog_rolling.policies.type = Policies +appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.index_indexing_slowlog_rolling.policies.time.interval = 1 +appender.index_indexing_slowlog_rolling.policies.time.modulate = true + +logger.index_indexing_slowlog.name = index.indexing.slowlog.index +logger.index_indexing_slowlog.level = trace +logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling +logger.index_indexing_slowlog.additivity = false diff --git a/roles/openshift_logging_elasticsearch/templates/5.x/logging-metrics-role.j2 b/roles/openshift_logging_elasticsearch/templates/5.x/logging-metrics-role.j2 new file mode 100644 index 000000000..d9800e5a5 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/5.x/logging-metrics-role.j2 @@ -0,0 +1,31 @@ +--- +apiVersion: v1 +kind: List +items: +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: Role + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: prometheus-metrics-viewer + namespace: {{ namespace }} + rules: + - apiGroups: + - metrics.openshift.io + resources: + - prometheus + verbs: + - view +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: RoleBinding + metadata: + name: prometheus-metrics-viewer + namespace: {{ namespace }} + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-metrics-viewer + subjects: + - kind: ServiceAccount + namespace: {{ role_namespace }} + name: {{ role_user }} diff --git a/roles/openshift_logging_elasticsearch/templates/5.x/pvc.j2 b/roles/openshift_logging_elasticsearch/templates/5.x/pvc.j2 new file mode 100644 index 000000000..3c6896df4 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/5.x/pvc.j2 @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{obj_name}} + labels: + logging-infra: support +{% if annotations is defined %} + annotations: +{% for key,value in annotations.items() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} +spec: +{% if pv_selector is defined and pv_selector is mapping %} + selector: + matchLabels: +{% for key,value in pv_selector.items() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} + accessModes: +{% for mode in access_modes %} + - {{ mode }} +{% endfor %} + resources: + requests: + storage: {{size}} +{% if storage_class_name is defined %} + storageClassName: {{ storage_class_name }} +{% endif %} diff --git a/roles/openshift_logging_elasticsearch/templates/5.x/rolebinding.j2 
b/roles/openshift_logging_elasticsearch/templates/5.x/rolebinding.j2 new file mode 100644 index 000000000..fcd4e87cc --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/5.x/rolebinding.j2 @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: RoleBinding +metadata: + name: {{obj_name}} +roleRef: +{% if roleRef.kind is defined %} + kind: {{ roleRef.kind }} +{% endif %} + name: {{ roleRef.name }} +subjects: +{% for sub in subjects %} + - kind: {{ sub.kind }} + name: {{ sub.name }} +{% endfor %} diff --git a/roles/openshift_logging_elasticsearch/templates/5.x/route_reencrypt.j2 b/roles/openshift_logging_elasticsearch/templates/5.x/route_reencrypt.j2 new file mode 100644 index 000000000..d2e8b8bcb --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/5.x/route_reencrypt.j2 @@ -0,0 +1,36 @@ +apiVersion: "v1" +kind: "Route" +metadata: + name: "{{obj_name}}" +{% if labels is defined%} + labels: +{% for key, value in labels.items() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} +spec: + host: {{ route_host }} + tls: +{% if tls_key is defined and tls_key | length > 0 %} + key: | +{{ tls_key|indent(6, true) }} +{% if tls_cert is defined and tls_cert | length > 0 %} + certificate: | +{{ tls_cert|indent(6, true) }} +{% endif %} +{% endif %} + caCertificate: | +{% for line in tls_ca_cert.split('\n') %} + {{ line }} +{% endfor %} + destinationCACertificate: | +{% for line in tls_dest_ca_cert.split('\n') %} + {{ line }} +{% endfor %} + termination: reencrypt +{% if edge_term_policy is defined and edge_term_policy | length > 0 %} + insecureEdgeTerminationPolicy: {{ edge_term_policy }} +{% endif %} + to: + kind: Service + name: {{ service_name }} diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2 deleted file mode 100644 index c7b2b2721..000000000 --- a/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2 +++ /dev/null @@ -1,105 +0,0 @@ -# you can override this using by setting a system property, for example -Des.logger.level=DEBUG -es.logger.level: INFO -rootLogger: ${es.logger.level}, {{root_logger}} -logger: - # log action execution errors for easier debugging - action: WARN - - # - # deprecation logging, turn to DEBUG to see them - deprecation: WARN, deprecation_log_file - - # reduce the logging for aws, too much is logged under the default INFO - com.amazonaws: WARN - - io.fabric8.elasticsearch: ${PLUGIN_LOGLEVEL} - io.fabric8.kubernetes: ${PLUGIN_LOGLEVEL} - - # aws will try to do some sketchy JMX stuff, but its not needed. 
- com.amazonaws.jmx.SdkMBeanRegistrySupport: ERROR - com.amazonaws.metrics.AwsSdkMetrics: ERROR - - org.apache.http: INFO - - # gateway - #gateway: DEBUG - #index.gateway: DEBUG - - # peer shard recovery - #indices.recovery: DEBUG - - # discovery - #discovery: TRACE - - index.search.slowlog: TRACE, index_search_slow_log_file - index.indexing.slowlog: TRACE, index_indexing_slow_log_file - - # search-guard - com.floragunn.searchguard: WARN - -additivity: - index.search.slowlog: false - index.indexing.slowlog: false - deprecation: false - -appender: - console: - type: console - layout: - type: consolePattern - conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %.1000m%n" - # need this filter until https://github.com/openshift/origin/issues/14515 is fixed - filter: - 1: - type: org.apache.log4j.varia.StringMatchFilter - StringToMatch: "SSL Problem illegal change cipher spec msg, conn state = 6, handshake state = 1" - AcceptOnMatch: false - - file: - type: dailyRollingFile - file: ${path.logs}/${cluster.name}.log - datePattern: "'.'yyyy-MM-dd" - layout: - type: pattern - conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" - # need this filter until https://github.com/openshift/origin/issues/14515 is fixed - filter: - 1: - type: org.apache.log4j.varia.StringMatchFilter - StringToMatch: "SSL Problem illegal change cipher spec msg, conn state = 6, handshake state = 1" - AcceptOnMatch: false - - # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files. - # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html - #file: - #type: extrasRollingFile - #file: ${path.logs}/${cluster.name}.log - #rollingPolicy: timeBased - #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz - #layout: - #type: pattern - #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" - - deprecation_log_file: - type: dailyRollingFile - file: ${path.logs}/${cluster.name}_deprecation.log - datePattern: "'.'yyyy-MM-dd" - layout: - type: pattern - conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" - - index_search_slow_log_file: - type: dailyRollingFile - file: ${path.logs}/${cluster.name}_index_search_slowlog.log - datePattern: "'.'yyyy-MM-dd" - layout: - type: pattern - conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" - - index_indexing_slow_log_file: - type: dailyRollingFile - file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log - datePattern: "'.'yyyy-MM-dd" - layout: - type: pattern - conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 deleted file mode 100644 index 65b08d970..000000000 --- a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 +++ /dev/null @@ -1,87 +0,0 @@ -cluster: - name: ${CLUSTER_NAME} - -script: - inline: on - indexed: on - -index: - number_of_shards: {{ es_number_of_shards | default ('1') }} - number_of_replicas: {{ es_number_of_replicas | default ('0') }} - unassigned.node_left.delayed_timeout: 2m - translog: - flush_threshold_size: 256mb - flush_threshold_period: 5m - -node: - name: ${DC_NAME} - master: ${IS_MASTER} - data: ${HAS_DATA} - max_local_storage_nodes: 1 - -network: - host: 0.0.0.0 - -cloud: - kubernetes: - pod_label: ${POD_LABEL} - pod_port: 9300 - namespace: ${NAMESPACE} - -discovery: - type: kubernetes - zen.ping.multicast.enabled: false - zen.minimum_master_nodes: ${NODE_QUORUM} - 
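The `zen.minimum_master_nodes: ${NODE_QUORUM}` setting in the 2.x config being removed here is filled from the `NODE_QUORUM` container env var, which the `es.j2` deployment templates in this patch set from `es_node_quorum`. The quorum itself is computed in the role rather than in these templates; as a rough sketch only (the majority formula is standard, but the cluster-size variable name below is assumed and not taken from this patch):

    # illustrative sketch; the real default lives in the role defaults, and the
    # cluster-size variable name is an assumption
    es_node_quorum: "{{ (openshift_logging_elasticsearch_replica_count | int // 2) + 1 }}"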
-gateway: - recover_after_nodes: ${NODE_QUORUM} - expected_nodes: ${RECOVER_EXPECTED_NODES} - recover_after_time: ${RECOVER_AFTER_TIME} - -io.fabric8.elasticsearch.authentication.users: ["system.logging.kibana", "system.logging.fluentd", "system.logging.curator", "system.admin"] -io.fabric8.elasticsearch.kibana.mapping.app: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json -io.fabric8.elasticsearch.kibana.mapping.ops: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json -io.fabric8.elasticsearch.kibana.mapping.empty: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json - -openshift.config: - use_common_data_model: true - project_index_prefix: "project" - time_field_name: "@timestamp" - -openshift.searchguard: - keystore.path: /etc/elasticsearch/secret/admin.jks - truststore.path: /etc/elasticsearch/secret/searchguard.truststore - -openshift.operations.allow_cluster_reader: {{allow_cluster_reader | default (false)}} - -openshift.kibana.index.mode: {{es_kibana_index_mode | default('unique')}} - -path: - data: /elasticsearch/persistent/${CLUSTER_NAME}/data - logs: /elasticsearch/${CLUSTER_NAME}/logs - work: /elasticsearch/${CLUSTER_NAME}/work - scripts: /elasticsearch/${CLUSTER_NAME}/scripts - -searchguard: - authcz.admin_dn: - - CN=system.admin,OU=OpenShift,O=Logging - config_index_name: ".searchguard.${DC_NAME}" - ssl: - transport: - enabled: true - enforce_hostname_verification: false - keystore_type: JKS - keystore_filepath: /etc/elasticsearch/secret/searchguard.key - keystore_password: kspass - truststore_type: JKS - truststore_filepath: /etc/elasticsearch/secret/searchguard.truststore - truststore_password: tspass - http: - enabled: true - keystore_type: JKS - keystore_filepath: /etc/elasticsearch/secret/key - keystore_password: kspass - clientauth_mode: OPTIONAL - truststore_type: JKS - truststore_filepath: /etc/elasticsearch/secret/truststore - truststore_password: tspass diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2 deleted file mode 100644 index b1d6a4489..000000000 --- a/roles/openshift_logging_elasticsearch/templates/es.j2 +++ /dev/null @@ -1,187 +0,0 @@ -apiVersion: "v1" -kind: "DeploymentConfig" -metadata: - name: "{{deploy_name}}" - labels: - provider: openshift - component: "{{component}}" - deployment: "{{deploy_name}}" - logging-infra: "{{logging_component}}" -spec: - replicas: {{es_replicas|default(1)}} - revisionHistoryLimit: 0 - selector: - provider: openshift - component: "{{component}}" - deployment: "{{deploy_name}}" - logging-infra: "{{logging_component}}" - strategy: - type: Recreate - triggers: [] - template: - metadata: - name: "{{deploy_name}}" - labels: - logging-infra: "{{logging_component}}" - provider: openshift - component: "{{component}}" - deployment: "{{deploy_name}}" - spec: - terminationGracePeriod: 600 - serviceAccountName: aggregated-logging-elasticsearch - securityContext: - supplementalGroups: -{% for group in es_storage_groups %} - - {{group}} -{% endfor %} -{% if es_node_selector is iterable and es_node_selector | length > 0 %} - nodeSelector: -{% for key, value in es_node_selector.items() %} - {{key}}: "{{value}}" -{% endfor %} -{% endif %} - containers: - - name: proxy - image: {{ proxy_image }} - imagePullPolicy: IfNotPresent - args: - - --upstream-ca=/etc/elasticsearch/secret/admin-ca - - --https-address=:4443 - - -provider=openshift - - 
-client-id={{openshift_logging_elasticsearch_prometheus_sa}} - - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token - - -cookie-secret={{ 16 | lib_utils_oo_random_word | b64encode }} - - -basic-auth-password={{ basic_auth_passwd }} - - -upstream=https://localhost:9200 - - '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}' - - '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}' - - --tls-cert=/etc/tls/private/tls.crt - - --tls-key=/etc/tls/private/tls.key - - -pass-access-token - - -pass-user-headers - ports: - - containerPort: 4443 - name: proxy - protocol: TCP - volumeMounts: - - mountPath: /etc/tls/private - name: proxy-tls - readOnly: true - - mountPath: /etc/elasticsearch/secret - name: elasticsearch - readOnly: true - resources: - limits: - memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}" - requests: - cpu: "{{openshift_logging_elasticsearch_proxy_cpu_request }}" - memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}" - - - name: "elasticsearch" - image: {{image}} - imagePullPolicy: IfNotPresent - resources: - limits: -{% if es_cpu_limit is defined and es_cpu_limit is not none and es_cpu_limit != '' %} - cpu: "{{es_cpu_limit}}" -{% endif %} - memory: "{{es_memory_limit}}" - requests: - cpu: "{{es_cpu_request}}" - memory: "{{es_memory_limit}}" -{% if es_container_security_context %} - securityContext: {{ es_container_security_context | to_yaml }} -{% endif %} - ports: - - - containerPort: 9200 - name: "restapi" - - - containerPort: 9300 - name: "cluster" - env: - - - name: "DC_NAME" - value: "{{deploy_name}}" - - - name: "NAMESPACE" - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - - name: "KUBERNETES_TRUST_CERT" - value: "true" - - - name: "SERVICE_DNS" - value: "logging-{{es_cluster_name}}-cluster" - - - name: "CLUSTER_NAME" - value: "logging-{{es_cluster_name}}" - - - name: "INSTANCE_RAM" - value: "{{openshift_logging_elasticsearch_memory_limit}}" - - - name: "HEAP_DUMP_LOCATION" - value: "/elasticsearch/persistent/heapdump.hprof" - - - name: "NODE_QUORUM" - value: "{{es_node_quorum | int}}" - - - name: "RECOVER_EXPECTED_NODES" - value: "{{es_recover_expected_nodes}}" - - - name: "RECOVER_AFTER_TIME" - value: "{{openshift_logging_elasticsearch_recover_after_time}}" - - - name: "READINESS_PROBE_TIMEOUT" - value: "30" - - - name: "POD_LABEL" - value: "component={{component}}" - - - name: "IS_MASTER" - value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}" - - - - name: "HAS_DATA" - value: "{% if deploy_type in ['data-master', 'data-client'] %}true{% else %}false{% endif %}" - - - name: "PROMETHEUS_USER" - value: "{{openshift_logging_elasticsearch_prometheus_sa}}" - - volumeMounts: - - name: elasticsearch - mountPath: /etc/elasticsearch/secret - readOnly: true - - name: elasticsearch-config - mountPath: /usr/share/java/elasticsearch/config - readOnly: true - - name: elasticsearch-storage - mountPath: /elasticsearch/persistent - readinessProbe: - exec: - command: - - "/usr/share/java/elasticsearch/probe/readiness.sh" - initialDelaySeconds: 10 - timeoutSeconds: 30 - periodSeconds: 5 - volumes: - - name: proxy-tls - secret: - secretName: prometheus-tls - - name: elasticsearch - secret: - secretName: logging-elasticsearch - - name: elasticsearch-config - configMap: - name: 
logging-elasticsearch - - name: elasticsearch-storage -{% if openshift_logging_elasticsearch_storage_type == 'pvc' %} - persistentVolumeClaim: - claimName: {{ openshift_logging_elasticsearch_pvc_name }} -{% elif openshift_logging_elasticsearch_storage_type == 'hostmount' %} - hostPath: - path: {{ openshift_logging_elasticsearch_hostmount_path }} -{% else %} - emptydir: {} -{% endif %} diff --git a/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2 b/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2 deleted file mode 100644 index d9800e5a5..000000000 --- a/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2 +++ /dev/null @@ -1,31 +0,0 @@ ---- -apiVersion: v1 -kind: List -items: -- apiVersion: rbac.authorization.k8s.io/v1beta1 - kind: Role - metadata: - annotations: - rbac.authorization.kubernetes.io/autoupdate: "true" - name: prometheus-metrics-viewer - namespace: {{ namespace }} - rules: - - apiGroups: - - metrics.openshift.io - resources: - - prometheus - verbs: - - view -- apiVersion: rbac.authorization.k8s.io/v1beta1 - kind: RoleBinding - metadata: - name: prometheus-metrics-viewer - namespace: {{ namespace }} - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: prometheus-metrics-viewer - subjects: - - kind: ServiceAccount - namespace: {{ role_namespace }} - name: {{ role_user }} diff --git a/roles/openshift_logging_elasticsearch/templates/pvc.j2 b/roles/openshift_logging_elasticsearch/templates/pvc.j2 deleted file mode 100644 index 3c6896df4..000000000 --- a/roles/openshift_logging_elasticsearch/templates/pvc.j2 +++ /dev/null @@ -1,30 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: {{obj_name}} - labels: - logging-infra: support -{% if annotations is defined %} - annotations: -{% for key,value in annotations.items() %} - {{key}}: {{value}} -{% endfor %} -{% endif %} -spec: -{% if pv_selector is defined and pv_selector is mapping %} - selector: - matchLabels: -{% for key,value in pv_selector.items() %} - {{key}}: {{value}} -{% endfor %} -{% endif %} - accessModes: -{% for mode in access_modes %} - - {{ mode }} -{% endfor %} - resources: - requests: - storage: {{size}} -{% if storage_class_name is defined %} - storageClassName: {{ storage_class_name }} -{% endif %} diff --git a/roles/openshift_logging_elasticsearch/templates/rolebinding.j2 b/roles/openshift_logging_elasticsearch/templates/rolebinding.j2 deleted file mode 100644 index fcd4e87cc..000000000 --- a/roles/openshift_logging_elasticsearch/templates/rolebinding.j2 +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: RoleBinding -metadata: - name: {{obj_name}} -roleRef: -{% if roleRef.kind is defined %} - kind: {{ roleRef.kind }} -{% endif %} - name: {{ roleRef.name }} -subjects: -{% for sub in subjects %} - - kind: {{ sub.kind }} - name: {{ sub.name }} -{% endfor %} diff --git a/roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2 b/roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2 deleted file mode 100644 index d2e8b8bcb..000000000 --- a/roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2 +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: "v1" -kind: "Route" -metadata: - name: "{{obj_name}}" -{% if labels is defined%} - labels: -{% for key, value in labels.items() %} - {{key}}: {{value}} -{% endfor %} -{% endif %} -spec: - host: {{ route_host }} - tls: -{% if tls_key is defined and tls_key | length > 0 %} - key: | -{{ tls_key|indent(6, true) }} -{% if tls_cert is defined and tls_cert | 
length > 0 %} - certificate: | -{{ tls_cert|indent(6, true) }} -{% endif %} -{% endif %} - caCertificate: | -{% for line in tls_ca_cert.split('\n') %} - {{ line }} -{% endfor %} - destinationCACertificate: | -{% for line in tls_dest_ca_cert.split('\n') %} - {{ line }} -{% endfor %} - termination: reencrypt -{% if edge_term_policy is defined and edge_term_policy | length > 0 %} - insecureEdgeTerminationPolicy: {{ edge_term_policy }} -{% endif %} - to: - kind: Service - name: {{ service_name }} diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml index 122231031..aa8d583ba 100644 --- a/roles/openshift_logging_elasticsearch/vars/main.yml +++ b/roles/openshift_logging_elasticsearch/vars/main.yml @@ -4,7 +4,6 @@ __allowed_es_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"] __allowed_es_types: ["data-master", "data-client", "master", "client"] __es_log_appenders: ['file', 'console'] __kibana_index_modes: ["unique", "shared_ops"] -__es_version: "2.4.4" __es_local_curl: "curl -s --cacert /etc/elasticsearch/secret/admin-ca --cert /etc/elasticsearch/secret/admin-cert --key /etc/elasticsearch/secret/admin-key" diff --git a/roles/openshift_logging_eventrouter/files/2.x/eventrouter-template.yaml b/roles/openshift_logging_eventrouter/files/2.x/eventrouter-template.yaml new file mode 100644 index 000000000..cc01c010d --- /dev/null +++ b/roles/openshift_logging_eventrouter/files/2.x/eventrouter-template.yaml @@ -0,0 +1,103 @@ +# this openshift template should match (except nodeSelector) jinja2 template in +# ../templates/eventrouter-template.j2 +kind: Template +apiVersion: v1 +metadata: + name: eventrouter-template + annotations: + description: "A pod forwarding kubernetes events to EFK aggregated logging stack." 
+ tags: "events,EFK,logging" +objects: + - kind: ServiceAccount + apiVersion: v1 + metadata: + name: aggregated-logging-eventrouter + - kind: ClusterRole + apiVersion: v1 + metadata: + name: event-reader + rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "watch", "list"] + - kind: ConfigMap + apiVersion: v1 + metadata: + name: logging-eventrouter + data: + config.json: |- + { + "sink": "${SINK}" + } + - kind: DeploymentConfig + apiVersion: v1 + metadata: + name: logging-eventrouter + labels: + component: eventrouter + logging-infra: eventrouter + provider: openshift + spec: + selector: + component: eventrouter + logging-infra: eventrouter + provider: openshift + replicas: ${REPLICAS} + template: + metadata: + labels: + component: eventrouter + logging-infra: eventrouter + provider: openshift + name: logging-eventrouter + spec: + serviceAccount: aggregated-logging-eventrouter + serviceAccountName: aggregated-logging-eventrouter + containers: + - name: kube-eventrouter + image: ${IMAGE} + imagePullPolicy: IfNotPresent + resources: + limits: + memory: ${MEMORY} + cpu: ${CPU} + requires: + memory: ${MEMORY} + volumeMounts: + - name: config-volume + mountPath: /etc/eventrouter + volumes: + - name: config-volume + configMap: + name: logging-eventrouter + - kind: ClusterRoleBinding + apiVersion: v1 + metadata: + name: event-reader-binding + subjects: + - kind: ServiceAccount + name: aggregated-logging-eventrouter + namespace: ${NAMESPACE} + roleRef: + kind: ClusterRole + name: event-reader + +parameters: + - name: SINK + displayName: Sink + value: stdout + - name: REPLICAS + displayName: Replicas + value: "1" + - name: IMAGE + displayName: Image + value: "docker.io/openshift/origin-logging-eventrouter:latest" + - name: MEMORY + displayName: Memory + value: "128Mi" + - name: CPU + displayName: CPU + value: "100m" + - name: NAMESPACE + displayName: Namespace + value: default diff --git a/roles/openshift_logging_eventrouter/files/5.x/eventrouter-template.yaml b/roles/openshift_logging_eventrouter/files/5.x/eventrouter-template.yaml new file mode 100644 index 000000000..cc01c010d --- /dev/null +++ b/roles/openshift_logging_eventrouter/files/5.x/eventrouter-template.yaml @@ -0,0 +1,103 @@ +# this openshift template should match (except nodeSelector) jinja2 template in +# ../templates/eventrouter-template.j2 +kind: Template +apiVersion: v1 +metadata: + name: eventrouter-template + annotations: + description: "A pod forwarding kubernetes events to EFK aggregated logging stack." 
+ tags: "events,EFK,logging" +objects: + - kind: ServiceAccount + apiVersion: v1 + metadata: + name: aggregated-logging-eventrouter + - kind: ClusterRole + apiVersion: v1 + metadata: + name: event-reader + rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "watch", "list"] + - kind: ConfigMap + apiVersion: v1 + metadata: + name: logging-eventrouter + data: + config.json: |- + { + "sink": "${SINK}" + } + - kind: DeploymentConfig + apiVersion: v1 + metadata: + name: logging-eventrouter + labels: + component: eventrouter + logging-infra: eventrouter + provider: openshift + spec: + selector: + component: eventrouter + logging-infra: eventrouter + provider: openshift + replicas: ${REPLICAS} + template: + metadata: + labels: + component: eventrouter + logging-infra: eventrouter + provider: openshift + name: logging-eventrouter + spec: + serviceAccount: aggregated-logging-eventrouter + serviceAccountName: aggregated-logging-eventrouter + containers: + - name: kube-eventrouter + image: ${IMAGE} + imagePullPolicy: IfNotPresent + resources: + limits: + memory: ${MEMORY} + cpu: ${CPU} + requires: + memory: ${MEMORY} + volumeMounts: + - name: config-volume + mountPath: /etc/eventrouter + volumes: + - name: config-volume + configMap: + name: logging-eventrouter + - kind: ClusterRoleBinding + apiVersion: v1 + metadata: + name: event-reader-binding + subjects: + - kind: ServiceAccount + name: aggregated-logging-eventrouter + namespace: ${NAMESPACE} + roleRef: + kind: ClusterRole + name: event-reader + +parameters: + - name: SINK + displayName: Sink + value: stdout + - name: REPLICAS + displayName: Replicas + value: "1" + - name: IMAGE + displayName: Image + value: "docker.io/openshift/origin-logging-eventrouter:latest" + - name: MEMORY + displayName: Memory + value: "128Mi" + - name: CPU + displayName: CPU + value: "100m" + - name: NAMESPACE + displayName: Namespace + value: default diff --git a/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml b/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml deleted file mode 100644 index cc01c010d..000000000 --- a/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml +++ /dev/null @@ -1,103 +0,0 @@ -# this openshift template should match (except nodeSelector) jinja2 template in -# ../templates/eventrouter-template.j2 -kind: Template -apiVersion: v1 -metadata: - name: eventrouter-template - annotations: - description: "A pod forwarding kubernetes events to EFK aggregated logging stack." 
- tags: "events,EFK,logging" -objects: - - kind: ServiceAccount - apiVersion: v1 - metadata: - name: aggregated-logging-eventrouter - - kind: ClusterRole - apiVersion: v1 - metadata: - name: event-reader - rules: - - apiGroups: [""] - resources: ["events"] - verbs: ["get", "watch", "list"] - - kind: ConfigMap - apiVersion: v1 - metadata: - name: logging-eventrouter - data: - config.json: |- - { - "sink": "${SINK}" - } - - kind: DeploymentConfig - apiVersion: v1 - metadata: - name: logging-eventrouter - labels: - component: eventrouter - logging-infra: eventrouter - provider: openshift - spec: - selector: - component: eventrouter - logging-infra: eventrouter - provider: openshift - replicas: ${REPLICAS} - template: - metadata: - labels: - component: eventrouter - logging-infra: eventrouter - provider: openshift - name: logging-eventrouter - spec: - serviceAccount: aggregated-logging-eventrouter - serviceAccountName: aggregated-logging-eventrouter - containers: - - name: kube-eventrouter - image: ${IMAGE} - imagePullPolicy: IfNotPresent - resources: - limits: - memory: ${MEMORY} - cpu: ${CPU} - requires: - memory: ${MEMORY} - volumeMounts: - - name: config-volume - mountPath: /etc/eventrouter - volumes: - - name: config-volume - configMap: - name: logging-eventrouter - - kind: ClusterRoleBinding - apiVersion: v1 - metadata: - name: event-reader-binding - subjects: - - kind: ServiceAccount - name: aggregated-logging-eventrouter - namespace: ${NAMESPACE} - roleRef: - kind: ClusterRole - name: event-reader - -parameters: - - name: SINK - displayName: Sink - value: stdout - - name: REPLICAS - displayName: Replicas - value: "1" - - name: IMAGE - displayName: Image - value: "docker.io/openshift/origin-logging-eventrouter:latest" - - name: MEMORY - displayName: Memory - value: "128Mi" - - name: CPU - displayName: CPU - value: "100m" - - name: NAMESPACE - displayName: Namespace - value: default diff --git a/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml index cbbc6a8ec..fffdd9f8b 100644 --- a/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml +++ b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml @@ -23,7 +23,7 @@ # create EventRouter deployment config - name: Generate EventRouter template template: - src: eventrouter-template.j2 + src: "{{ __base_file_dir }}/eventrouter-template.j2" dest: "{{ tempdir }}/templates/eventrouter-template.yaml" vars: node_selector: "{{ openshift_logging_eventrouter_nodeselector | default({}) }}" diff --git a/roles/openshift_logging_eventrouter/templates/5.x/eventrouter-template.j2 b/roles/openshift_logging_eventrouter/templates/5.x/eventrouter-template.j2 new file mode 100644 index 000000000..3bd29163b --- /dev/null +++ b/roles/openshift_logging_eventrouter/templates/5.x/eventrouter-template.j2 @@ -0,0 +1,109 @@ +# this jinja2 template should always match (except nodeSelector) openshift template in +# ../files/eventrouter-template.yaml +kind: Template +apiVersion: v1 +metadata: + name: eventrouter-template + annotations: + description: "A pod forwarding kubernetes events to EFK aggregated logging stack." 
+ tags: "events,EFK,logging" +objects: + - kind: ServiceAccount + apiVersion: v1 + metadata: + name: aggregated-logging-eventrouter + - kind: ClusterRole + apiVersion: v1 + metadata: + name: event-reader + rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "watch", "list"] + - kind: ConfigMap + apiVersion: v1 + metadata: + name: logging-eventrouter + data: + config.json: |- + { + "sink": "${SINK}" + } + - kind: DeploymentConfig + apiVersion: v1 + metadata: + name: logging-eventrouter + labels: + component: eventrouter + logging-infra: eventrouter + provider: openshift + spec: + selector: + component: eventrouter + logging-infra: eventrouter + provider: openshift + replicas: "${{ '{{' }}REPLICAS{{ '}}' }}" + template: + metadata: + labels: + component: eventrouter + logging-infra: eventrouter + provider: openshift + name: logging-eventrouter + spec: + serviceAccount: aggregated-logging-eventrouter + serviceAccountName: aggregated-logging-eventrouter +{% if node_selector is iterable and node_selector | length > 0 %} + nodeSelector: +{% for key, value in node_selector.items() %} + {{ key }}: "{{ value }}" +{% endfor %} +{% endif %} + containers: + - name: kube-eventrouter + image: ${IMAGE} + imagePullPolicy: IfNotPresent + resources: + limits: + memory: ${MEMORY} + requires: + cpu: ${CPU} + memory: ${MEMORY} + volumeMounts: + - name: config-volume + mountPath: /etc/eventrouter + volumes: + - name: config-volume + configMap: + name: logging-eventrouter + - kind: ClusterRoleBinding + apiVersion: v1 + metadata: + name: event-reader-binding + subjects: + - kind: ServiceAccount + name: aggregated-logging-eventrouter + namespace: ${NAMESPACE} + roleRef: + kind: ClusterRole + name: event-reader + +parameters: + - name: SINK + displayName: Sink + value: stdout + - name: REPLICAS + displayName: Replicas + value: "1" + - name: IMAGE + displayName: Image + value: "docker.io/openshift/origin-logging-eventrouter:latest" + - name: MEMORY + displayName: Memory + value: "128Mi" + - name: CPU + displayName: CPU + value: "100m" + - name: NAMESPACE + displayName: Namespace + value: default diff --git a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 deleted file mode 100644 index 3bd29163b..000000000 --- a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 +++ /dev/null @@ -1,109 +0,0 @@ -# this jinja2 template should always match (except nodeSelector) openshift template in -# ../files/eventrouter-template.yaml -kind: Template -apiVersion: v1 -metadata: - name: eventrouter-template - annotations: - description: "A pod forwarding kubernetes events to EFK aggregated logging stack." 
- tags: "events,EFK,logging" -objects: - - kind: ServiceAccount - apiVersion: v1 - metadata: - name: aggregated-logging-eventrouter - - kind: ClusterRole - apiVersion: v1 - metadata: - name: event-reader - rules: - - apiGroups: [""] - resources: ["events"] - verbs: ["get", "watch", "list"] - - kind: ConfigMap - apiVersion: v1 - metadata: - name: logging-eventrouter - data: - config.json: |- - { - "sink": "${SINK}" - } - - kind: DeploymentConfig - apiVersion: v1 - metadata: - name: logging-eventrouter - labels: - component: eventrouter - logging-infra: eventrouter - provider: openshift - spec: - selector: - component: eventrouter - logging-infra: eventrouter - provider: openshift - replicas: "${{ '{{' }}REPLICAS{{ '}}' }}" - template: - metadata: - labels: - component: eventrouter - logging-infra: eventrouter - provider: openshift - name: logging-eventrouter - spec: - serviceAccount: aggregated-logging-eventrouter - serviceAccountName: aggregated-logging-eventrouter -{% if node_selector is iterable and node_selector | length > 0 %} - nodeSelector: -{% for key, value in node_selector.items() %} - {{ key }}: "{{ value }}" -{% endfor %} -{% endif %} - containers: - - name: kube-eventrouter - image: ${IMAGE} - imagePullPolicy: IfNotPresent - resources: - limits: - memory: ${MEMORY} - requires: - cpu: ${CPU} - memory: ${MEMORY} - volumeMounts: - - name: config-volume - mountPath: /etc/eventrouter - volumes: - - name: config-volume - configMap: - name: logging-eventrouter - - kind: ClusterRoleBinding - apiVersion: v1 - metadata: - name: event-reader-binding - subjects: - - kind: ServiceAccount - name: aggregated-logging-eventrouter - namespace: ${NAMESPACE} - roleRef: - kind: ClusterRole - name: event-reader - -parameters: - - name: SINK - displayName: Sink - value: stdout - - name: REPLICAS - displayName: Replicas - value: "1" - - name: IMAGE - displayName: Image - value: "docker.io/openshift/origin-logging-eventrouter:latest" - - name: MEMORY - displayName: Memory - value: "128Mi" - - name: CPU - displayName: CPU - value: "100m" - - name: NAMESPACE - displayName: Namespace - value: default diff --git a/roles/openshift_logging_fluentd/files/2.x/fluentd-throttle-config.yaml b/roles/openshift_logging_fluentd/files/2.x/fluentd-throttle-config.yaml new file mode 100644 index 000000000..375621ff1 --- /dev/null +++ b/roles/openshift_logging_fluentd/files/2.x/fluentd-throttle-config.yaml @@ -0,0 +1,7 @@ +# Logging example fluentd throttling config file + +#example-project: +# read_lines_limit: 10 +# +#.operations: +# read_lines_limit: 100 diff --git a/roles/openshift_logging_fluentd/files/2.x/secure-forward.conf b/roles/openshift_logging_fluentd/files/2.x/secure-forward.conf new file mode 100644 index 000000000..87410c1c5 --- /dev/null +++ b/roles/openshift_logging_fluentd/files/2.x/secure-forward.conf @@ -0,0 +1,26 @@ +# +# @type secure_forward + +# self_hostname ${HOSTNAME} +# shared_key + +# secure yes +# enable_strict_verification yes + +# ca_cert_path /etc/fluent/keys/your_ca_cert +# ca_private_key_path /etc/fluent/keys/your_private_key + # for private CA secret key +# ca_private_key_passphrase passphrase + +# + # or IP +# host server.fqdn.example.com +# port 24284 +# +# + # ip address to connect +# host 203.0.113.8 + # specify hostlabel for FQDN verification if ipaddress is used for host +# hostlabel server.fqdn.example.com +# +# diff --git a/roles/openshift_logging_fluentd/files/5.x/fluentd-throttle-config.yaml b/roles/openshift_logging_fluentd/files/5.x/fluentd-throttle-config.yaml new file 
mode 100644 index 000000000..375621ff1 --- /dev/null +++ b/roles/openshift_logging_fluentd/files/5.x/fluentd-throttle-config.yaml @@ -0,0 +1,7 @@ +# Logging example fluentd throttling config file + +#example-project: +# read_lines_limit: 10 +# +#.operations: +# read_lines_limit: 100 diff --git a/roles/openshift_logging_fluentd/files/5.x/secure-forward.conf b/roles/openshift_logging_fluentd/files/5.x/secure-forward.conf new file mode 100644 index 000000000..87410c1c5 --- /dev/null +++ b/roles/openshift_logging_fluentd/files/5.x/secure-forward.conf @@ -0,0 +1,26 @@ +# +# @type secure_forward + +# self_hostname ${HOSTNAME} +# shared_key + +# secure yes +# enable_strict_verification yes + +# ca_cert_path /etc/fluent/keys/your_ca_cert +# ca_private_key_path /etc/fluent/keys/your_private_key + # for private CA secret key +# ca_private_key_passphrase passphrase + +# + # or IP +# host server.fqdn.example.com +# port 24284 +# +# + # ip address to connect +# host 203.0.113.8 + # specify hostlabel for FQDN verification if ipaddress is used for host +# hostlabel server.fqdn.example.com +# +# diff --git a/roles/openshift_logging_fluentd/files/fluentd-throttle-config.yaml b/roles/openshift_logging_fluentd/files/fluentd-throttle-config.yaml deleted file mode 100644 index 375621ff1..000000000 --- a/roles/openshift_logging_fluentd/files/fluentd-throttle-config.yaml +++ /dev/null @@ -1,7 +0,0 @@ -# Logging example fluentd throttling config file - -#example-project: -# read_lines_limit: 10 -# -#.operations: -# read_lines_limit: 100 diff --git a/roles/openshift_logging_fluentd/files/secure-forward.conf b/roles/openshift_logging_fluentd/files/secure-forward.conf deleted file mode 100644 index 87410c1c5..000000000 --- a/roles/openshift_logging_fluentd/files/secure-forward.conf +++ /dev/null @@ -1,26 +0,0 @@ -# -# @type secure_forward - -# self_hostname ${HOSTNAME} -# shared_key - -# secure yes -# enable_strict_verification yes - -# ca_cert_path /etc/fluent/keys/your_ca_cert -# ca_private_key_path /etc/fluent/keys/your_private_key - # for private CA secret key -# ca_private_key_passphrase passphrase - -# - # or IP -# host server.fqdn.example.com -# port 24284 -# -# - # ip address to connect -# host 203.0.113.8 - # specify hostlabel for FQDN verification if ipaddress is used for host -# hostlabel server.fqdn.example.com -# -# diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml index 79ebbca08..ef1c53de3 100644 --- a/roles/openshift_logging_fluentd/tasks/main.yaml +++ b/roles/openshift_logging_fluentd/tasks/main.yaml @@ -104,17 +104,17 @@ # create Fluentd configmap - template: - src: fluent.conf.j2 + src: "{{ __base_file_dir }}/fluent.conf.j2" dest: "{{ tempdir }}/fluent.conf" vars: deploy_type: "{{ openshift_logging_fluentd_deployment_type }}" - copy: - src: fluentd-throttle-config.yaml + src: "{{ __base_file_dir }}/fluentd-throttle-config.yaml" dest: "{{ tempdir }}/fluentd-throttle-config.yaml" - copy: - src: secure-forward.conf + src: "{{ __base_file_dir }}/secure-forward.conf" dest: "{{ tempdir }}/secure-forward.conf" - import_role: @@ -161,7 +161,7 @@ # TODO: pass in aggregation configurations - name: Generate logging-fluentd daemonset definition template: - src: fluentd.j2 + src: "{{ __base_file_dir }}/fluentd.j2" dest: "{{ tempdir }}/templates/logging-fluentd.yaml" vars: daemonset_name: logging-fluentd diff --git a/roles/openshift_logging_fluentd/templates/2.x/fluent.conf.j2 b/roles/openshift_logging_fluentd/templates/2.x/fluent.conf.j2 new file 
mode 100644 index 000000000..6e07b403a --- /dev/null +++ b/roles/openshift_logging_fluentd/templates/2.x/fluent.conf.j2 @@ -0,0 +1,80 @@ +# This file is the fluentd configuration entrypoint. Edit with care. + +@include configs.d/openshift/system.conf + +# In each section below, pre- and post- includes don't include anything initially; +# they exist to enable future additions to openshift conf as needed. + +## sources +{% if deploy_type in ['hosted', 'secure-aggregator'] %} +## ordered so that syslog always runs last... +@include configs.d/openshift/input-pre-*.conf +@include configs.d/dynamic/input-docker-*.conf +@include configs.d/dynamic/input-syslog-*.conf +@include configs.d/openshift/input-post-*.conf +## +{% else %} + + @type secure_forward + @label @INGRESS + + self_hostname ${HOSTNAME} + bind 0.0.0.0 + port {{openshift_logging_fluentd_aggregating_port}} + + shared_key {{openshift_logging_fluentd_shared_key}} + + secure {{openshift_logging_fluentd_aggregating_secure}} + enable_strict_verification {{openshift_logging_fluentd_aggregating_strict}} + ca_cert_path {{openshift_logging_fluentd_aggregating_cert_path}} + ca_private_key_path {{openshift_logging_fluentd_aggregating_key_path}} + ca_private_key_passphrase {{openshift_logging_fluentd_aggregating_passphrase}} + + + host {{openshift_logging_fluentd_aggregating_host}} + + +{% endif %} + + + + diff --git a/roles/openshift_logging_fluentd/templates/2.x/fluentd.j2 b/roles/openshift_logging_fluentd/templates/2.x/fluentd.j2 new file mode 100644 index 000000000..c6256cf49 --- /dev/null +++ b/roles/openshift_logging_fluentd/templates/2.x/fluentd.j2 @@ -0,0 +1,249 @@ +apiVersion: extensions/v1beta1 +kind: "DaemonSet" +metadata: + name: "{{ daemonset_name }}" + labels: + provider: openshift + component: "{{ daemonset_component }}" + logging-infra: "{{ daemonset_component }}" +spec: + selector: + matchLabels: + provider: openshift + component: "{{ daemonset_component }}" + updateStrategy: + type: RollingUpdate + rollingUpdate: + minReadySeconds: 600 + template: + metadata: + name: "{{ daemonset_container_name }}" + labels: + logging-infra: "{{ daemonset_component }}" + provider: openshift + component: "{{ daemonset_component }}" + spec: + serviceAccountName: "{{ daemonset_serviceAccount }}" + nodeSelector: + {{ fluentd_nodeselector_key }}: "{{ fluentd_nodeselector_value }}" + containers: + - name: "{{ daemonset_container_name }}" + image: "{{ openshift_logging_fluentd_image_prefix }}{{ daemonset_name }}:{{ openshift_logging_fluentd_image_version }}" + imagePullPolicy: IfNotPresent + securityContext: + privileged: true +{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_limit is defined and fluentd_cpu_limit is not none) or (fluentd_cpu_request is defined and fluentd_cpu_request is not none) %} + resources: +{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_limit is defined and fluentd_cpu_limit is not none) %} + limits: +{% if fluentd_cpu_limit is not none %} + cpu: "{{fluentd_cpu_limit}}" +{% endif %} +{% if fluentd_memory_limit is not none %} + memory: "{{fluentd_memory_limit}}" +{% endif %} +{% endif %} +{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_request is defined and fluentd_cpu_request is not none) %} + requests: +{% if fluentd_cpu_request is not none %} + cpu: "{{fluentd_cpu_request}}" +{% endif %} +{% if fluentd_memory_limit is not none %} + memory: "{{fluentd_memory_limit}}" +{% endif %} +{% endif %} 
+{% endif %} + volumeMounts: + - name: runlogjournal + mountPath: /run/log/journal + - name: varlog + mountPath: /var/log + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + - name: config + mountPath: /etc/fluent/configs.d/user + readOnly: true + - name: certs + mountPath: /etc/fluent/keys + readOnly: true + - name: dockerhostname + mountPath: /etc/docker-hostname + readOnly: true + - name: localtime + mountPath: /etc/localtime + readOnly: true + - name: dockercfg + mountPath: /etc/sysconfig/docker + readOnly: true + - name: dockerdaemoncfg + mountPath: /etc/docker + readOnly: true + - name: filebufferstorage + mountPath: /var/lib/fluentd +{% if openshift_logging_mux_client_mode is defined and + ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or + (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %} + - name: muxcerts + mountPath: /etc/fluent/muxkeys + readOnly: true +{% endif %} + env: + - name: "K8S_HOST_URL" + value: "{{ openshift_logging_fluentd_master_url }}" + - name: "ES_HOST" + value: "{{ app_host }}" + - name: "ES_PORT" + value: "{{ app_port }}" + - name: "ES_CLIENT_CERT" + value: "{{ openshift_logging_fluentd_app_client_cert }}" + - name: "ES_CLIENT_KEY" + value: "{{ openshift_logging_fluentd_app_client_key }}" + - name: "ES_CA" + value: "{{ openshift_logging_fluentd_app_ca }}" + - name: "OPS_HOST" + value: "{{ ops_host }}" + - name: "OPS_PORT" + value: "{{ ops_port }}" + - name: "OPS_CLIENT_CERT" + value: "{{ openshift_logging_fluentd_ops_client_cert }}" + - name: "OPS_CLIENT_KEY" + value: "{{ openshift_logging_fluentd_ops_client_key }}" + - name: "OPS_CA" + value: "{{ openshift_logging_fluentd_ops_ca }}" + - name: "JOURNAL_SOURCE" + value: "{{ openshift_logging_fluentd_journal_source | default('') }}" + - name: "JOURNAL_READ_FROM_HEAD" + value: "{{ openshift_logging_fluentd_journal_read_from_head | lower }}" + - name: "BUFFER_QUEUE_LIMIT" + value: "{{ openshift_logging_fluentd_buffer_queue_limit }}" + - name: "BUFFER_SIZE_LIMIT" + value: "{{ openshift_logging_fluentd_buffer_size_limit }}" + - name: "FLUENTD_CPU_LIMIT" + valueFrom: + resourceFieldRef: + containerName: "{{ daemonset_container_name }}" + resource: limits.cpu + - name: "FLUENTD_MEMORY_LIMIT" + valueFrom: + resourceFieldRef: + containerName: "{{ daemonset_container_name }}" + resource: limits.memory + - name: "FILE_BUFFER_LIMIT" + value: "{{ openshift_logging_fluentd_file_buffer_limit | default('256Mi') }}" +{% if openshift_logging_mux_client_mode is defined and + ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or + (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %} + - name: "MUX_CLIENT_MODE" + value: "{{ openshift_logging_mux_client_mode }}" +{% endif %} +{% if openshift_logging_install_eventrouter is defined and openshift_logging_install_eventrouter %} + - name: "TRANSFORM_EVENTS" + value: "true" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog is defined and openshift_logging_fluentd_remote_syslog %} + - name: USE_REMOTE_SYSLOG + value: "true" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_host is defined %} + - name: REMOTE_SYSLOG_HOST + value: "{{ openshift_logging_fluentd_remote_syslog_host }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_port is defined %} + - name: REMOTE_SYSLOG_PORT + value: "{{ openshift_logging_fluentd_remote_syslog_port }}" +{% endif %} + +{% if 
openshift_logging_fluentd_remote_syslog_severity is defined %} + - name: REMOTE_SYSLOG_SEVERITY + value: "{{ openshift_logging_fluentd_remote_syslog_severity }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_facility is defined %} + - name: REMOTE_SYSLOG_FACILITY + value: "{{ openshift_logging_fluentd_remote_syslog_facility }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_remove_tag_prefix is defined %} + - name: REMOTE_SYSLOG_REMOVE_TAG_PREFIX + value: "{{ openshift_logging_fluentd_remote_syslog_remove_tag_prefix }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_tag_key is defined %} + - name: REMOTE_SYSLOG_TAG_KEY + value: "{{ openshift_logging_fluentd_remote_syslog_tag_key }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_use_record is defined %} + - name: REMOTE_SYSLOG_USE_RECORD + value: "{{ openshift_logging_fluentd_remote_syslog_use_record }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_payload_key is defined %} + - name: REMOTE_SYSLOG_PAYLOAD_KEY + value: "{{ openshift_logging_fluentd_remote_syslog_payload_key }}" +{% endif %} + +{% if audit_container_engine %} + - name: "AUDIT_CONTAINER_ENGINE" + value: "{{ audit_container_engine | lower }}" +{% endif %} + +{% if audit_container_engine %} + - name: "NODE_NAME" + valueFrom: + fieldRef: + fieldPath: spec.nodeName +{% endif %} + +{% if audit_log_file != '' %} + - name: AUDIT_FILE + value: "{{ audit_log_file }}" +{% endif %} + +{% if audit_pos_log_file != '' %} + - name: AUDIT_POS_FILE + value: "{{ audit_pos_log_file }}" +{% endif %} + + volumes: + - name: runlogjournal + hostPath: + path: /run/log/journal + - name: varlog + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers + - name: config + configMap: + name: logging-fluentd + - name: certs + secret: + secretName: logging-fluentd + - name: dockerhostname + hostPath: + path: /etc/hostname + - name: localtime + hostPath: + path: /etc/localtime + - name: dockercfg + hostPath: + path: /etc/sysconfig/docker + - name: dockerdaemoncfg + hostPath: + path: /etc/docker +{% if openshift_logging_mux_client_mode is defined and + ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or + (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %} + - name: muxcerts + secret: + secretName: logging-mux +{% endif %} + - name: filebufferstorage + hostPath: + path: "/var/lib/fluentd" diff --git a/roles/openshift_logging_fluentd/templates/5.x/fluent.conf.j2 b/roles/openshift_logging_fluentd/templates/5.x/fluent.conf.j2 new file mode 100644 index 000000000..6e07b403a --- /dev/null +++ b/roles/openshift_logging_fluentd/templates/5.x/fluent.conf.j2 @@ -0,0 +1,80 @@ +# This file is the fluentd configuration entrypoint. Edit with care. + +@include configs.d/openshift/system.conf + +# In each section below, pre- and post- includes don't include anything initially; +# they exist to enable future additions to openshift conf as needed. + +## sources +{% if deploy_type in ['hosted', 'secure-aggregator'] %} +## ordered so that syslog always runs last... 
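The 2.x daemonset above only mounts the mux client certificates and sets MUX_CLIENT_MODE when `openshift_logging_mux_client_mode` is defined and either `openshift_logging_use_mux` or `openshift_logging_mux_allow_external` is enabled, and the 5.x copy that follows repeats the same conditions. A rough inventory sketch for exercising that path (the variable names come from the template conditions; the accepted client-mode values are not defined in this patch, so `maximal` is only an assumption):

    # illustrative only; the client-mode value is an assumption
    openshift_logging_use_mux: true
    openshift_logging_mux_client_mode: maximal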
+@include configs.d/openshift/input-pre-*.conf +@include configs.d/dynamic/input-docker-*.conf +@include configs.d/dynamic/input-syslog-*.conf +@include configs.d/openshift/input-post-*.conf +## +{% else %} + + @type secure_forward + @label @INGRESS + + self_hostname ${HOSTNAME} + bind 0.0.0.0 + port {{openshift_logging_fluentd_aggregating_port}} + + shared_key {{openshift_logging_fluentd_shared_key}} + + secure {{openshift_logging_fluentd_aggregating_secure}} + enable_strict_verification {{openshift_logging_fluentd_aggregating_strict}} + ca_cert_path {{openshift_logging_fluentd_aggregating_cert_path}} + ca_private_key_path {{openshift_logging_fluentd_aggregating_key_path}} + ca_private_key_passphrase {{openshift_logging_fluentd_aggregating_passphrase}} + + + host {{openshift_logging_fluentd_aggregating_host}} + + +{% endif %} + + + + diff --git a/roles/openshift_logging_fluentd/templates/5.x/fluentd.j2 b/roles/openshift_logging_fluentd/templates/5.x/fluentd.j2 new file mode 100644 index 000000000..c6256cf49 --- /dev/null +++ b/roles/openshift_logging_fluentd/templates/5.x/fluentd.j2 @@ -0,0 +1,249 @@ +apiVersion: extensions/v1beta1 +kind: "DaemonSet" +metadata: + name: "{{ daemonset_name }}" + labels: + provider: openshift + component: "{{ daemonset_component }}" + logging-infra: "{{ daemonset_component }}" +spec: + selector: + matchLabels: + provider: openshift + component: "{{ daemonset_component }}" + updateStrategy: + type: RollingUpdate + rollingUpdate: + minReadySeconds: 600 + template: + metadata: + name: "{{ daemonset_container_name }}" + labels: + logging-infra: "{{ daemonset_component }}" + provider: openshift + component: "{{ daemonset_component }}" + spec: + serviceAccountName: "{{ daemonset_serviceAccount }}" + nodeSelector: + {{ fluentd_nodeselector_key }}: "{{ fluentd_nodeselector_value }}" + containers: + - name: "{{ daemonset_container_name }}" + image: "{{ openshift_logging_fluentd_image_prefix }}{{ daemonset_name }}:{{ openshift_logging_fluentd_image_version }}" + imagePullPolicy: IfNotPresent + securityContext: + privileged: true +{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_limit is defined and fluentd_cpu_limit is not none) or (fluentd_cpu_request is defined and fluentd_cpu_request is not none) %} + resources: +{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_limit is defined and fluentd_cpu_limit is not none) %} + limits: +{% if fluentd_cpu_limit is not none %} + cpu: "{{fluentd_cpu_limit}}" +{% endif %} +{% if fluentd_memory_limit is not none %} + memory: "{{fluentd_memory_limit}}" +{% endif %} +{% endif %} +{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_request is defined and fluentd_cpu_request is not none) %} + requests: +{% if fluentd_cpu_request is not none %} + cpu: "{{fluentd_cpu_request}}" +{% endif %} +{% if fluentd_memory_limit is not none %} + memory: "{{fluentd_memory_limit}}" +{% endif %} +{% endif %} +{% endif %} + volumeMounts: + - name: runlogjournal + mountPath: /run/log/journal + - name: varlog + mountPath: /var/log + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + - name: config + mountPath: /etc/fluent/configs.d/user + readOnly: true + - name: certs + mountPath: /etc/fluent/keys + readOnly: true + - name: dockerhostname + mountPath: /etc/docker-hostname + readOnly: true + - name: localtime + mountPath: /etc/localtime + readOnly: true + - name: dockercfg + mountPath: 
/etc/sysconfig/docker + readOnly: true + - name: dockerdaemoncfg + mountPath: /etc/docker + readOnly: true + - name: filebufferstorage + mountPath: /var/lib/fluentd +{% if openshift_logging_mux_client_mode is defined and + ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or + (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %} + - name: muxcerts + mountPath: /etc/fluent/muxkeys + readOnly: true +{% endif %} + env: + - name: "K8S_HOST_URL" + value: "{{ openshift_logging_fluentd_master_url }}" + - name: "ES_HOST" + value: "{{ app_host }}" + - name: "ES_PORT" + value: "{{ app_port }}" + - name: "ES_CLIENT_CERT" + value: "{{ openshift_logging_fluentd_app_client_cert }}" + - name: "ES_CLIENT_KEY" + value: "{{ openshift_logging_fluentd_app_client_key }}" + - name: "ES_CA" + value: "{{ openshift_logging_fluentd_app_ca }}" + - name: "OPS_HOST" + value: "{{ ops_host }}" + - name: "OPS_PORT" + value: "{{ ops_port }}" + - name: "OPS_CLIENT_CERT" + value: "{{ openshift_logging_fluentd_ops_client_cert }}" + - name: "OPS_CLIENT_KEY" + value: "{{ openshift_logging_fluentd_ops_client_key }}" + - name: "OPS_CA" + value: "{{ openshift_logging_fluentd_ops_ca }}" + - name: "JOURNAL_SOURCE" + value: "{{ openshift_logging_fluentd_journal_source | default('') }}" + - name: "JOURNAL_READ_FROM_HEAD" + value: "{{ openshift_logging_fluentd_journal_read_from_head | lower }}" + - name: "BUFFER_QUEUE_LIMIT" + value: "{{ openshift_logging_fluentd_buffer_queue_limit }}" + - name: "BUFFER_SIZE_LIMIT" + value: "{{ openshift_logging_fluentd_buffer_size_limit }}" + - name: "FLUENTD_CPU_LIMIT" + valueFrom: + resourceFieldRef: + containerName: "{{ daemonset_container_name }}" + resource: limits.cpu + - name: "FLUENTD_MEMORY_LIMIT" + valueFrom: + resourceFieldRef: + containerName: "{{ daemonset_container_name }}" + resource: limits.memory + - name: "FILE_BUFFER_LIMIT" + value: "{{ openshift_logging_fluentd_file_buffer_limit | default('256Mi') }}" +{% if openshift_logging_mux_client_mode is defined and + ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or + (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %} + - name: "MUX_CLIENT_MODE" + value: "{{ openshift_logging_mux_client_mode }}" +{% endif %} +{% if openshift_logging_install_eventrouter is defined and openshift_logging_install_eventrouter %} + - name: "TRANSFORM_EVENTS" + value: "true" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog is defined and openshift_logging_fluentd_remote_syslog %} + - name: USE_REMOTE_SYSLOG + value: "true" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_host is defined %} + - name: REMOTE_SYSLOG_HOST + value: "{{ openshift_logging_fluentd_remote_syslog_host }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_port is defined %} + - name: REMOTE_SYSLOG_PORT + value: "{{ openshift_logging_fluentd_remote_syslog_port }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_severity is defined %} + - name: REMOTE_SYSLOG_SEVERITY + value: "{{ openshift_logging_fluentd_remote_syslog_severity }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_facility is defined %} + - name: REMOTE_SYSLOG_FACILITY + value: "{{ openshift_logging_fluentd_remote_syslog_facility }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_remove_tag_prefix is defined %} + - name: REMOTE_SYSLOG_REMOVE_TAG_PREFIX + value: "{{ 
openshift_logging_fluentd_remote_syslog_remove_tag_prefix }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_tag_key is defined %} + - name: REMOTE_SYSLOG_TAG_KEY + value: "{{ openshift_logging_fluentd_remote_syslog_tag_key }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_use_record is defined %} + - name: REMOTE_SYSLOG_USE_RECORD + value: "{{ openshift_logging_fluentd_remote_syslog_use_record }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_payload_key is defined %} + - name: REMOTE_SYSLOG_PAYLOAD_KEY + value: "{{ openshift_logging_fluentd_remote_syslog_payload_key }}" +{% endif %} + +{% if audit_container_engine %} + - name: "AUDIT_CONTAINER_ENGINE" + value: "{{ audit_container_engine | lower }}" +{% endif %} + +{% if audit_container_engine %} + - name: "NODE_NAME" + valueFrom: + fieldRef: + fieldPath: spec.nodeName +{% endif %} + +{% if audit_log_file != '' %} + - name: AUDIT_FILE + value: "{{ audit_log_file }}" +{% endif %} + +{% if audit_pos_log_file != '' %} + - name: AUDIT_POS_FILE + value: "{{ audit_pos_log_file }}" +{% endif %} + + volumes: + - name: runlogjournal + hostPath: + path: /run/log/journal + - name: varlog + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers + - name: config + configMap: + name: logging-fluentd + - name: certs + secret: + secretName: logging-fluentd + - name: dockerhostname + hostPath: + path: /etc/hostname + - name: localtime + hostPath: + path: /etc/localtime + - name: dockercfg + hostPath: + path: /etc/sysconfig/docker + - name: dockerdaemoncfg + hostPath: + path: /etc/docker +{% if openshift_logging_mux_client_mode is defined and + ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or + (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %} + - name: muxcerts + secret: + secretName: logging-mux +{% endif %} + - name: filebufferstorage + hostPath: + path: "/var/lib/fluentd" diff --git a/roles/openshift_logging_fluentd/templates/fluent.conf.j2 b/roles/openshift_logging_fluentd/templates/fluent.conf.j2 deleted file mode 100644 index 6e07b403a..000000000 --- a/roles/openshift_logging_fluentd/templates/fluent.conf.j2 +++ /dev/null @@ -1,80 +0,0 @@ -# This file is the fluentd configuration entrypoint. Edit with care. - -@include configs.d/openshift/system.conf - -# In each section below, pre- and post- includes don't include anything initially; -# they exist to enable future additions to openshift conf as needed. - -## sources -{% if deploy_type in ['hosted', 'secure-aggregator'] %} -## ordered so that syslog always runs last... 
-@include configs.d/openshift/input-pre-*.conf -@include configs.d/dynamic/input-docker-*.conf -@include configs.d/dynamic/input-syslog-*.conf -@include configs.d/openshift/input-post-*.conf -## -{% else %} - - @type secure_forward - @label @INGRESS - - self_hostname ${HOSTNAME} - bind 0.0.0.0 - port {{openshift_logging_fluentd_aggregating_port}} - - shared_key {{openshift_logging_fluentd_shared_key}} - - secure {{openshift_logging_fluentd_aggregating_secure}} - enable_strict_verification {{openshift_logging_fluentd_aggregating_strict}} - ca_cert_path {{openshift_logging_fluentd_aggregating_cert_path}} - ca_private_key_path {{openshift_logging_fluentd_aggregating_key_path}} - ca_private_key_passphrase {{openshift_logging_fluentd_aggregating_passphrase}} - - - host {{openshift_logging_fluentd_aggregating_host}} - - -{% endif %} - - - - diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2 deleted file mode 100644 index c6256cf49..000000000 --- a/roles/openshift_logging_fluentd/templates/fluentd.j2 +++ /dev/null @@ -1,249 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: "DaemonSet" -metadata: - name: "{{ daemonset_name }}" - labels: - provider: openshift - component: "{{ daemonset_component }}" - logging-infra: "{{ daemonset_component }}" -spec: - selector: - matchLabels: - provider: openshift - component: "{{ daemonset_component }}" - updateStrategy: - type: RollingUpdate - rollingUpdate: - minReadySeconds: 600 - template: - metadata: - name: "{{ daemonset_container_name }}" - labels: - logging-infra: "{{ daemonset_component }}" - provider: openshift - component: "{{ daemonset_component }}" - spec: - serviceAccountName: "{{ daemonset_serviceAccount }}" - nodeSelector: - {{ fluentd_nodeselector_key }}: "{{ fluentd_nodeselector_value }}" - containers: - - name: "{{ daemonset_container_name }}" - image: "{{ openshift_logging_fluentd_image_prefix }}{{ daemonset_name }}:{{ openshift_logging_fluentd_image_version }}" - imagePullPolicy: IfNotPresent - securityContext: - privileged: true -{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_limit is defined and fluentd_cpu_limit is not none) or (fluentd_cpu_request is defined and fluentd_cpu_request is not none) %} - resources: -{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_limit is defined and fluentd_cpu_limit is not none) %} - limits: -{% if fluentd_cpu_limit is not none %} - cpu: "{{fluentd_cpu_limit}}" -{% endif %} -{% if fluentd_memory_limit is not none %} - memory: "{{fluentd_memory_limit}}" -{% endif %} -{% endif %} -{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_request is defined and fluentd_cpu_request is not none) %} - requests: -{% if fluentd_cpu_request is not none %} - cpu: "{{fluentd_cpu_request}}" -{% endif %} -{% if fluentd_memory_limit is not none %} - memory: "{{fluentd_memory_limit}}" -{% endif %} -{% endif %} -{% endif %} - volumeMounts: - - name: runlogjournal - mountPath: /run/log/journal - - name: varlog - mountPath: /var/log - - name: varlibdockercontainers - mountPath: /var/lib/docker/containers - readOnly: true - - name: config - mountPath: /etc/fluent/configs.d/user - readOnly: true - - name: certs - mountPath: /etc/fluent/keys - readOnly: true - - name: dockerhostname - mountPath: /etc/docker-hostname - readOnly: true - - name: localtime - mountPath: /etc/localtime - readOnly: true - - name: dockercfg - mountPath: 
/etc/sysconfig/docker - readOnly: true - - name: dockerdaemoncfg - mountPath: /etc/docker - readOnly: true - - name: filebufferstorage - mountPath: /var/lib/fluentd -{% if openshift_logging_mux_client_mode is defined and - ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or - (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %} - - name: muxcerts - mountPath: /etc/fluent/muxkeys - readOnly: true -{% endif %} - env: - - name: "K8S_HOST_URL" - value: "{{ openshift_logging_fluentd_master_url }}" - - name: "ES_HOST" - value: "{{ app_host }}" - - name: "ES_PORT" - value: "{{ app_port }}" - - name: "ES_CLIENT_CERT" - value: "{{ openshift_logging_fluentd_app_client_cert }}" - - name: "ES_CLIENT_KEY" - value: "{{ openshift_logging_fluentd_app_client_key }}" - - name: "ES_CA" - value: "{{ openshift_logging_fluentd_app_ca }}" - - name: "OPS_HOST" - value: "{{ ops_host }}" - - name: "OPS_PORT" - value: "{{ ops_port }}" - - name: "OPS_CLIENT_CERT" - value: "{{ openshift_logging_fluentd_ops_client_cert }}" - - name: "OPS_CLIENT_KEY" - value: "{{ openshift_logging_fluentd_ops_client_key }}" - - name: "OPS_CA" - value: "{{ openshift_logging_fluentd_ops_ca }}" - - name: "JOURNAL_SOURCE" - value: "{{ openshift_logging_fluentd_journal_source | default('') }}" - - name: "JOURNAL_READ_FROM_HEAD" - value: "{{ openshift_logging_fluentd_journal_read_from_head | lower }}" - - name: "BUFFER_QUEUE_LIMIT" - value: "{{ openshift_logging_fluentd_buffer_queue_limit }}" - - name: "BUFFER_SIZE_LIMIT" - value: "{{ openshift_logging_fluentd_buffer_size_limit }}" - - name: "FLUENTD_CPU_LIMIT" - valueFrom: - resourceFieldRef: - containerName: "{{ daemonset_container_name }}" - resource: limits.cpu - - name: "FLUENTD_MEMORY_LIMIT" - valueFrom: - resourceFieldRef: - containerName: "{{ daemonset_container_name }}" - resource: limits.memory - - name: "FILE_BUFFER_LIMIT" - value: "{{ openshift_logging_fluentd_file_buffer_limit | default('256Mi') }}" -{% if openshift_logging_mux_client_mode is defined and - ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or - (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %} - - name: "MUX_CLIENT_MODE" - value: "{{ openshift_logging_mux_client_mode }}" -{% endif %} -{% if openshift_logging_install_eventrouter is defined and openshift_logging_install_eventrouter %} - - name: "TRANSFORM_EVENTS" - value: "true" -{% endif %} - -{% if openshift_logging_fluentd_remote_syslog is defined and openshift_logging_fluentd_remote_syslog %} - - name: USE_REMOTE_SYSLOG - value: "true" -{% endif %} - -{% if openshift_logging_fluentd_remote_syslog_host is defined %} - - name: REMOTE_SYSLOG_HOST - value: "{{ openshift_logging_fluentd_remote_syslog_host }}" -{% endif %} - -{% if openshift_logging_fluentd_remote_syslog_port is defined %} - - name: REMOTE_SYSLOG_PORT - value: "{{ openshift_logging_fluentd_remote_syslog_port }}" -{% endif %} - -{% if openshift_logging_fluentd_remote_syslog_severity is defined %} - - name: REMOTE_SYSLOG_SEVERITY - value: "{{ openshift_logging_fluentd_remote_syslog_severity }}" -{% endif %} - -{% if openshift_logging_fluentd_remote_syslog_facility is defined %} - - name: REMOTE_SYSLOG_FACILITY - value: "{{ openshift_logging_fluentd_remote_syslog_facility }}" -{% endif %} - -{% if openshift_logging_fluentd_remote_syslog_remove_tag_prefix is defined %} - - name: REMOTE_SYSLOG_REMOVE_TAG_PREFIX - value: "{{ 
openshift_logging_fluentd_remote_syslog_remove_tag_prefix }}" -{% endif %} - -{% if openshift_logging_fluentd_remote_syslog_tag_key is defined %} - - name: REMOTE_SYSLOG_TAG_KEY - value: "{{ openshift_logging_fluentd_remote_syslog_tag_key }}" -{% endif %} - -{% if openshift_logging_fluentd_remote_syslog_use_record is defined %} - - name: REMOTE_SYSLOG_USE_RECORD - value: "{{ openshift_logging_fluentd_remote_syslog_use_record }}" -{% endif %} - -{% if openshift_logging_fluentd_remote_syslog_payload_key is defined %} - - name: REMOTE_SYSLOG_PAYLOAD_KEY - value: "{{ openshift_logging_fluentd_remote_syslog_payload_key }}" -{% endif %} - -{% if audit_container_engine %} - - name: "AUDIT_CONTAINER_ENGINE" - value: "{{ audit_container_engine | lower }}" -{% endif %} - -{% if audit_container_engine %} - - name: "NODE_NAME" - valueFrom: - fieldRef: - fieldPath: spec.nodeName -{% endif %} - -{% if audit_log_file != '' %} - - name: AUDIT_FILE - value: "{{ audit_log_file }}" -{% endif %} - -{% if audit_pos_log_file != '' %} - - name: AUDIT_POS_FILE - value: "{{ audit_pos_log_file }}" -{% endif %} - - volumes: - - name: runlogjournal - hostPath: - path: /run/log/journal - - name: varlog - hostPath: - path: /var/log - - name: varlibdockercontainers - hostPath: - path: /var/lib/docker/containers - - name: config - configMap: - name: logging-fluentd - - name: certs - secret: - secretName: logging-fluentd - - name: dockerhostname - hostPath: - path: /etc/hostname - - name: localtime - hostPath: - path: /etc/localtime - - name: dockercfg - hostPath: - path: /etc/sysconfig/docker - - name: dockerdaemoncfg - hostPath: - path: /etc/docker -{% if openshift_logging_mux_client_mode is defined and - ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or - (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %} - - name: muxcerts - secret: - secretName: logging-mux -{% endif %} - - name: filebufferstorage - hostPath: - path: "/var/lib/fluentd" diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml index c67235c62..58edc5ce5 100644 --- a/roles/openshift_logging_kibana/tasks/main.yaml +++ b/roles/openshift_logging_kibana/tasks/main.yaml @@ -133,7 +133,7 @@ - name: Generating Kibana route template template: - src: route_reencrypt.j2 + src: "{{ __base_file_dir }}/route_reencrypt.j2" dest: "{{ tempdir }}/templates/kibana-route.yaml" vars: obj_name: "{{ kibana_name }}" @@ -174,7 +174,7 @@ # create oauth client - name: Create oauth-client template template: - src: oauth-client.j2 + src: "{{ __base_file_dir }}/oauth-client.j2" dest: "{{ tempdir }}/templates/oauth-client.yml" vars: kibana_hostnames: "{{ proxy_hostnames | unique }}" @@ -233,7 +233,7 @@ # create Kibana DC - name: Generate Kibana DC template template: - src: kibana.j2 + src: "{{ __base_file_dir }}/kibana.j2" dest: "{{ tempdir }}/templates/kibana-dc.yaml" vars: component: "{{ kibana_component }}" diff --git a/roles/openshift_logging_kibana/templates/2.x/kibana.j2 b/roles/openshift_logging_kibana/templates/2.x/kibana.j2 new file mode 100644 index 000000000..ed05b8458 --- /dev/null +++ b/roles/openshift_logging_kibana/templates/2.x/kibana.j2 @@ -0,0 +1,176 @@ +apiVersion: "v1" +kind: "DeploymentConfig" +metadata: + name: "{{ deploy_name }}" + labels: + provider: openshift + component: "{{ component }}" + logging-infra: "{{ logging_component }}" +spec: + replicas: {{ kibana_replicas | default(1) }} + selector: + provider: openshift + 
component: "{{ component }}" + logging-infra: "{{ logging_component }}" + strategy: + rollingParams: + intervalSeconds: 1 + timeoutSeconds: 600 + updatePeriodSeconds: 1 + type: Rolling + template: + metadata: + name: "{{ deploy_name }}" + labels: + logging-infra: "{{ logging_component }}" + provider: openshift + component: "{{ component }}" + spec: + serviceAccountName: aggregated-logging-kibana +{% if kibana_node_selector is iterable and kibana_node_selector | length > 0 %} + nodeSelector: +{% for key, value in kibana_node_selector.items() %} + {{ key }}: "{{ value }}" +{% endfor %} +{% endif %} + containers: + - + name: "kibana" + image: {{ image }} + imagePullPolicy: IfNotPresent +{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") or (kibana_cpu_request is defined and kibana_cpu_request is not none and kibana_cpu_request != "") %} + resources: +{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") %} + limits: +{% if kibana_cpu_limit is not none and kibana_cpu_limit != "" %} + cpu: "{{ kibana_cpu_limit }}" +{% endif %} +{% if kibana_memory_limit is not none and kibana_memory_limit != "" %} + memory: "{{ kibana_memory_limit }}" +{% endif %} +{% endif %} +{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_request is defined and kibana_cpu_request is not none and kibana_cpu_request != "") %} + requests: +{% if kibana_cpu_request is not none and kibana_cpu_request != "" %} + cpu: "{{ kibana_cpu_request }}" +{% endif %} +{% if kibana_memory_limit is not none and kibana_memory_limit != "" %} + memory: "{{ kibana_memory_limit }}" +{% endif %} +{% endif %} +{% endif %} + env: + - name: "ES_HOST" + value: "{{ es_host }}" + - name: "ES_PORT" + value: "{{ es_port }}" + - + name: "KIBANA_MEMORY_LIMIT" + valueFrom: + resourceFieldRef: + containerName: kibana + resource: limits.memory +{% for key, value in kibana_env_vars.items() %} + - name: "{{ key }}" + value: "{{ value }}" +{% endfor %} + volumeMounts: + - name: kibana + mountPath: /etc/kibana/keys + readOnly: true + readinessProbe: + exec: + command: + - "/usr/share/kibana/probe/readiness.sh" + initialDelaySeconds: 5 + timeoutSeconds: 4 + periodSeconds: 5 + - + name: "kibana-proxy" + image: {{ proxy_image }} + imagePullPolicy: IfNotPresent +{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") or (kibana_proxy_cpu_request is defined and kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "") %} + resources: +{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") %} + limits: +{% if kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "" %} + cpu: "{{ kibana_proxy_cpu_limit }}" +{% endif %} +{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %} + memory: "{{ kibana_proxy_memory_limit }}" +{% endif %} +{% endif %} +{% if (kibana_proxy_memory_limit is defined and 
kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_request is defined and kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "") %} + requests: +{% if kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "" %} + cpu: "{{ kibana_proxy_cpu_request }}" +{% endif %} +{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %} + memory: "{{ kibana_proxy_memory_limit }}" +{% endif %} +{% endif %} +{% endif %} + ports: + - + name: "oaproxy" + containerPort: 3000 + env: + - + name: "OAP_BACKEND_URL" + value: "http://localhost:5601" + - + name: "OAP_AUTH_MODE" + value: "oauth2" + - + name: "OAP_TRANSFORM" + value: "user_header,token_header" + - + name: "OAP_OAUTH_ID" + value: kibana-proxy + - + name: "OAP_MASTER_URL" + value: {{ openshift_logging_kibana_master_url }} + - + name: "OAP_PUBLIC_MASTER_URL" + value: {{ openshift_logging_kibana_master_public_url }} + - + name: "OAP_LOGOUT_REDIRECT" + value: {{ openshift_logging_kibana_master_public_url }}/console/logout + - + name: "OAP_MASTER_CA_FILE" + value: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + - + name: "OAP_DEBUG" + value: "{{ openshift_logging_kibana_proxy_debug }}" + - + name: "OAP_OAUTH_SECRET_FILE" + value: "/secret/oauth-secret" + - + name: "OAP_SERVER_CERT_FILE" + value: "/secret/server-cert" + - + name: "OAP_SERVER_KEY_FILE" + value: "/secret/server-key" + - + name: "OAP_SERVER_TLS_FILE" + value: "/secret/server-tls.json" + - + name: "OAP_SESSION_SECRET_FILE" + value: "/secret/session-secret" + - + name: "OCP_AUTH_PROXY_MEMORY_LIMIT" + valueFrom: + resourceFieldRef: + containerName: kibana-proxy + resource: limits.memory + volumeMounts: + - name: kibana-proxy + mountPath: /secret + readOnly: true + volumes: + - name: kibana + secret: + secretName: logging-kibana + - name: kibana-proxy + secret: + secretName: logging-kibana-proxy diff --git a/roles/openshift_logging_kibana/templates/2.x/oauth-client.j2 b/roles/openshift_logging_kibana/templates/2.x/oauth-client.j2 new file mode 100644 index 000000000..c80ff3d30 --- /dev/null +++ b/roles/openshift_logging_kibana/templates/2.x/oauth-client.j2 @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: OAuthClient +metadata: + name: kibana-proxy + labels: + logging-infra: support +secret: {{ secret }} +redirectURIs: +{% for host in kibana_hostnames %} +- {{ host }} +{% endfor %} +scopeRestrictions: +- literals: + - user:info + - user:check-access + - user:list-projects diff --git a/roles/openshift_logging_kibana/templates/2.x/route_reencrypt.j2 b/roles/openshift_logging_kibana/templates/2.x/route_reencrypt.j2 new file mode 100644 index 000000000..d2e8b8bcb --- /dev/null +++ b/roles/openshift_logging_kibana/templates/2.x/route_reencrypt.j2 @@ -0,0 +1,36 @@ +apiVersion: "v1" +kind: "Route" +metadata: + name: "{{obj_name}}" +{% if labels is defined%} + labels: +{% for key, value in labels.items() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} +spec: + host: {{ route_host }} + tls: +{% if tls_key is defined and tls_key | length > 0 %} + key: | +{{ tls_key|indent(6, true) }} +{% if tls_cert is defined and tls_cert | length > 0 %} + certificate: | +{{ tls_cert|indent(6, true) }} +{% endif %} +{% endif %} + caCertificate: | +{% for line in tls_ca_cert.split('\n') %} + {{ line }} +{% endfor %} + destinationCACertificate: | +{% for line in tls_dest_ca_cert.split('\n') %} + {{ line }} +{% endfor %} + termination: reencrypt +{% if edge_term_policy is defined and edge_term_policy | length > 0 %} + 
insecureEdgeTerminationPolicy: {{ edge_term_policy }} +{% endif %} + to: + kind: Service + name: {{ service_name }} diff --git a/roles/openshift_logging_kibana/templates/5.x/kibana.j2 b/roles/openshift_logging_kibana/templates/5.x/kibana.j2 new file mode 100644 index 000000000..0f946573d --- /dev/null +++ b/roles/openshift_logging_kibana/templates/5.x/kibana.j2 @@ -0,0 +1,170 @@ +apiVersion: "v1" +kind: "DeploymentConfig" +metadata: + name: "{{ deploy_name }}" + labels: + provider: openshift + component: "{{ component }}" + logging-infra: "{{ logging_component }}" +spec: + replicas: {{ kibana_replicas | default(1) }} + selector: + provider: openshift + component: "{{ component }}" + logging-infra: "{{ logging_component }}" + strategy: + rollingParams: + intervalSeconds: 1 + timeoutSeconds: 600 + updatePeriodSeconds: 1 + type: Rolling + template: + metadata: + name: "{{ deploy_name }}" + labels: + logging-infra: "{{ logging_component }}" + provider: openshift + component: "{{ component }}" + spec: + serviceAccountName: aggregated-logging-kibana +{% if kibana_node_selector is iterable and kibana_node_selector | length > 0 %} + nodeSelector: +{% for key, value in kibana_node_selector.items() %} + {{ key }}: "{{ value }}" +{% endfor %} +{% endif %} + containers: + - + name: "kibana" + image: {{ image }} + imagePullPolicy: IfNotPresent +{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") or (kibana_cpu_request is defined and kibana_cpu_request is not none and kibana_cpu_request != "") %} + resources: +{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") %} + limits: +{% if kibana_cpu_limit is not none and kibana_cpu_limit != "" %} + cpu: "{{ kibana_cpu_limit }}" +{% endif %} +{% if kibana_memory_limit is not none and kibana_memory_limit != "" %} + memory: "{{ kibana_memory_limit }}" +{% endif %} +{% endif %} +{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_request is defined and kibana_cpu_request is not none and kibana_cpu_request != "") %} + requests: +{% if kibana_cpu_request is not none and kibana_cpu_request != "" %} + cpu: "{{ kibana_cpu_request }}" +{% endif %} +{% if kibana_memory_limit is not none and kibana_memory_limit != "" %} + memory: "{{ kibana_memory_limit }}" +{% endif %} +{% endif %} +{% endif %} + env: + - name: "ES_URL" + value: "https://{{ es_host }}:{{ es_port }}" + - + name: "KIBANA_MEMORY_LIMIT" + valueFrom: + resourceFieldRef: + containerName: kibana + resource: limits.memory + volumeMounts: + - name: kibana + mountPath: /etc/kibana/keys + readOnly: true + readinessProbe: + exec: + command: + - "/usr/share/kibana/probe/readiness.sh" + initialDelaySeconds: 5 + timeoutSeconds: 4 + periodSeconds: 5 + - + name: "kibana-proxy" + image: {{ proxy_image }} + imagePullPolicy: IfNotPresent +{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") or (kibana_proxy_cpu_request is defined and kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "") %} + resources: +{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit 
is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") %} + limits: +{% if kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "" %} + cpu: "{{ kibana_proxy_cpu_limit }}" +{% endif %} +{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %} + memory: "{{ kibana_proxy_memory_limit }}" +{% endif %} +{% endif %} +{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_request is defined and kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "") %} + requests: +{% if kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "" %} + cpu: "{{ kibana_proxy_cpu_request }}" +{% endif %} +{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %} + memory: "{{ kibana_proxy_memory_limit }}" +{% endif %} +{% endif %} +{% endif %} + ports: + - + name: "oaproxy" + containerPort: 3000 + env: + - + name: "OAP_BACKEND_URL" + value: "http://localhost:5601" + - + name: "OAP_AUTH_MODE" + value: "oauth2" + - + name: "OAP_TRANSFORM" + value: "user_header,token_header" + - + name: "OAP_OAUTH_ID" + value: kibana-proxy + - + name: "OAP_MASTER_URL" + value: {{ openshift_logging_kibana_master_url }} + - + name: "OAP_PUBLIC_MASTER_URL" + value: {{ openshift_logging_kibana_master_public_url }} + - + name: "OAP_LOGOUT_REDIRECT" + value: {{ openshift_logging_kibana_master_public_url }}/console/logout + - + name: "OAP_MASTER_CA_FILE" + value: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + - + name: "OAP_DEBUG" + value: "{{ openshift_logging_kibana_proxy_debug }}" + - + name: "OAP_OAUTH_SECRET_FILE" + value: "/secret/oauth-secret" + - + name: "OAP_SERVER_CERT_FILE" + value: "/secret/server-cert" + - + name: "OAP_SERVER_KEY_FILE" + value: "/secret/server-key" + - + name: "OAP_SERVER_TLS_FILE" + value: "/secret/server-tls.json" + - + name: "OAP_SESSION_SECRET_FILE" + value: "/secret/session-secret" + - + name: "OCP_AUTH_PROXY_MEMORY_LIMIT" + valueFrom: + resourceFieldRef: + containerName: kibana-proxy + resource: limits.memory + volumeMounts: + - name: kibana-proxy + mountPath: /secret + readOnly: true + volumes: + - name: kibana + secret: + secretName: logging-kibana + - name: kibana-proxy + secret: + secretName: logging-kibana-proxy diff --git a/roles/openshift_logging_kibana/templates/5.x/oauth-client.j2 b/roles/openshift_logging_kibana/templates/5.x/oauth-client.j2 new file mode 100644 index 000000000..c80ff3d30 --- /dev/null +++ b/roles/openshift_logging_kibana/templates/5.x/oauth-client.j2 @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: OAuthClient +metadata: + name: kibana-proxy + labels: + logging-infra: support +secret: {{ secret }} +redirectURIs: +{% for host in kibana_hostnames %} +- {{ host }} +{% endfor %} +scopeRestrictions: +- literals: + - user:info + - user:check-access + - user:list-projects diff --git a/roles/openshift_logging_kibana/templates/5.x/route_reencrypt.j2 b/roles/openshift_logging_kibana/templates/5.x/route_reencrypt.j2 new file mode 100644 index 000000000..d2e8b8bcb --- /dev/null +++ b/roles/openshift_logging_kibana/templates/5.x/route_reencrypt.j2 @@ -0,0 +1,36 @@ +apiVersion: "v1" +kind: "Route" +metadata: + name: "{{obj_name}}" +{% if labels is defined%} + labels: +{% for key, value in labels.items() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} +spec: + host: {{ route_host }} + tls: +{% if tls_key is 
defined and tls_key | length > 0 %} + key: | +{{ tls_key|indent(6, true) }} +{% if tls_cert is defined and tls_cert | length > 0 %} + certificate: | +{{ tls_cert|indent(6, true) }} +{% endif %} +{% endif %} + caCertificate: | +{% for line in tls_ca_cert.split('\n') %} + {{ line }} +{% endfor %} + destinationCACertificate: | +{% for line in tls_dest_ca_cert.split('\n') %} + {{ line }} +{% endfor %} + termination: reencrypt +{% if edge_term_policy is defined and edge_term_policy | length > 0 %} + insecureEdgeTerminationPolicy: {{ edge_term_policy }} +{% endif %} + to: + kind: Service + name: {{ service_name }} diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2 deleted file mode 100644 index ed05b8458..000000000 --- a/roles/openshift_logging_kibana/templates/kibana.j2 +++ /dev/null @@ -1,176 +0,0 @@ -apiVersion: "v1" -kind: "DeploymentConfig" -metadata: - name: "{{ deploy_name }}" - labels: - provider: openshift - component: "{{ component }}" - logging-infra: "{{ logging_component }}" -spec: - replicas: {{ kibana_replicas | default(1) }} - selector: - provider: openshift - component: "{{ component }}" - logging-infra: "{{ logging_component }}" - strategy: - rollingParams: - intervalSeconds: 1 - timeoutSeconds: 600 - updatePeriodSeconds: 1 - type: Rolling - template: - metadata: - name: "{{ deploy_name }}" - labels: - logging-infra: "{{ logging_component }}" - provider: openshift - component: "{{ component }}" - spec: - serviceAccountName: aggregated-logging-kibana -{% if kibana_node_selector is iterable and kibana_node_selector | length > 0 %} - nodeSelector: -{% for key, value in kibana_node_selector.items() %} - {{ key }}: "{{ value }}" -{% endfor %} -{% endif %} - containers: - - - name: "kibana" - image: {{ image }} - imagePullPolicy: IfNotPresent -{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") or (kibana_cpu_request is defined and kibana_cpu_request is not none and kibana_cpu_request != "") %} - resources: -{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") %} - limits: -{% if kibana_cpu_limit is not none and kibana_cpu_limit != "" %} - cpu: "{{ kibana_cpu_limit }}" -{% endif %} -{% if kibana_memory_limit is not none and kibana_memory_limit != "" %} - memory: "{{ kibana_memory_limit }}" -{% endif %} -{% endif %} -{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_request is defined and kibana_cpu_request is not none and kibana_cpu_request != "") %} - requests: -{% if kibana_cpu_request is not none and kibana_cpu_request != "" %} - cpu: "{{ kibana_cpu_request }}" -{% endif %} -{% if kibana_memory_limit is not none and kibana_memory_limit != "" %} - memory: "{{ kibana_memory_limit }}" -{% endif %} -{% endif %} -{% endif %} - env: - - name: "ES_HOST" - value: "{{ es_host }}" - - name: "ES_PORT" - value: "{{ es_port }}" - - - name: "KIBANA_MEMORY_LIMIT" - valueFrom: - resourceFieldRef: - containerName: kibana - resource: limits.memory -{% for key, value in kibana_env_vars.items() %} - - name: "{{ key }}" - value: "{{ value }}" -{% endfor %} - volumeMounts: - - name: kibana - mountPath: /etc/kibana/keys - readOnly: true - readinessProbe: - exec: - command: - - 
"/usr/share/kibana/probe/readiness.sh" - initialDelaySeconds: 5 - timeoutSeconds: 4 - periodSeconds: 5 - - - name: "kibana-proxy" - image: {{ proxy_image }} - imagePullPolicy: IfNotPresent -{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") or (kibana_proxy_cpu_request is defined and kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "") %} - resources: -{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") %} - limits: -{% if kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "" %} - cpu: "{{ kibana_proxy_cpu_limit }}" -{% endif %} -{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %} - memory: "{{ kibana_proxy_memory_limit }}" -{% endif %} -{% endif %} -{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_request is defined and kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "") %} - requests: -{% if kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "" %} - cpu: "{{ kibana_proxy_cpu_request }}" -{% endif %} -{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %} - memory: "{{ kibana_proxy_memory_limit }}" -{% endif %} -{% endif %} -{% endif %} - ports: - - - name: "oaproxy" - containerPort: 3000 - env: - - - name: "OAP_BACKEND_URL" - value: "http://localhost:5601" - - - name: "OAP_AUTH_MODE" - value: "oauth2" - - - name: "OAP_TRANSFORM" - value: "user_header,token_header" - - - name: "OAP_OAUTH_ID" - value: kibana-proxy - - - name: "OAP_MASTER_URL" - value: {{ openshift_logging_kibana_master_url }} - - - name: "OAP_PUBLIC_MASTER_URL" - value: {{ openshift_logging_kibana_master_public_url }} - - - name: "OAP_LOGOUT_REDIRECT" - value: {{ openshift_logging_kibana_master_public_url }}/console/logout - - - name: "OAP_MASTER_CA_FILE" - value: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" - - - name: "OAP_DEBUG" - value: "{{ openshift_logging_kibana_proxy_debug }}" - - - name: "OAP_OAUTH_SECRET_FILE" - value: "/secret/oauth-secret" - - - name: "OAP_SERVER_CERT_FILE" - value: "/secret/server-cert" - - - name: "OAP_SERVER_KEY_FILE" - value: "/secret/server-key" - - - name: "OAP_SERVER_TLS_FILE" - value: "/secret/server-tls.json" - - - name: "OAP_SESSION_SECRET_FILE" - value: "/secret/session-secret" - - - name: "OCP_AUTH_PROXY_MEMORY_LIMIT" - valueFrom: - resourceFieldRef: - containerName: kibana-proxy - resource: limits.memory - volumeMounts: - - name: kibana-proxy - mountPath: /secret - readOnly: true - volumes: - - name: kibana - secret: - secretName: logging-kibana - - name: kibana-proxy - secret: - secretName: logging-kibana-proxy diff --git a/roles/openshift_logging_kibana/templates/oauth-client.j2 b/roles/openshift_logging_kibana/templates/oauth-client.j2 deleted file mode 100644 index c80ff3d30..000000000 --- a/roles/openshift_logging_kibana/templates/oauth-client.j2 +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -kind: OAuthClient -metadata: - name: kibana-proxy - labels: - logging-infra: support -secret: {{ secret }} -redirectURIs: -{% for host in kibana_hostnames %} -- {{ host }} -{% endfor %} 
-scopeRestrictions: -- literals: - - user:info - - user:check-access - - user:list-projects diff --git a/roles/openshift_logging_kibana/templates/route_reencrypt.j2 b/roles/openshift_logging_kibana/templates/route_reencrypt.j2 deleted file mode 100644 index d2e8b8bcb..000000000 --- a/roles/openshift_logging_kibana/templates/route_reencrypt.j2 +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: "v1" -kind: "Route" -metadata: - name: "{{obj_name}}" -{% if labels is defined%} - labels: -{% for key, value in labels.items() %} - {{key}}: {{value}} -{% endfor %} -{% endif %} -spec: - host: {{ route_host }} - tls: -{% if tls_key is defined and tls_key | length > 0 %} - key: | -{{ tls_key|indent(6, true) }} -{% if tls_cert is defined and tls_cert | length > 0 %} - certificate: | -{{ tls_cert|indent(6, true) }} -{% endif %} -{% endif %} - caCertificate: | -{% for line in tls_ca_cert.split('\n') %} - {{ line }} -{% endfor %} - destinationCACertificate: | -{% for line in tls_dest_ca_cert.split('\n') %} - {{ line }} -{% endfor %} - termination: reencrypt -{% if edge_term_policy is defined and edge_term_policy | length > 0 %} - insecureEdgeTerminationPolicy: {{ edge_term_policy }} -{% endif %} - to: - kind: Service - name: {{ service_name }} diff --git a/roles/openshift_logging_mux/files/2.x/fluent.conf b/roles/openshift_logging_mux/files/2.x/fluent.conf new file mode 100644 index 000000000..bf61c9811 --- /dev/null +++ b/roles/openshift_logging_mux/files/2.x/fluent.conf @@ -0,0 +1,37 @@ +# This file is the fluentd configuration entrypoint. Edit with care. + +@include configs.d/openshift/system.conf + +# In each section below, pre- and post- includes don't include anything initially; +# they exist to enable future additions to openshift conf as needed. + +## sources +## ordered so that syslog always runs last... +@include configs.d/openshift/input-pre-*.conf +@include configs.d/dynamic/input-docker-*.conf +@include configs.d/dynamic/input-syslog-*.conf +@include configs.d/openshift/input-post-*.conf +## + + + + diff --git a/roles/openshift_logging_mux/files/2.x/secure-forward.conf b/roles/openshift_logging_mux/files/2.x/secure-forward.conf new file mode 100644 index 000000000..87410c1c5 --- /dev/null +++ b/roles/openshift_logging_mux/files/2.x/secure-forward.conf @@ -0,0 +1,26 @@ +# +# @type secure_forward + +# self_hostname ${HOSTNAME} +# shared_key + +# secure yes +# enable_strict_verification yes + +# ca_cert_path /etc/fluent/keys/your_ca_cert +# ca_private_key_path /etc/fluent/keys/your_private_key + # for private CA secret key +# ca_private_key_passphrase passphrase + +# + # or IP +# host server.fqdn.example.com +# port 24284 +# +# + # ip address to connect +# host 203.0.113.8 + # specify hostlabel for FQDN verification if ipaddress is used for host +# hostlabel server.fqdn.example.com +# +# diff --git a/roles/openshift_logging_mux/files/5.x/fluent.conf b/roles/openshift_logging_mux/files/5.x/fluent.conf new file mode 100644 index 000000000..bf61c9811 --- /dev/null +++ b/roles/openshift_logging_mux/files/5.x/fluent.conf @@ -0,0 +1,37 @@ +# This file is the fluentd configuration entrypoint. Edit with care. + +@include configs.d/openshift/system.conf + +# In each section below, pre- and post- includes don't include anything initially; +# they exist to enable future additions to openshift conf as needed. + +## sources +## ordered so that syslog always runs last... 
+@include configs.d/openshift/input-pre-*.conf +@include configs.d/dynamic/input-docker-*.conf +@include configs.d/dynamic/input-syslog-*.conf +@include configs.d/openshift/input-post-*.conf +## + + + + diff --git a/roles/openshift_logging_mux/files/5.x/secure-forward.conf b/roles/openshift_logging_mux/files/5.x/secure-forward.conf new file mode 100644 index 000000000..87410c1c5 --- /dev/null +++ b/roles/openshift_logging_mux/files/5.x/secure-forward.conf @@ -0,0 +1,26 @@ +# +# @type secure_forward + +# self_hostname ${HOSTNAME} +# shared_key + +# secure yes +# enable_strict_verification yes + +# ca_cert_path /etc/fluent/keys/your_ca_cert +# ca_private_key_path /etc/fluent/keys/your_private_key + # for private CA secret key +# ca_private_key_passphrase passphrase + +# + # or IP +# host server.fqdn.example.com +# port 24284 +# +# + # ip address to connect +# host 203.0.113.8 + # specify hostlabel for FQDN verification if ipaddress is used for host +# hostlabel server.fqdn.example.com +# +# diff --git a/roles/openshift_logging_mux/files/fluent.conf b/roles/openshift_logging_mux/files/fluent.conf deleted file mode 100644 index bf61c9811..000000000 --- a/roles/openshift_logging_mux/files/fluent.conf +++ /dev/null @@ -1,37 +0,0 @@ -# This file is the fluentd configuration entrypoint. Edit with care. - -@include configs.d/openshift/system.conf - -# In each section below, pre- and post- includes don't include anything initially; -# they exist to enable future additions to openshift conf as needed. - -## sources -## ordered so that syslog always runs last... -@include configs.d/openshift/input-pre-*.conf -@include configs.d/dynamic/input-docker-*.conf -@include configs.d/dynamic/input-syslog-*.conf -@include configs.d/openshift/input-post-*.conf -## - - - - diff --git a/roles/openshift_logging_mux/files/secure-forward.conf b/roles/openshift_logging_mux/files/secure-forward.conf deleted file mode 100644 index 87410c1c5..000000000 --- a/roles/openshift_logging_mux/files/secure-forward.conf +++ /dev/null @@ -1,26 +0,0 @@ -# -# @type secure_forward - -# self_hostname ${HOSTNAME} -# shared_key - -# secure yes -# enable_strict_verification yes - -# ca_cert_path /etc/fluent/keys/your_ca_cert -# ca_private_key_path /etc/fluent/keys/your_private_key - # for private CA secret key -# ca_private_key_passphrase passphrase - -# - # or IP -# host server.fqdn.example.com -# port 24284 -# -# - # ip address to connect -# host 203.0.113.8 - # specify hostlabel for FQDN verification if ipaddress is used for host -# hostlabel server.fqdn.example.com -# -# diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml index 7eba3cda4..b2699b285 100644 --- a/roles/openshift_logging_mux/tasks/main.yaml +++ b/roles/openshift_logging_mux/tasks/main.yaml @@ -86,12 +86,12 @@ # create Mux configmap - copy: - src: fluent.conf + src: "{{ __base_file_dir }}/fluent.conf" dest: "{{mktemp.stdout}}/fluent-mux.conf" changed_when: no - copy: - src: secure-forward.conf + src: "{{ __base_file_dir }}/secure-forward.conf" dest: "{{mktemp.stdout}}/secure-forward-mux.conf" changed_when: no @@ -170,7 +170,7 @@ # create Mux DC - name: Generating mux deploymentconfig template: - src: mux.j2 + src: "{{ __base_file_dir }}/mux.j2" dest: "{{mktemp.stdout}}/templates/logging-mux-dc.yaml" vars: component: mux diff --git a/roles/openshift_logging_mux/templates/2.x/mux.j2 b/roles/openshift_logging_mux/templates/2.x/mux.j2 new file mode 100644 index 000000000..2337c33d5 --- /dev/null +++ 
b/roles/openshift_logging_mux/templates/2.x/mux.j2 @@ -0,0 +1,202 @@ +apiVersion: "v1" +kind: "DeploymentConfig" +metadata: + name: "{{deploy_name}}" + labels: + provider: openshift + component: "{{component}}" + logging-infra: "{{logging_component}}" +spec: + replicas: {{mux_replicas|default(1)}} + selector: + provider: openshift + component: "{{component}}" + logging-infra: "{{logging_component}}" + strategy: + rollingParams: + intervalSeconds: 1 + timeoutSeconds: 600 + updatePeriodSeconds: 1 + type: Rolling + template: + metadata: + name: "{{deploy_name}}" + labels: + logging-infra: "{{logging_component}}" + provider: openshift + component: "{{component}}" + spec: + serviceAccountName: aggregated-logging-mux +{% if mux_node_selector is iterable and mux_node_selector | length > 0 %} + nodeSelector: +{% for key, value in mux_node_selector.items() %} + {{key}}: "{{value}}" +{% endfor %} +{% endif %} + containers: + - name: "mux" + image: {{image}} + imagePullPolicy: IfNotPresent +{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) or (mux_cpu_request is defined and mux_cpu_request is not none) %} + resources: +{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %} + limits: +{% if mux_cpu_limit is not none %} + cpu: "{{mux_cpu_limit}}" +{% endif %} +{% if mux_memory_limit is not none %} + memory: "{{mux_memory_limit}}" +{% endif %} +{% endif %} +{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_request is defined and mux_cpu_request is not none) %} + requests: +{% if mux_cpu_request is not none %} + cpu: "{{mux_cpu_request}}" +{% endif %} +{% if mux_memory_limit is not none %} + memory: "{{mux_memory_limit}}" +{% endif %} +{% endif %} +{% endif %} + ports: + - containerPort: {{ openshift_logging_mux_port }} + name: mux-forward + volumeMounts: + - name: config + mountPath: /etc/fluent/configs.d/user + readOnly: true + - name: certs + mountPath: /etc/fluent/keys + readOnly: true + - name: dockerhostname + mountPath: /etc/docker-hostname + readOnly: true + - name: localtime + mountPath: /etc/localtime + readOnly: true + - name: muxcerts + mountPath: /etc/fluent/muxkeys + readOnly: true + - name: filebufferstorage + mountPath: /var/lib/fluentd + env: + - name: "K8S_HOST_URL" + value: "{{openshift_logging_mux_master_url}}" + - name: "ES_HOST" + value: "{{openshift_logging_mux_app_host}}" + - name: "ES_PORT" + value: "{{openshift_logging_mux_app_port}}" + - name: "ES_CLIENT_CERT" + value: "{{openshift_logging_mux_app_client_cert}}" + - name: "ES_CLIENT_KEY" + value: "{{openshift_logging_mux_app_client_key}}" + - name: "ES_CA" + value: "{{openshift_logging_mux_app_ca}}" + - name: "OPS_HOST" + value: "{{openshift_logging_mux_ops_host}}" + - name: "OPS_PORT" + value: "{{openshift_logging_mux_ops_port}}" + - name: "OPS_CLIENT_CERT" + value: "{{openshift_logging_mux_ops_client_cert}}" + - name: "OPS_CLIENT_KEY" + value: "{{openshift_logging_mux_ops_client_key}}" + - name: "OPS_CA" + value: "{{openshift_logging_mux_ops_ca}}" + - name: "JOURNAL_SOURCE" + value: "{{openshift_logging_mux_journal_source | default('')}}" + - name: "JOURNAL_READ_FROM_HEAD" + value: "{{openshift_logging_mux_journal_read_from_head|lower}}" + - name: FORWARD_LISTEN_HOST + value: "{{ openshift_logging_mux_hostname }}" + - name: FORWARD_LISTEN_PORT + value: "{{ openshift_logging_mux_port }}" + - name: USE_MUX + value: "true" + - name: 
"BUFFER_QUEUE_LIMIT" + value: "{{ openshift_logging_mux_buffer_queue_limit }}" + - name: "BUFFER_SIZE_LIMIT" + value: "{{ openshift_logging_mux_buffer_size_limit }}" + - name: "MUX_CPU_LIMIT" + valueFrom: + resourceFieldRef: + containerName: "mux" + resource: limits.cpu + - name: "MUX_MEMORY_LIMIT" + valueFrom: + resourceFieldRef: + containerName: "mux" + resource: limits.memory + - name: "FILE_BUFFER_LIMIT" + value: "{{ openshift_logging_mux_file_buffer_limit | default('2Gi') }}" + +{% if openshift_logging_mux_remote_syslog is defined and openshift_logging_mux_remote_syslog %} + - name: USE_REMOTE_SYSLOG + value: "true" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_host is defined %} + - name: REMOTE_SYSLOG_HOST + value: "{{ openshift_logging_mux_remote_syslog_host }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_port is defined %} + - name: REMOTE_SYSLOG_PORT + value: "{{ openshift_logging_mux_remote_syslog_port }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_severity is defined %} + - name: REMOTE_SYSLOG_SEVERITY + value: "{{ openshift_logging_mux_remote_syslog_severity }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_facility is defined %} + - name: REMOTE_SYSLOG_FACILITY + value: "{{ openshift_logging_mux_remote_syslog_facility }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_remove_tag_prefix is defined %} + - name: REMOTE_SYSLOG_REMOVE_TAG_PREFIX + value: "{{ openshift_logging_mux_remote_syslog_remove_tag_prefix }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_tag_key is defined %} + - name: REMOTE_SYSLOG_TAG_KEY + value: "{{ openshift_logging_mux_remote_syslog_tag_key }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_use_record is defined %} + - name: REMOTE_SYSLOG_USE_RECORD + value: "{{ openshift_logging_mux_remote_syslog_use_record }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_payload_key is defined %} + - name: REMOTE_SYSLOG_PAYLOAD_KEY + value: "{{ openshift_logging_mux_remote_syslog_payload_key }}" +{% endif %} + + volumes: + - name: config + configMap: + name: logging-mux + - name: certs + secret: + secretName: logging-fluentd + - name: dockerhostname + hostPath: + path: /etc/hostname + - name: localtime + hostPath: + path: /etc/localtime + - name: muxcerts + secret: + secretName: logging-mux + - name: filebufferstorage +{% if openshift_logging_mux_file_buffer_storage_type == 'pvc' %} + persistentVolumeClaim: + claimName: {{ openshift_logging_mux_file_buffer_pvc_name }} +{% elif openshift_logging_mux_file_buffer_storage_type == 'hostmount' %} + hostPath: + path: "/var/log/fluentd" +{% else %} + emptydir: {} +{% endif %} diff --git a/roles/openshift_logging_mux/templates/5.x/mux.j2 b/roles/openshift_logging_mux/templates/5.x/mux.j2 new file mode 100644 index 000000000..2337c33d5 --- /dev/null +++ b/roles/openshift_logging_mux/templates/5.x/mux.j2 @@ -0,0 +1,202 @@ +apiVersion: "v1" +kind: "DeploymentConfig" +metadata: + name: "{{deploy_name}}" + labels: + provider: openshift + component: "{{component}}" + logging-infra: "{{logging_component}}" +spec: + replicas: {{mux_replicas|default(1)}} + selector: + provider: openshift + component: "{{component}}" + logging-infra: "{{logging_component}}" + strategy: + rollingParams: + intervalSeconds: 1 + timeoutSeconds: 600 + updatePeriodSeconds: 1 + type: Rolling + template: + metadata: + name: "{{deploy_name}}" + labels: + logging-infra: "{{logging_component}}" + provider: openshift + component: "{{component}}" + spec: + 
serviceAccountName: aggregated-logging-mux +{% if mux_node_selector is iterable and mux_node_selector | length > 0 %} + nodeSelector: +{% for key, value in mux_node_selector.items() %} + {{key}}: "{{value}}" +{% endfor %} +{% endif %} + containers: + - name: "mux" + image: {{image}} + imagePullPolicy: IfNotPresent +{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) or (mux_cpu_request is defined and mux_cpu_request is not none) %} + resources: +{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %} + limits: +{% if mux_cpu_limit is not none %} + cpu: "{{mux_cpu_limit}}" +{% endif %} +{% if mux_memory_limit is not none %} + memory: "{{mux_memory_limit}}" +{% endif %} +{% endif %} +{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_request is defined and mux_cpu_request is not none) %} + requests: +{% if mux_cpu_request is not none %} + cpu: "{{mux_cpu_request}}" +{% endif %} +{% if mux_memory_limit is not none %} + memory: "{{mux_memory_limit}}" +{% endif %} +{% endif %} +{% endif %} + ports: + - containerPort: {{ openshift_logging_mux_port }} + name: mux-forward + volumeMounts: + - name: config + mountPath: /etc/fluent/configs.d/user + readOnly: true + - name: certs + mountPath: /etc/fluent/keys + readOnly: true + - name: dockerhostname + mountPath: /etc/docker-hostname + readOnly: true + - name: localtime + mountPath: /etc/localtime + readOnly: true + - name: muxcerts + mountPath: /etc/fluent/muxkeys + readOnly: true + - name: filebufferstorage + mountPath: /var/lib/fluentd + env: + - name: "K8S_HOST_URL" + value: "{{openshift_logging_mux_master_url}}" + - name: "ES_HOST" + value: "{{openshift_logging_mux_app_host}}" + - name: "ES_PORT" + value: "{{openshift_logging_mux_app_port}}" + - name: "ES_CLIENT_CERT" + value: "{{openshift_logging_mux_app_client_cert}}" + - name: "ES_CLIENT_KEY" + value: "{{openshift_logging_mux_app_client_key}}" + - name: "ES_CA" + value: "{{openshift_logging_mux_app_ca}}" + - name: "OPS_HOST" + value: "{{openshift_logging_mux_ops_host}}" + - name: "OPS_PORT" + value: "{{openshift_logging_mux_ops_port}}" + - name: "OPS_CLIENT_CERT" + value: "{{openshift_logging_mux_ops_client_cert}}" + - name: "OPS_CLIENT_KEY" + value: "{{openshift_logging_mux_ops_client_key}}" + - name: "OPS_CA" + value: "{{openshift_logging_mux_ops_ca}}" + - name: "JOURNAL_SOURCE" + value: "{{openshift_logging_mux_journal_source | default('')}}" + - name: "JOURNAL_READ_FROM_HEAD" + value: "{{openshift_logging_mux_journal_read_from_head|lower}}" + - name: FORWARD_LISTEN_HOST + value: "{{ openshift_logging_mux_hostname }}" + - name: FORWARD_LISTEN_PORT + value: "{{ openshift_logging_mux_port }}" + - name: USE_MUX + value: "true" + - name: "BUFFER_QUEUE_LIMIT" + value: "{{ openshift_logging_mux_buffer_queue_limit }}" + - name: "BUFFER_SIZE_LIMIT" + value: "{{ openshift_logging_mux_buffer_size_limit }}" + - name: "MUX_CPU_LIMIT" + valueFrom: + resourceFieldRef: + containerName: "mux" + resource: limits.cpu + - name: "MUX_MEMORY_LIMIT" + valueFrom: + resourceFieldRef: + containerName: "mux" + resource: limits.memory + - name: "FILE_BUFFER_LIMIT" + value: "{{ openshift_logging_mux_file_buffer_limit | default('2Gi') }}" + +{% if openshift_logging_mux_remote_syslog is defined and openshift_logging_mux_remote_syslog %} + - name: USE_REMOTE_SYSLOG + value: "true" +{% endif %} + +{% if 
openshift_logging_mux_remote_syslog_host is defined %}
+        - name: REMOTE_SYSLOG_HOST
+          value: "{{ openshift_logging_mux_remote_syslog_host }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_port is defined %}
+        - name: REMOTE_SYSLOG_PORT
+          value: "{{ openshift_logging_mux_remote_syslog_port }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_severity is defined %}
+        - name: REMOTE_SYSLOG_SEVERITY
+          value: "{{ openshift_logging_mux_remote_syslog_severity }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_facility is defined %}
+        - name: REMOTE_SYSLOG_FACILITY
+          value: "{{ openshift_logging_mux_remote_syslog_facility }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_remove_tag_prefix is defined %}
+        - name: REMOTE_SYSLOG_REMOVE_TAG_PREFIX
+          value: "{{ openshift_logging_mux_remote_syslog_remove_tag_prefix }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_tag_key is defined %}
+        - name: REMOTE_SYSLOG_TAG_KEY
+          value: "{{ openshift_logging_mux_remote_syslog_tag_key }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_use_record is defined %}
+        - name: REMOTE_SYSLOG_USE_RECORD
+          value: "{{ openshift_logging_mux_remote_syslog_use_record }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_payload_key is defined %}
+        - name: REMOTE_SYSLOG_PAYLOAD_KEY
+          value: "{{ openshift_logging_mux_remote_syslog_payload_key }}"
+{% endif %}
+
+      volumes:
+      - name: config
+        configMap:
+          name: logging-mux
+      - name: certs
+        secret:
+          secretName: logging-fluentd
+      - name: dockerhostname
+        hostPath:
+          path: /etc/hostname
+      - name: localtime
+        hostPath:
+          path: /etc/localtime
+      - name: muxcerts
+        secret:
+          secretName: logging-mux
+      - name: filebufferstorage
+{% if openshift_logging_mux_file_buffer_storage_type == 'pvc' %}
+        persistentVolumeClaim:
+          claimName: {{ openshift_logging_mux_file_buffer_pvc_name }}
+{% elif openshift_logging_mux_file_buffer_storage_type == 'hostmount' %}
+        hostPath:
+          path: "/var/log/fluentd"
+{% else %}
+        emptydir: {}
+{% endif %}
diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
deleted file mode 100644
index 2337c33d5..000000000
--- a/roles/openshift_logging_mux/templates/mux.j2
+++ /dev/null
@@ -1,202 +0,0 @@
-apiVersion: "v1"
-kind: "DeploymentConfig"
-metadata:
-  name: "{{deploy_name}}"
-  labels:
-    provider: openshift
-    component: "{{component}}"
-    logging-infra: "{{logging_component}}"
-spec:
-  replicas: {{mux_replicas|default(1)}}
-  selector:
-    provider: openshift
-    component: "{{component}}"
-    logging-infra: "{{logging_component}}"
-  strategy:
-    rollingParams:
-      intervalSeconds: 1
-      timeoutSeconds: 600
-      updatePeriodSeconds: 1
-    type: Rolling
-  template:
-    metadata:
-      name: "{{deploy_name}}"
-      labels:
-        logging-infra: "{{logging_component}}"
-        provider: openshift
-        component: "{{component}}"
-    spec:
-      serviceAccountName: aggregated-logging-mux
-{% if mux_node_selector is iterable and mux_node_selector | length > 0 %}
-      nodeSelector:
-{% for key, value in mux_node_selector.items() %}
-        {{key}}: "{{value}}"
-{% endfor %}
-{% endif %}
-      containers:
-      - name: "mux"
-        image: {{image}}
-        imagePullPolicy: IfNotPresent
-{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) or (mux_cpu_request is defined and mux_cpu_request is not none) %}
-        resources:
-{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %}
-          limits:
-{% if mux_cpu_limit is not none %}
-            cpu: "{{mux_cpu_limit}}"
-{% endif %}
-{% if mux_memory_limit is not none %}
-            memory: "{{mux_memory_limit}}"
-{% endif %}
-{% endif %}
-{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_request is defined and mux_cpu_request is not none) %}
-          requests:
-{% if mux_cpu_request is not none %}
-            cpu: "{{mux_cpu_request}}"
-{% endif %}
-{% if mux_memory_limit is not none %}
-            memory: "{{mux_memory_limit}}"
-{% endif %}
-{% endif %}
-{% endif %}
-        ports:
-        - containerPort: {{ openshift_logging_mux_port }}
-          name: mux-forward
-        volumeMounts:
-        - name: config
-          mountPath: /etc/fluent/configs.d/user
-          readOnly: true
-        - name: certs
-          mountPath: /etc/fluent/keys
-          readOnly: true
-        - name: dockerhostname
-          mountPath: /etc/docker-hostname
-          readOnly: true
-        - name: localtime
-          mountPath: /etc/localtime
-          readOnly: true
-        - name: muxcerts
-          mountPath: /etc/fluent/muxkeys
-          readOnly: true
-        - name: filebufferstorage
-          mountPath: /var/lib/fluentd
-        env:
-        - name: "K8S_HOST_URL"
-          value: "{{openshift_logging_mux_master_url}}"
-        - name: "ES_HOST"
-          value: "{{openshift_logging_mux_app_host}}"
-        - name: "ES_PORT"
-          value: "{{openshift_logging_mux_app_port}}"
-        - name: "ES_CLIENT_CERT"
-          value: "{{openshift_logging_mux_app_client_cert}}"
-        - name: "ES_CLIENT_KEY"
-          value: "{{openshift_logging_mux_app_client_key}}"
-        - name: "ES_CA"
-          value: "{{openshift_logging_mux_app_ca}}"
-        - name: "OPS_HOST"
-          value: "{{openshift_logging_mux_ops_host}}"
-        - name: "OPS_PORT"
-          value: "{{openshift_logging_mux_ops_port}}"
-        - name: "OPS_CLIENT_CERT"
-          value: "{{openshift_logging_mux_ops_client_cert}}"
-        - name: "OPS_CLIENT_KEY"
-          value: "{{openshift_logging_mux_ops_client_key}}"
-        - name: "OPS_CA"
-          value: "{{openshift_logging_mux_ops_ca}}"
-        - name: "JOURNAL_SOURCE"
-          value: "{{openshift_logging_mux_journal_source | default('')}}"
-        - name: "JOURNAL_READ_FROM_HEAD"
-          value: "{{openshift_logging_mux_journal_read_from_head|lower}}"
-        - name: FORWARD_LISTEN_HOST
-          value: "{{ openshift_logging_mux_hostname }}"
-        - name: FORWARD_LISTEN_PORT
-          value: "{{ openshift_logging_mux_port }}"
-        - name: USE_MUX
-          value: "true"
-        - name: "BUFFER_QUEUE_LIMIT"
-          value: "{{ openshift_logging_mux_buffer_queue_limit }}"
-        - name: "BUFFER_SIZE_LIMIT"
-          value: "{{ openshift_logging_mux_buffer_size_limit }}"
-        - name: "MUX_CPU_LIMIT"
-          valueFrom:
-            resourceFieldRef:
-              containerName: "mux"
-              resource: limits.cpu
-        - name: "MUX_MEMORY_LIMIT"
-          valueFrom:
-            resourceFieldRef:
-              containerName: "mux"
-              resource: limits.memory
-        - name: "FILE_BUFFER_LIMIT"
-          value: "{{ openshift_logging_mux_file_buffer_limit | default('2Gi') }}"
-
-{% if openshift_logging_mux_remote_syslog is defined and openshift_logging_mux_remote_syslog %}
-        - name: USE_REMOTE_SYSLOG
-          value: "true"
-{% endif %}
-
-{% if openshift_logging_mux_remote_syslog_host is defined %}
-        - name: REMOTE_SYSLOG_HOST
-          value: "{{ openshift_logging_mux_remote_syslog_host }}"
-{% endif %}
-
-{% if openshift_logging_mux_remote_syslog_port is defined %}
-        - name: REMOTE_SYSLOG_PORT
-          value: "{{ openshift_logging_mux_remote_syslog_port }}"
-{% endif %}
-
-{% if openshift_logging_mux_remote_syslog_severity is defined %}
-        - name: REMOTE_SYSLOG_SEVERITY
-          value: "{{ openshift_logging_mux_remote_syslog_severity }}"
-{% endif %}
-
-{% if openshift_logging_mux_remote_syslog_facility is defined %}
-        - name: REMOTE_SYSLOG_FACILITY
-          value: "{{ openshift_logging_mux_remote_syslog_facility }}"
-{% endif %}
-
-{% if openshift_logging_mux_remote_syslog_remove_tag_prefix is defined %}
-        - name: REMOTE_SYSLOG_REMOVE_TAG_PREFIX
-          value: "{{ openshift_logging_mux_remote_syslog_remove_tag_prefix }}"
-{% endif %}
-
-{% if openshift_logging_mux_remote_syslog_tag_key is defined %}
-        - name: REMOTE_SYSLOG_TAG_KEY
-          value: "{{ openshift_logging_mux_remote_syslog_tag_key }}"
-{% endif %}
-
-{% if openshift_logging_mux_remote_syslog_use_record is defined %}
-        - name: REMOTE_SYSLOG_USE_RECORD
-          value: "{{ openshift_logging_mux_remote_syslog_use_record }}"
-{% endif %}
-
-{% if openshift_logging_mux_remote_syslog_payload_key is defined %}
-        - name: REMOTE_SYSLOG_PAYLOAD_KEY
-          value: "{{ openshift_logging_mux_remote_syslog_payload_key }}"
-{% endif %}
-
-      volumes:
-      - name: config
-        configMap:
-          name: logging-mux
-      - name: certs
-        secret:
-          secretName: logging-fluentd
-      - name: dockerhostname
-        hostPath:
-          path: /etc/hostname
-      - name: localtime
-        hostPath:
-          path: /etc/localtime
-      - name: muxcerts
-        secret:
-          secretName: logging-mux
-      - name: filebufferstorage
-{% if openshift_logging_mux_file_buffer_storage_type == 'pvc' %}
-        persistentVolumeClaim:
-          claimName: {{ openshift_logging_mux_file_buffer_pvc_name }}
-{% elif openshift_logging_mux_file_buffer_storage_type == 'hostmount' %}
-        hostPath:
-          path: "/var/log/fluentd"
-{% else %}
-        emptydir: {}
-{% endif %}
--
cgit v1.2.1