apiVersion: "v1" kind: "DeploymentConfig" metadata: name: "{{ deploy_name }}" labels: provider: openshift component: "{{ component }}" logging-infra: "{{ logging_component }}" spec: replicas: {{ kibana_replicas | default(1) }} selector: provider: openshift component: "{{ component }}" logging-infra: "{{ logging_component }}" strategy: rollingParams: intervalSeconds: 1 timeoutSeconds: 600 updatePeriodSeconds: 1 type: Rolling template: metadata: name: "{{ deploy_name }}" labels: logging-infra: "{{ logging_component }}" provider: openshift component: "{{ component }}" spec: serviceAccountName: aggregated-logging-kibana {% if kibana_node_selector is iterable and kibana_node_selector | length > 0 %} nodeSelector: {% for key, value in kibana_node_selector.iteritems() %} {{ key }}: "{{ value }}" {% endfor %} {% endif %} containers: - name: "kibana" image: {{ image }} imagePullPolicy: Always {% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") %} resources: limits: {% if kibana_cpu_limit is not none and kibana_cpu_limit != "" %} cpu: "{{ kibana_cpu_limit }}" {% endif %} {% if kibana_memory_limit is not none and kibana_memory_limit != "" %} memory: "{{ kibana_memory_limit }}" requests: memory: "{{ kibana_memory_limit }}" {% endif %} {% endif %} env: - name: "ES_HOST" value: "{{ es_host }}" - name: "ES_PORT" value: "{{ es_port }}" - name: "KIBANA_MEMORY_LIMIT" valueFrom: resourceFieldRef: containerName: kibana resource: limits.memory volumeMounts: - name: kibana mountPath: /etc/kibana/keys readOnly: true readinessProbe: exec: command: - "/usr/share/kibana/probe/readiness.sh" initialDelaySeconds: 5 timeoutSeconds: 4 periodSeconds: 5 - name: "kibana-proxy" image: {{ proxy_image }} imagePullPolicy: Always {% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") %} resources: limits: {% if kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "" %} cpu: "{{ kibana_proxy_cpu_limit }}" {% endif %} {% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %} memory: "{{ kibana_proxy_memory_limit }}" requests: memory: "{{ kibana_proxy_memory_limit }}" {% endif %} {% endif %} ports: - name: "oaproxy" containerPort: 3000 env: - name: "OAP_BACKEND_URL" value: "http://localhost:5601" - name: "OAP_AUTH_MODE" value: "oauth2" - name: "OAP_TRANSFORM" value: "user_header,token_header" - name: "OAP_OAUTH_ID" value: kibana-proxy - name: "OAP_MASTER_URL" value: {{ openshift_logging_kibana_master_url }} - name: "OAP_PUBLIC_MASTER_URL" value: {{ openshift_logging_kibana_master_public_url }} - name: "OAP_LOGOUT_REDIRECT" value: {{ openshift_logging_kibana_master_public_url }}/console/logout - name: "OAP_MASTER_CA_FILE" value: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" - name: "OAP_DEBUG" value: "{{ openshift_logging_kibana_proxy_debug }}" - name: "OAP_OAUTH_SECRET_FILE" value: "/secret/oauth-secret" - name: "OAP_SERVER_CERT_FILE" value: "/secret/server-cert" - name: "OAP_SERVER_KEY_FILE" value: "/secret/server-key" - name: "OAP_SERVER_TLS_FILE" value: "/secret/server-tls.json" - name: "OAP_SESSION_SECRET_FILE" value: "/secret/session-secret" - name: "OCP_AUTH_PROXY_MEMORY_LIMIT" valueFrom: resourceFieldRef: containerName: kibana-proxy resource: limits.memory 
volumeMounts: - name: kibana-proxy mountPath: /secret readOnly: true volumes: - name: kibana secret: secretName: logging-kibana - name: kibana-proxy secret: secretName: logging-kibana-proxy
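{#
  Example usage (a sketch, not taken from this repository): an Ansible task
  like the one below would render this template to disk so the result can be
  applied with `oc`. The task name, template file name, destination path, and
  all variable values are illustrative assumptions; only the variable names
  themselves come from the template above.

  - name: Generate Kibana DeploymentConfig          # hypothetical task
    template:
      src: kibana.j2                                # assumed file name for this template
      dest: /tmp/logging-kibana-dc.yaml             # illustrative destination
    vars:
      deploy_name: logging-kibana
      component: kibana
      logging_component: kibana
      image: openshift/origin-logging-kibana:latest             # illustrative image
      proxy_image: openshift/origin-logging-auth-proxy:latest   # illustrative image
      es_host: logging-es
      es_port: "9200"
      openshift_logging_kibana_master_url: https://kubernetes.default.svc
      openshift_logging_kibana_master_public_url: https://master.example.com:8443
      openshift_logging_kibana_proxy_debug: "false"

  followed by something like `oc apply -f /tmp/logging-kibana-dc.yaml -n logging`.
#}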