author     Rich Megginson <rmeggins@redhat.com>  2016-06-03 16:32:04 -0600
committer  Rich Megginson <rmeggins@redhat.com>  2016-09-01 16:20:46 -0600
commit     8affb2acd8018182137536f4ed1dc3a6b7bb9e20 (patch)
tree       bd8ace2a76fb01debcad4a7820b41fdc5556e6da /roles/openshift_hosted_logging/tasks
parent     bdb4788982c71214b409b8db3ba2054305c851df (diff)
initial support for v1.3 with logging v1.3
This also includes some fixes to make deploying logging more idempotent, such as ignoring failures when trying to create objects that already exist.
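The idempotency fixes all use the same Ansible pattern, visible throughout the diff below: register the result of the oc command, and count a non-zero exit as a failure only when stderr does not report that the object already exists. A minimal sketch of the pattern, with an illustrative task name and object file:

- name: "Create an object, tolerating ones that already exist"
  command: >
    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f /tmp/example-object.yaml
  register: create_output
  # rc == 0 means created; rc == 1 with 'exists' in stderr means it was already there
  failed_when: "create_output.rc == 1 and 'exists' not in create_output.stderr"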
Diffstat (limited to 'roles/openshift_hosted_logging/tasks')
-rw-r--r--  roles/openshift_hosted_logging/tasks/deploy_logging.yaml  90
1 file changed, 62 insertions(+), 28 deletions(-)
diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
index f19ce3e2b..082bb6ea2 100644
--- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
+++ b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
@@ -1,4 +1,7 @@
---
+ - debug: msg="WARNING target_registry is deprecated, use openshift_hosted_logging_image_prefix instead"
+ when: target_registry is defined and target_registry
+
- fail: msg="This role requires the following vars to be defined. openshift_hosted_logging_master_public_url, openshift_hosted_logging_hostname, openshift_hosted_logging_elasticsearch_cluster_size"
when: "openshift_hosted_logging_hostname is not defined or
openshift_hosted_logging_elasticsearch_cluster_size is not defined or
@@ -35,21 +38,20 @@
register: secret_output
failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr"
- - name: "Copy serviceAccount file"
- copy:
- dest: /tmp/logging-deployer-sa.yaml
- src: "{{role_path}}/files/logging-deployer-sa.yaml"
- force: yes
-
- - name: "Create logging-deployer service account"
+ - name: "Create templates for logging accounts and the deployer"
command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f /tmp/logging-deployer-sa.yaml
- register: deployer_output
- failed_when: "deployer_output.rc == 1 and 'exists' not in deployer_output.stderr"
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f /usr/share/openshift/examples/infrastructure-templates/enterprise/logging-deployer.yaml
+ register: template_output
+ failed_when: "template_output.rc == 1 and 'exists' not in template_output.stderr"
+
+ - name: "Process the logging accounts template"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig process logging-deployer-account-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -"
+ register: process_deployer_accounts
+ failed_when: process_deployer_accounts.rc == 1 and 'already exists' not in process_deployer_accounts.stderr
- name: "Set permissions for logging-deployer service account"
command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-role-to-user edit system:serviceaccount:logging:logging-deployer
+ {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer
register: permiss_output
failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr"
@@ -65,14 +67,14 @@
register: fluentd2_output
failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
- - name: "Create deployer template"
+ - name: "Create ConfigMap for deployer parameters"
command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f /usr/share/openshift/examples/infrastructure-templates/enterprise/logging-deployer.yaml
- register: template_output
- failed_when: "template_output.rc == 1 and 'exists' not in template_output.stderr"
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }}
+ register: deployer_configmap_output
+ failed_when: "deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr"
- name: "Process the deployer template"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig process logging-deployer-template -v {{ oc_process_values }} | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}"
register: process_deployer
failed_when: process_deployer.rc == 1 and 'already exists' not in process_deployer.stderr
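In the v1.3 flow, the deployer parameters move out of oc process -v KEY=value pairs and into a logging-deployer ConfigMap that the deployer pod reads, while oc new-app instantiates the template directly. Expanded by hand, the ConfigMap task above would correspond roughly to the following (keys as documented in the upstream logging deployer README; hostname, URL, and cluster size are illustrative):

- name: "Create the logging-deployer ConfigMap with explicit parameters"
  command: >
    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer
    --from-literal kibana-hostname=kibana.example.com
    --from-literal public-master-url=https://master.example.com:8443
    --from-literal es-cluster-size=1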
@@ -83,39 +85,71 @@
retries: 15
delay: 10
- - name: "Process support template"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig process logging-support-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -"
- register: process_support
- failed_when: process_support.rc == 1 and 'already exists' not in process_support.stderr
+ - name: "Process imagestream template"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-imagestream-template {{ oc_new_app_values }}"
+ when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
+ register: process_is
+ failed_when: process_is.rc == 1 and 'already exists' not in process_is.stderr
- name: "Set insecured registry"
command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig annotate is --all openshift.io/image.insecureRepository=true --overwrite"
- when: "target_registry is defined and insecure_registry == 'true'"
+ when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
- name: "Wait for imagestreams to become available"
shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get is | grep logging-fluentd"
+ when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
register: result
until: result.rc == 0
failed_when: result.rc == 1 and 'not found' not in result.stderr
retries: 20
delay: 10
- - name: "Wait for replication controllers to become available"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get rc | grep logging-fluentd-1"
+ - name: "Wait for component pods to be running"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
+ with_items:
+ - es
+ - kibana
+ - curator
register: result
until: result.rc == 0
- failed_when: result.rc == 1 and 'not found' not in result.stderr
+ failed_when: result.rc == 1 or 'Error' in result.stderr
+ retries: 20
+ delay: 10
+
+ - name: "Wait for ops component pods to be running"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
+ with_items:
+ - es-ops
+ - kibana-ops
+ - curator-ops
+ when: openshift_hosted_logging_enable_ops_cluster is defined and openshift_hosted_logging_enable_ops_cluster
+ register: result
+ until: result.rc == 0
+ failed_when: result.rc == 1 or 'Error' in result.stderr
retries: 20
delay: 10
+ - name: "Wait for fluentd DaemonSet to exist"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get daemonset logging-fluentd"
+ register: result
+ until: result.rc == 0
+ failed_when: result.rc == 1 or 'Error' in result.stderr
+ retries: 20
+ delay: 10
- - name: "Scale fluentd deployment config"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale dc/logging-fluentd --replicas={{ fluentd_replicas | default('1') }}
+ - name: "Deploy fluentd by labeling the node"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{ openshift_hostname }} {{ openshift_hosted_logging_fluentd_nodeselector if openshift_hosted_logging_fluentd_nodeselector is defined else 'logging-infra-fluentd=true' }}"
+ - name: "Wait for fluentd to be running"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component=fluentd | grep Running"
+ register: result
+ until: result.rc == 0
+ failed_when: result.rc == 1 or 'Error' in result.stderr
+ retries: 20
+ delay: 10
- debug:
- msg: "Logging components deployed. Note persistant volume for elasticsearch must be setup manually"
+ msg: "Logging components deployed. Note persistent volume for elasticsearch must be setup manually"
- name: Delete temp directory
file:
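The final hunk also changes how fluentd is deployed: rather than scaling a logging-fluentd deployment config, logging v1.3 ships fluentd as a DaemonSet, so its pods are scheduled only onto nodes carrying the selector label. Labeling an additional node by hand follows the same shape as the labeling task above (the node name is illustrative; the label matches the role's default nodeselector):

- name: "Schedule fluentd onto an additional node"
  command: >
    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node node2.example.com logging-infra-fluentd=true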