# roles/openshift_hosted_logging/tasks/deploy_logging.yaml
---
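# Deploys the aggregated logging stack (Elasticsearch, Fluentd, Kibana and
# Curator) into the "logging" project via the logging deployer template.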
  - debug: msg="WARNING target_registry is deprecated, use openshift_hosted_logging_image_prefix instead"
    when: target_registry is defined and target_registry

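  # Fail fast when the required configuration variables are missing.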
  - fail: msg="This role requires the following vars to be defined: openshift_hosted_logging_master_public_url, openshift_hosted_logging_hostname, openshift_hosted_logging_elasticsearch_cluster_size"
    when: "openshift_hosted_logging_hostname is not defined or
          openshift_hosted_logging_elasticsearch_cluster_size is not defined or
          openshift_hosted_logging_master_public_url is not defined"

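  # All oc/oadm calls below run against a temporary copy of the admin
  # kubeconfig so the original client config is never modified.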
  - name: Create temp directory for kubeconfig
    command: mktemp -d /tmp/openshift-ansible-XXXXXX
    register: mktemp
    changed_when: False

  - name: Copy the admin client config(s)
    command: >
      cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
    changed_when: False

  - name: "Check for logging project already exists"
    command: >
      {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging -o jsonpath='{.metadata.name}'
    register: logging_project_result
    ignore_errors: True

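  # Only create the project when the jsonpath lookup above returned nothing,
  # which keeps repeated runs idempotent.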
  - name: "Create logging project"
    command: >
      {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging
    when: logging_project_result.stdout == ""

  - name: "Changing projects"
    command: >
      {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging

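  # The deployer secret can carry custom certificates and keys; when
  # openshift_hosted_logging_secret_vars is unset, a harmless
  # nothing=/dev/null entry is passed so the secret is still created.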
  - name: "Creating logging deployer secret"
    command: >
      {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}
    register: secret_output
    failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr"

  - name: "Create templates for logging accounts and the deployer"
    command: >
      {{ openshift.common.client_binary }} create --config={{ mktemp.stdout }}/admin.kubeconfig
      -f {{ hosted_base }}/logging-deployer.yaml
      -n logging
    register: logging_import_template
    failed_when: "'already exists' not in logging_import_template.stderr and logging_import_template.rc != 0"
    changed_when: "'created' in logging_import_template.stdout"

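  # 'oc process' renders the accounts template to API objects that are piped
  # straight into 'oc create'; "already exists" errors on reruns are tolerated.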
  - name: "Process the logging accounts template"
    shell: >
      {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
      process logging-deployer-account-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -
    register: process_deployer_accounts
    failed_when: process_deployer_accounts.rc == 1 and 'already exists' not in process_deployer_accounts.stderr

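  # Grant the service accounts the privileges they need: oauth-editor for the
  # deployer, the privileged SCC plus cluster-reader for fluentd, and
  # rolebinding-reader for elasticsearch.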
  - name: "Set permissions for logging-deployer service account"
    command: >
      {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
      policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer
    register: permiss_output
    failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr"

  - name: "Set permissions for fluentd"
    command: >
      {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
      policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
    register: fluentd_output
    failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"

  - name: "Set additional permissions for fluentd"
    command: >
      {{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig
      add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
    register: fluentd2_output
    failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"

  - name: "Add rolebinding-reader to aggregated-logging-elasticsearch"
    command: >
      {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
      policy add-cluster-role-to-user rolebinding-reader
      system:serviceaccount:logging:aggregated-logging-elasticsearch
    register: rolebinding_reader_output
    failed_when: "rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr"

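  # The deployer reads its parameters from this ConfigMap;
  # deployer_cmap_params is expected to expand to the --from-literal
  # arguments built elsewhere in the role.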
  - name: "Create ConfigMap for deployer parameters"
    command: >
      {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }}
    register: deployer_configmap_output
    failed_when: "deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr"

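  # Instantiate the deployer template; the resulting deployer pod performs
  # the actual installation of the logging components.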
  - name: "Process the deployer template"
    shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}"
    register: process_deployer
    failed_when: process_deployer.rc == 1 and 'already exists' not in process_deployer.stderr

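  # Poll until the deployer pod reports Completed: up to 20 attempts,
  # 15 seconds apart (roughly five minutes).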
  - name: "Wait for image pull and deployer pod"
    shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods | grep 'logging-deployer.*Completed'"
    register: result
    until: result.rc == 0
    retries: 20
    delay: 15

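  # When pulling from an insecure registry, the imagestreams come from a
  # separate template and are annotated so image imports may skip TLS
  # verification.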
  - name: "Process imagestream template"
    shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-imagestream-template {{ oc_new_app_values }}"
    when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
    register: process_is
    failed_when: process_is.rc == 1 and 'already exists' not in process_is.stderr

  - name: "Set insecured registry"
    command:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig annotate is --all  openshift.io/image.insecureRepository=true --overwrite"
    when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry

  - name: "Wait for imagestreams to become available"
    shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get is | grep logging-fluentd"
    when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
    register: result
    until: result.rc == 0
    failed_when: result.rc == 1 and 'not found' not in result.stderr
    retries: 20
    delay: 5

  - name: "Wait for component pods to be running"
    shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
    with_items:
      - es
      - kibana
      - curator
    register: result
    until: result.rc == 0
    failed_when: result.rc == 1 or 'Error' in result.stderr
    retries: 20
    delay: 15

  - name: "Wait for ops component pods to be running"
    shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
    with_items:
      - es-ops
      - kibana-ops
      - curator-ops
    when: openshift_hosted_logging_enable_ops_cluster is defined and openshift_hosted_logging_enable_ops_cluster
    register: result
    until: result.rc == 0
    failed_when: result.rc == 1 or 'Error' in result.stderr
    retries: 20
    delay: 15

  - name: "Wait for fluentd DaemonSet to exist"
    shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get daemonset logging-fluentd"
    register: result
    until: result.rc == 0
    failed_when: result.rc == 1 or 'Error' in result.stderr
    retries: 20
    delay: 5

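  # The fluentd DaemonSet only schedules onto nodes carrying its selector
  # label, so labeling the nodes (all of them by default) is what actually
  # deploys fluentd.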
  - name: "Deploy fluentd by labeling the node"
    shell:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node --overwrite=true {{ '-l' ~ openshift_hosted_logging_fluentd_nodeselector if openshift_hosted_logging_fluentd_nodeselector is defined else '--all' }} {{ openshift_hosted_logging_fluentd_nodeselector_label if openshift_hosted_logging_fluentd_nodeselector_label is defined else 'logging-infra-fluentd=true' }}"

  - name: "Wait for fluentd to be running"
    shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component=fluentd | grep Running"
    register: result
    until: result.rc == 0
    failed_when: result.rc == 1 or 'Error' in result.stderr
    retries: 20
    delay: 15

  - debug:
      msg: "Logging components deployed. Note persistent volume for elasticsearch must be setup manually"

  - name: Delete temp directory
    file:
      name: "{{ mktemp.stdout }}"
      state: absent
    changed_when: False