path: root/roles/openshift_storage_glusterfs/tasks/glusterfs_uninstall.yml
---
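# Uninstall tasks for a GlusterFS/heketi deployment: delete the OpenShift
# objects created during installation, unlabel GlusterFS nodes, remove on-host
# configuration and logs, and (when glusterfs_wipe is set) wipe the backing
# storage devices so they can be reused.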

- name: Delete pre-existing heketi resources
  oc_obj:
    namespace: "{{ glusterfs_namespace }}"
    kind: "{{ item.kind }}"
    name: "{{ item.name | default(omit) }}"
    selector: "{{ item.selector | default(omit) }}"
    state: absent
  with_items:
  - kind: "template,route,service,dc,jobs,secret"
    selector: "deploy-heketi"
  - kind: "svc"
    name: "heketi-storage-endpoints"
  - kind: "svc"
    name: "heketi-storage"
  - kind: "secret"
    name: "heketi-{{ glusterfs_name }}-topology-secret"
  - kind: "template,route,service,dc"
    name: "heketi-{{ glusterfs_name }}"
  - kind: "svc"
    name: "heketi-db-{{ glusterfs_name }}-endpoints"
  - kind: "sa"
    name: "heketi-{{ glusterfs_name }}-service-account"
  - kind: "secret"
    name: "heketi-{{ glusterfs_name }}-admin-secret"
  - kind: "secret"
    name: "heketi-{{ glusterfs_name }}-config-secret"
  failed_when: False

- name: Delete pre-existing GlusterFS resources
  oc_obj:
    namespace: "{{ glusterfs_namespace }}"
    kind: "{{ item.kind }}"
    name: "{{ item.name }}"
    state: absent
  with_items:
  - kind: template
    name: glusterfs
  - kind: daemonset
    name: "glusterfs-{{ glusterfs_name }}"
  - kind: storageclass
    name: "glusterfs-{{ glusterfs_name }}"

- name: Unlabel any existing GlusterFS nodes
  oc_label:
    name: "{{ hostvars[item].openshift.node.nodename }}"
    kind: node
    state: absent
    labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"
  with_items: "{{ groups.all }}"

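# Remove on-host GlusterFS/heketi state and logs from each GlusterFS node.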
- name: Delete pre-existing GlusterFS config
  file:
    path: /var/lib/glusterd
    state: absent
  delegate_to: "{{ item }}"
  with_items: "{{ glusterfs_nodes | default([]) }}"

- name: Delete pre-existing additional GlusterFS config
  file:
    path: /etc/glusterfs
    state: absent
  delegate_to: "{{ item }}"
  with_items: "{{ glusterfs_nodes | default([]) }}"

- name: Delete pre-existing Heketi config
  file:
    path: /var/lib/heketi
    state: absent
  delegate_to: "{{ item }}"
  with_items: "{{ glusterfs_nodes | default([]) }}"

- name: Delete GlusterFS logs
  file:
    path: /var/log/glusterfs
    state: absent
  delegate_to: "{{ item }}"
  with_items: "{{ glusterfs_nodes | default([]) }}"

- name: Delete deploy resources
  oc_obj:
    namespace: "{{ glusterfs_namespace }}"
    kind: "{{ item.kind }}"
    name: "{{ item.name | default(omit) }}"
    selector: "{{ item.selector | default(omit) }}"
    state: absent
  with_items:
  - kind: "template,route,service,jobs,dc,secret"
    selector: "deploy-heketi"
  - kind: "svc"
    name: "heketi-storage-endpoints"
  - kind: "secret"
    name: "heketi-{{ glusterfs_name }}-topology-secret"

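# pvdisplay prints one "<pv_name> <vg_name>" line for each listed device that
# is an LVM physical volume; devices that are not PVs cause a non-zero exit,
# which is why failed_when: False is set.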
- name: Get GlusterFS storage devices state
  command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}"
  register: devices_info
  delegate_to: "{{ item }}"
  with_items: "{{ glusterfs_nodes | default([]) }}"
  failed_when: False
  when: glusterfs_wipe

  # Runs "lvremove -ff <vg>; vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
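  # For a hypothetical pvdisplay line "/dev/sdc vg_0123456789abcdef" the
  # generated command is:
  #   lvremove -ff vg_0123456789abcdef; vgremove -fy vg_0123456789abcdef; pvremove -fy /dev/sdc;
  # A line with only a PV name (no VG) yields just the pvremove call. The task
  # retries (up to 30 times, 1 second apart) while LVM reports
  # "contains a filesystem in use", giving GlusterFS time to release the devices.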
- name: Clear GlusterFS storage device contents
  shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}lvremove -ff {{ fields[1] }}; vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
  delegate_to: "{{ item.item }}"
  with_items: "{{ devices_info.results }}"
  register: clear_devices
  until:
  - "'contains a filesystem in use' not in clear_devices.stderr"
  delay: 1
  retries: 30
  when:
  - glusterfs_wipe
  - item.stdout_lines | count > 0