---
- name: Delete pre-existing heketi resources
  oc_obj:
    namespace: "{{ glusterfs_namespace }}"
    kind: "{{ item.kind }}"
    name: "{{ item.name | default(omit) }}"
    selector: "{{ item.selector | default(omit) }}"
    state: absent
  with_items:
  - kind: "template,route,service,dc,jobs,secret"
    selector: "deploy-heketi"
  - kind: "svc"
    name: "heketi-storage-endpoints"
  - kind: "svc"
    name: "heketi-storage"
  - kind: "secret"
    name: "heketi-{{ glusterfs_name }}-topology-secret"
  - kind: "template,route,service,dc"
    name: "heketi-{{ glusterfs_name }}"
  - kind: "svc"
    name: "heketi-db-{{ glusterfs_name }}-endpoints"
  - kind: "sa"
    name: "heketi-{{ glusterfs_name }}-service-account"
  - kind: "secret"
    name: "heketi-{{ glusterfs_name }}-admin-secret"
  - kind: "secret"
    name: "heketi-{{ glusterfs_name }}-config-secret"
  failed_when: False

- name: Delete pre-existing GlusterFS resources
  oc_obj:
    namespace: "{{ glusterfs_namespace }}"
    kind: "{{ item.kind }}"
    name: "{{ item.name }}"
    state: absent
  with_items:
  - kind: template
    name: glusterfs
  - kind: daemonset
    name: "glusterfs-{{ glusterfs_name }}"
  - kind: storageclass
    name: "glusterfs-{{ glusterfs_name }}"

- name: Unlabel any existing GlusterFS nodes
  oc_label:
    name: "{{ hostvars[item].openshift.node.nodename }}"
    kind: node
    state: absent
    labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"
  with_items: "{{ groups.all }}"

- name: Delete pre-existing GlusterFS config
  file:
    path: /var/lib/glusterd
    state: absent
  delegate_to: "{{ item }}"
  with_items: "{{ glusterfs_nodes | default([]) }}"

- name: Delete pre-existing additional GlusterFS config
  file:
    path: /etc/glusterfs
    state: absent
  delegate_to: "{{ item }}"
  with_items: "{{ glusterfs_nodes | default([]) }}"

- name: Delete pre-existing Heketi config
  file:
    path: /var/lib/heketi
    state: absent
  delegate_to: "{{ item }}"
  with_items: "{{ glusterfs_nodes | default([]) }}"

- name: Delete GlusterFS logs
  file:
    path: /var/log/glusterfs
    state: absent
  delegate_to: "{{ item }}"
  with_items: "{{ glusterfs_nodes | default([]) }}"

- name: Delete deploy resources
  oc_obj:
    namespace: "{{ glusterfs_namespace }}"
    kind: "{{ item.kind }}"
    name: "{{ item.name | default(omit) }}"
    selector: "{{ item.selector | default(omit) }}"
    state: absent
  with_items:
  - kind: "template,route,service,jobs,dc,secret"
    selector: "deploy-heketi"
  - kind: "svc"
    name: "heketi-storage-endpoints"
  - kind: "secret"
    name: "heketi-{{ glusterfs_name }}-topology-secret"

- name: Get GlusterFS storage devices state
  command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}"
  register: devices_info
  delegate_to: "{{ item }}"
  with_items: "{{ glusterfs_nodes | default([]) }}"
  failed_when: False
  when: glusterfs_wipe

# Runs "lvremove -ff <vg>; vgremove -fy <vg>; pvremove -fy <pv>" for every
# device found to be a physical volume.
- name: Clear GlusterFS storage device contents
  shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}lvremove -ff {{ fields[1] }}; vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
  delegate_to: "{{ item.item }}"
  with_items: "{{ devices_info.results }}"
  register: clear_devices
  until:
  - "'contains a filesystem in use' not in clear_devices.stderr"
  delay: 1
  retries: 30
  when:
  - glusterfs_wipe
  - item.stdout_lines | count > 0
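
# Illustration only (comments, not a task), to show what the wipe template above
# generates. Assuming pvdisplay reported a hypothetical device "/dev/sdb" assigned
# to a hypothetical volume group "vg_deadbeef", the shell line would expand to:
#   lvremove -ff vg_deadbeef; vgremove -fy vg_deadbeef; pvremove -fy /dev/sdb;
# For a physical volume with no volume group (only one field on the pvdisplay
# line), the vg cleanup is skipped and only "pvremove -fy /dev/sdb;" is emitted.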