author     Suren A. Chilingaryan <csa@suren.me>   2018-07-05 16:22:58 +0200
committer  Suren A. Chilingaryan <csa@suren.me>   2018-07-05 16:22:58 +0200
commit     813756495d5ef33fa3cc95d69b6d88418ebe7bb1 (patch)
tree       d923e31d8407c9d8c04407a1224ff1afa4be2a91 /tasks/main.yml
Squashed 'roles/cuda/' content from commit f82a4fe
git-subtree-dir: roles/cuda
git-subtree-split: f82a4fedb62a410b1f05454ee5ba5f2e5ff0a16c
Diffstat (limited to 'tasks/main.yml')
-rw-r--r--   tasks/main.yml   46
1 file changed, 46 insertions(+), 0 deletions(-)
diff --git a/tasks/main.yml b/tasks/main.yml
new file mode 100644
index 0000000..6d846f8
--- /dev/null
+++ b/tasks/main.yml
@@ -0,0 +1,46 @@
+---
+# tasks file for ansible-role-cuda
+- name: "Gather OS specific variables"
+  include_vars: "{{ item }}"
+  with_first_found:
+    - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version }}.yml"
+    - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version }}.yml"
+    - "{{ ansible_distribution|lower }}.yml"
+    - "{{ ansible_os_family|lower }}.yml"
+
+- block:
+  - include_tasks: configure_yum.yml
+    when: ansible_pkg_mgr == 'yum'
+
+  - include_tasks: configure_apt.yml
+    when: ansible_pkg_mgr == 'apt'
+
+  - name: Install CUDA and related packages (1.5-2GB download, also restarts if cuda_restart_node_on_install is set to True)
+    package:
+      name: "{{ item }}"
+      state: present
+    with_items: "{{ cuda_packages }}"
+    register: cuda_packages_installation
+    notify:
+      - ZZ CUDA Restart server
+      - ZZ CUDA Wait for server to restart
+
+  - name: Template CUDA paths to user environments
+    template:
+      src: cuda.sh.j2
+      dest: /etc/profile.d/cuda.sh
+      mode: 0755
+    when: cuda_bash_profile
+
+  - include_tasks: cuda_init.yml
+    when: cuda_init == True
+
+  # This is here because if we in the same playbook try to start slurmd without
+  # having run the cuda_init.sh script then slurmd doesn't start and the play fails.
+  # todo: reload nvidia modules/etc instead of restart
+  - name: flush the handlers - so that the node is rebooted after CUDA is installed and that the GPUs are initialized before we start slurm
+    meta: flush_handlers
+
+  when: gpu == True
+
+# vim:ft=ansible:
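
Note on the variable lookup: the with_first_found task above loads a per-distribution file from vars/, which is not part of this diff. A minimal sketch of what such a file might contain for a RedHat 7 host follows; the file name, package names, and default value are illustrative guesses, not taken from the role. Only the variable names cuda_packages and cuda_restart_node_on_install appear in the tasks above.

---
# vars/redhat-7.yml -- hypothetical sketch, not part of this commit
cuda_packages:
  - cuda
  - cuda-drivers
cuda_restart_node_on_install: false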
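
Note on configure_yum.yml / configure_apt.yml: these included files set up the NVIDIA package repository before installation and are likewise outside this diff. A rough sketch of the yum side, assuming variables such as cuda_yum_repo_baseurl and cuda_yum_repo_gpgkey (both names are assumptions):

---
# tasks/configure_yum.yml -- hypothetical sketch, not part of this commit
- name: Add NVIDIA CUDA yum repository
  yum_repository:
    name: cuda
    description: NVIDIA CUDA repository
    baseurl: "{{ cuda_yum_repo_baseurl }}"  # assumed variable name
    gpgkey: "{{ cuda_yum_repo_gpgkey }}"    # assumed variable name
    gpgcheck: yes
    enabled: yes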
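
Note on the notified handlers: "ZZ CUDA Restart server" and "ZZ CUDA Wait for server to restart" are defined in the role's handlers, not in tasks/main.yml. A minimal sketch of what they might look like, assuming the reboot is gated on cuda_restart_node_on_install; the module choices, delay, and timeout are guesses:

---
# handlers/main.yml -- hypothetical sketch, not part of this commit
- name: ZZ CUDA Restart server
  command: /sbin/shutdown -r +1 "Rebooting to finish CUDA installation"
  when: cuda_restart_node_on_install | bool

- name: ZZ CUDA Wait for server to restart
  wait_for_connection:
    delay: 90
    timeout: 1200
  when: cuda_restart_node_on_install | bool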
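
Note on the profile template: cuda.sh.j2 is rendered to /etc/profile.d/cuda.sh so that login shells pick up the CUDA toolchain. The template is not in this diff; a plausible minimal version, assuming the toolkit lives under /usr/local/cuda and an override variable named cuda_home (both assumptions), would be:

# templates/cuda.sh.j2 -- hypothetical sketch, not part of this commit
export PATH={{ cuda_home | default('/usr/local/cuda') }}/bin:$PATH
export LD_LIBRARY_PATH={{ cuda_home | default('/usr/local/cuda') }}/lib64:$LD_LIBRARY_PATH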
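
Note on cuda_init.yml: per the comment above the flush_handlers task, it runs a cuda_init.sh script so the GPU devices are initialized before slurmd is started in the same play. A rough sketch, assuming the script is shipped by the role itself; the source template name and install path are guesses:

---
# tasks/cuda_init.yml -- hypothetical sketch, not part of this commit
- name: Install GPU initialization script
  template:
    src: cuda_init.sh.j2
    dest: /usr/local/bin/cuda_init.sh
    mode: 0755

- name: Run GPU initialization so device nodes exist before slurmd starts
  command: /usr/local/bin/cuda_init.sh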