From 636510c3eec7317acdfded00d6237ed5f6ff3529 Mon Sep 17 00:00:00 2001 From: Andrew Block Date: Mon, 8 Feb 2016 00:10:01 -0600 Subject: New OSE3 docker host builder and OpenStack ansible provisioning support --- roles/common/pre_tasks/pre_tasks.yml | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 roles/common/pre_tasks/pre_tasks.yml (limited to 'roles') diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml new file mode 100644 index 000000000..c573bff8c --- /dev/null +++ b/roles/common/pre_tasks/pre_tasks.yml @@ -0,0 +1,4 @@ +--- +- name: Generate Environment ID + shell: echo "$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | head -c 8)" + register: env_random_id \ No newline at end of file -- cgit v1.2.1 From 80c3d3332507fe620fcab99e65f2ffd81d48a69e Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Tue, 29 Mar 2016 16:52:05 -0500 Subject: Add subscription-manager support for Hosted or Satellite --- roles/subscription-manager/README.md | 95 ++++++++++++++++++++++ roles/subscription-manager/pre_tasks/pre_tasks.yml | 37 +++++++++ roles/subscription-manager/tasks/main.yml | 93 +++++++++++++++++++++ 3 files changed, 225 insertions(+) create mode 100644 roles/subscription-manager/README.md create mode 100644 roles/subscription-manager/pre_tasks/pre_tasks.yml create mode 100644 roles/subscription-manager/tasks/main.yml (limited to 'roles') diff --git a/roles/subscription-manager/README.md b/roles/subscription-manager/README.md new file mode 100644 index 000000000..b140ad09a --- /dev/null +++ b/roles/subscription-manager/README.md @@ -0,0 +1,95 @@ +# Red Hat Subscription Manager Ansible Role + +## Parameters + +This role depends on user specified variables. These can be set in the inventory file, group_vars or passed to the playbook from the CLI. The variables are: + +### rhsm_method + +Subscription Manager method to use for registration. Valid values are: + +* **satellite** - Use a Satellite server. Additional variables required include **rhsm_server**, **rhsm_org** and either (**rhsm_username** and **rhsm_password**) or **rhsm_activationkey** +* **hosted** - Use Red Hat's CDN. Additional variables required are **rhsm_server** (defaults to RHSM CDN) and **rhsm_username** and **rhsm_password** +* none/false/blank will disable any subscription manager activities (this is the default if no parameters are set) + +Default: none + +### rhsm_server + +Subscription Manager server hostname. If using a Satellite server set the FQDN here. If using RHSM Hosted this value is ignored. + +Default: none + +### rhsm_username + +Subscription Manager username. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this. + +Default: none + +### rhsm_password + +Subscription Manager password. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this. + +Default: none + +### rhsm_org + +Optional Satellite Subscription Manager Organization. Required for Satellite, ignored if using RHSM Hosted. + +Default: none + +### rhsm_activationkey + +Optional Satellite Subscription Manager Activation Key, use this instead of **rhsm_username** and **rhsm_password** if using Satellite to provide repositories and authentication in a key instead. + +Default: none + +### rhsm_pool + +Optional Subscription Manager pool, determine this by running **subscription-manager list --available** on a registered system. Valid for RHSM Hosted or Satellite. 
Specifying **rhsm_activationkey** will ignore this option. + +Default: none + +### rhsm_repos + +Optional Repositories to enable, this can also be specified in the **rhsm_activationkey**. Valid for RHSM Hosted or Satellite. Specifying **rhsm_activationkey** will ignore this option. + +NOTE: If specifying this value in an inventory file as opposed to group_vars, be sure to define it as a proper list as such: + +rhsm_repos='["rhel-7-server-rpms", "rhel-7-server-ose-3.1-rpms", "rhel-7-server-extras-rpms"]' + +Default: none + +## Pre-tasks + +A number of variable checks are performed before any tasks to ensure the proper parameters are set. To include these checks call the pre_task yaml before any roles: + +``` + pre_tasks: + - include: roles/subscription-manager/pre_tasks/pre_tasks.yml +``` + +## Tasks + +The bulk of the work is performed in the main.yml for this role. The pre-task play will set a variable which can be checked to contitionally include this role as such: + +``` + roles: + - { role: subscription-manager, when: not hostvars.localhost.rhsm_skip, tags: 'subscription-manager' } +``` + +## Running the Playbook + +To register to RHSM Hosted with username and password: + +``` +ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_method='hosted' rhsm_username=vvaldez rhsm_password='hunter2' openstack_key_name='vvaldez'" +``` + +To register to a Satellite server with an activation key: + +``` +ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_server='10.12.32.1' rhsm_org='cloud_practice' rhsm_activationkey='rhel-7-ose-3-1' openstack_key_name='vvaldez' rhsm_method='satellite'" +``` + +To ignore any Subscription Manager activities, simple do not set any parameters or explicitly set **rhsm_method** to false. diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml new file mode 100644 index 000000000..497f39353 --- /dev/null +++ b/roles/subscription-manager/pre_tasks/pre_tasks.yml @@ -0,0 +1,37 @@ +--- +- name: Initialize Subscription Manager fact + set_fact: + rhsm_skip: false + +- name: Determine if Subscription Manager should be skipped or not + set_fact: + rhsm_skip: true + when: rhsm_method is undefined or rhsm_method is none or rhsm_method|trim == '' + +- name: Determine Subscription Manager method + fail: msg="Value for 'rhsm_method' of '{{ rhsm_method }}' is not valid, it should be one of 'hosted', 'satellite', or false/none/blank" + when: + - rhsm_method != 'hosted' and rhsm_method != 'satellite' + - not rhsm_skip + +- name: Validate Subscription Manager host is set + fail: msg="Cannot determine Subscription Manager server hostname without a value for 'rhsm_server'" + when: + - rhsm_server is undefined or rhsm_server is none or rhsm_server|trim == '' + - not rhsm_method == 'hosted' + - not rhsm_skip + +- name: Validate Subscription Manager organization is set + fail: msg="Cannot register to a Satellite server without a value for the Organization via 'rhsm_org'" + when: + - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' + - rhsm_method == 'satellite' + - not rhsm_skip + +- name: Validate Subscription Manager authentication is defined + fail: msg="Cannot register without ('rhsm_username' and 'rhsm_password') or 'rhsm_activationkey' variables set" + when: + - (rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '') or (rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '') + - rhsm_activationkey is undefined or 
rhsm_activationkey is none or rhsm_activationkey|trim == '' + - not rhsm_skip + diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml new file mode 100644 index 000000000..2e04a7a22 --- /dev/null +++ b/roles/subscription-manager/tasks/main.yml @@ -0,0 +1,93 @@ +--- +- name: Initializing Subscription Manager authenticaiton method + set_fact: + rhsm_authentication: false + +# 'rhsm_activationkey' will take precedence even if 'rhsm_username' and 'rhsm_password' are also set +- name: Setting Subscription Manager Activation Key Fact + set_fact: + rhsm_authentication: "key" + when: + - rhsm_activationkey is defined + - rhsm_activationkey is not none + - rhsm_activationkey|trim != '' + - not rhsm_authentication + +# If 'rhsm_username' and 'rhsm_password' are set but not 'rhsm_activationkey', set 'rhsm_authentication' to password +- name: Setting Subscription Manager Username and Password Fact + set_fact: + rhsm_authentication: "password" + when: + - rhsm_username is defined and rhsm_username is not none and rhsm_username|trim != '' + - rhsm_password is defined and rhsm_password is not none and rhsm_password|trim != '' + - not rhsm_authentication + +- name: Initializing registration status + set_fact: + registered: false + +- name: Checking subscription status (a failure means it is not registered and will be) + command: "/usr/bin/subscription-manager status" + ignore_errors: yes + changed_when: no + register: check_if_registered + +- name: Set registration fact + set_fact: + registered: true + when: check_if_registered.rc == 0 + +- name: Cleaning any old subscriptions + command: "/usr/bin/subscription-manager clean" + when: + - not registered + - rhsm_authentication is defined + +- name: Install Satellite certificate + command: "rpm -Uvh --force http://{{ rhsm_server }}/pub/katello-ca-consumer-latest.noarch.rpm" + when: + - not registered + - rhsm_method == 'satellite' + +- name: Register to Satellite using activation key + command: "/usr/bin/subscription-manager register --activationkey={{ rhsm_activationkey }} --org={{ rhsm_org }}" + when: + - not registered + - rhsm_authentication == 'key' + - rhsm_method == 'satellite' + +# This can apply to either Hosted or Satellite +- name: Register using username and password + command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }}" + when: + - not registered + - rhsm_authentication != "key" + +- name: Auto-attach to Subscription Manager Pool + command: "/usr/bin/subscription-manager attach --auto" + when: + - not registered + - rhsm_authentication != "key" + +- name: Attach to a specific pool + command: "/usr/bin/subscription-manager attach --pool={{ rhsm_pool }}" + when: + - rhsm_pool is defined and rhsm_pool is not none and rhsm_pool|trim != '' + - and not registered + - rhsm_authentication != "key" + +- name: Disable all repositories + command: "/usr/bin/subscription-manager repos --disable=*" + when: + - not registered + - not rhsm_authentication == "key" + +- name: Enable specified repositories + command: "/usr/bin/subscription-manager repos --enable={{ item }}" + with_items: rhsm_repos + when: + - not registered + - not rhsm_authentication == "key" + +- name: Cleaning yum repositories + command: "yum clean all" -- cgit v1.2.1 From 177950b76a185c20317aa0e89d356cdf8b97c4c3 Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Wed, 30 Mar 2016 15:46:31 -0500 Subject: Refactor role to dynamically determine rhsm_method * Removes rhsm_method * Renames 
rhsm_server to rhsm_satellite * Add additional pre_task checks (hosted + key) * Change conditionals from rhsm_method check to rhsm_satellite defined * Change repos disable/enable from key to if repos are defined * Update README and examples in inventory file --- roles/subscription-manager/README.md | 30 ++++++---------- roles/subscription-manager/pre_tasks/pre_tasks.yml | 41 ++++++++++++---------- roles/subscription-manager/tasks/main.yml | 20 +++++++---- 3 files changed, 46 insertions(+), 45 deletions(-) (limited to 'roles') diff --git a/roles/subscription-manager/README.md b/roles/subscription-manager/README.md index b140ad09a..e604c7475 100644 --- a/roles/subscription-manager/README.md +++ b/roles/subscription-manager/README.md @@ -2,21 +2,11 @@ ## Parameters -This role depends on user specified variables. These can be set in the inventory file, group_vars or passed to the playbook from the CLI. The variables are: +This role depends on user specified variables. These can be set in the inventory file, group_vars or passed to the playbook from the CLI. No values are set by default which disables this role. The variables are: -### rhsm_method +### rhsm_satellite -Subscription Manager method to use for registration. Valid values are: - -* **satellite** - Use a Satellite server. Additional variables required include **rhsm_server**, **rhsm_org** and either (**rhsm_username** and **rhsm_password**) or **rhsm_activationkey** -* **hosted** - Use Red Hat's CDN. Additional variables required are **rhsm_server** (defaults to RHSM CDN) and **rhsm_username** and **rhsm_password** -* none/false/blank will disable any subscription manager activities (this is the default if no parameters are set) - -Default: none - -### rhsm_server - -Subscription Manager server hostname. If using a Satellite server set the FQDN here. If using RHSM Hosted this value is ignored. +Subscription Manager server hostname. If using a Satellite server set the FQDN here. If using RHSM Hosted this value must be left blank, none or false. Default: none @@ -34,13 +24,13 @@ Default: none ### rhsm_org -Optional Satellite Subscription Manager Organization. Required for Satellite, ignored if using RHSM Hosted. +Optional Subscription Manager Satellite Organization. Required for Satellite, ignored if using RHSM Hosted. Default: none ### rhsm_activationkey -Optional Satellite Subscription Manager Activation Key, use this instead of **rhsm_username** and **rhsm_password** if using Satellite to provide repositories and authentication in a key instead. +Optional Subscription Manager Satellite Activation Key, use this instead of **rhsm_username** and **rhsm_password** if using Satellite to provide repositories and authentication in a key instead. Default: none @@ -52,7 +42,7 @@ Default: none ### rhsm_repos -Optional Repositories to enable, this can also be specified in the **rhsm_activationkey**. Valid for RHSM Hosted or Satellite. Specifying **rhsm_activationkey** will ignore this option. +Optional list of repositories to enable. If left blank it is expected that the **rhsm_activationkey** will specify repos instead. If populated, a **subscription-manager repos --disable=\*** will be run and each of the specified repos explicitly enabled. Valid for RHSM Hosted or Satellite NOTE: If specifying this value in an inventory file as opposed to group_vars, be sure to define it as a proper list as such: @@ -75,7 +65,7 @@ The bulk of the work is performed in the main.yml for this role. 
The pre-task pl ``` roles: - - { role: subscription-manager, when: not hostvars.localhost.rhsm_skip, tags: 'subscription-manager' } + - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager' } ``` ## Running the Playbook @@ -83,13 +73,13 @@ The bulk of the work is performed in the main.yml for this role. The pre-task pl To register to RHSM Hosted with username and password: ``` -ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_method='hosted' rhsm_username=vvaldez rhsm_password='hunter2' openstack_key_name='vvaldez'" +ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_username=vvaldez rhsm_password=hunter2" ``` To register to a Satellite server with an activation key: ``` -ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_server='10.12.32.1' rhsm_org='cloud_practice' rhsm_activationkey='rhel-7-ose-3-1' openstack_key_name='vvaldez' rhsm_method='satellite'" +ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_satellite=satellite.example.com rhsm_org=example_org rhsm_activationkey=rhel-7-ose-3-1" ``` -To ignore any Subscription Manager activities, simple do not set any parameters or explicitly set **rhsm_method** to false. +To ignore any Subscription Manager activities, simply do not set any parameters. diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml index 497f39353..dcd56b2b9 100644 --- a/roles/subscription-manager/pre_tasks/pre_tasks.yml +++ b/roles/subscription-manager/pre_tasks/pre_tasks.yml @@ -1,37 +1,40 @@ --- - name: Initialize Subscription Manager fact set_fact: - rhsm_skip: false + rhsm_register: true -- name: Determine if Subscription Manager should be skipped or not +- name: Determine if Subscription Manager should be used set_fact: - rhsm_skip: true - when: rhsm_method is undefined or rhsm_method is none or rhsm_method|trim == '' - -- name: Determine Subscription Manager method - fail: msg="Value for 'rhsm_method' of '{{ rhsm_method }}' is not valid, it should be one of 'hosted', 'satellite', or false/none/blank" - when: - - rhsm_method != 'hosted' and rhsm_method != 'satellite' - - not rhsm_skip - -- name: Validate Subscription Manager host is set - fail: msg="Cannot determine Subscription Manager server hostname without a value for 'rhsm_server'" + rhsm_register: false when: - - rhsm_server is undefined or rhsm_server is none or rhsm_server|trim == '' - - not rhsm_method == 'hosted' - - not rhsm_skip + - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == '' + - rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '' + - rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '' + - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' + - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == '' + - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' - name: Validate Subscription Manager organization is set fail: msg="Cannot register to a Satellite server without a value for the Organization via 'rhsm_org'" when: - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' - - rhsm_method == 'satellite' - - not rhsm_skip + - rhsm_satellite is defined + - rhsm_satellite is not none + - rhsm_satellite|trim != '' + - rhsm_register - name: Validate Subscription Manager authentication is defined fail: msg="Cannot register without ('rhsm_username' and 
'rhsm_password') or 'rhsm_activationkey' variables set" when: - (rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '') or (rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '') - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == '' - - not rhsm_skip + - rhsm_register +- name: Validate activation key and Hosted are not requested together + fail: msg="Cannot register to RHSM Hosted with 'rhsm_activationkey'" + when: + - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == '' + - rhsm_activationkey is defined + - rhsm_activationkey is not none + - rhsm_activationkey|trim != '' + - rhsm_register diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index 2e04a7a22..78ceaccd1 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -32,7 +32,7 @@ changed_when: no register: check_if_registered -- name: Set registration fact +- name: Set registration fact if system is already registered set_fact: registered: true when: check_if_registered.rc == 0 @@ -44,17 +44,21 @@ - rhsm_authentication is defined - name: Install Satellite certificate - command: "rpm -Uvh --force http://{{ rhsm_server }}/pub/katello-ca-consumer-latest.noarch.rpm" + command: "rpm -Uvh --force http://{{ rhsm_satellite }}/pub/katello-ca-consumer-latest.noarch.rpm" when: - not registered - - rhsm_method == 'satellite' + - rhsm_satellite is defined + - rhsm_satellite is not none + - rhsm_satellite|trim != '' - name: Register to Satellite using activation key command: "/usr/bin/subscription-manager register --activationkey={{ rhsm_activationkey }} --org={{ rhsm_org }}" when: - not registered - rhsm_authentication == 'key' - - rhsm_method == 'satellite' + - rhsm_satellite is defined + - rhsm_satellite is not none + - rhsm_satellite|trim != '' # This can apply to either Hosted or Satellite - name: Register using username and password @@ -80,14 +84,18 @@ command: "/usr/bin/subscription-manager repos --disable=*" when: - not registered - - not rhsm_authentication == "key" + - rhsm_repos is defined + - rhsm_repos is not none + - rhsm_repos|trim != '' - name: Enable specified repositories command: "/usr/bin/subscription-manager repos --enable={{ item }}" with_items: rhsm_repos when: - not registered - - not rhsm_authentication == "key" + - rhsm_repos is defined + - rhsm_repos is not none + - rhsm_repos|trim != '' - name: Cleaning yum repositories command: "yum clean all" -- cgit v1.2.1 From 644f1e672c80bd10f34fabafcfe805c306e77b5e Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Tue, 5 Apr 2016 12:23:35 -0500 Subject: Fix bad syntax with extra 'and' in when using rhsm_pool --- roles/subscription-manager/tasks/main.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'roles') diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index 78ceaccd1..414bf8f7a 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -1,4 +1,5 @@ --- + - name: Initializing Subscription Manager authenticaiton method set_fact: rhsm_authentication: false @@ -61,6 +62,7 @@ - rhsm_satellite|trim != '' # This can apply to either Hosted or Satellite + - name: Register using username and password command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }}" when: @@ -72,12 +74,15 @@ when: - 
not registered - rhsm_authentication != "key" + - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' - name: Attach to a specific pool command: "/usr/bin/subscription-manager attach --pool={{ rhsm_pool }}" when: - - rhsm_pool is defined and rhsm_pool is not none and rhsm_pool|trim != '' - - and not registered + - rhsm_pool is defined + - rhsm_pool is not none + - rhsm_pool|trim != '' + - not registered - rhsm_authentication != "key" - name: Disable all repositories -- cgit v1.2.1 From 96aaa6df25774e05cda3e4a6f73b030ae989100a Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Tue, 5 Apr 2016 18:17:36 -0500 Subject: Refactor use of rhsm_password to prevent display to CLI --- roles/subscription-manager/README.md | 30 ++++++++++++++++++---- roles/subscription-manager/pre_tasks/pre_tasks.yml | 9 +++++++ roles/subscription-manager/tasks/main.yml | 23 ++++++++++++----- 3 files changed, 50 insertions(+), 12 deletions(-) (limited to 'roles') diff --git a/roles/subscription-manager/README.md b/roles/subscription-manager/README.md index e604c7475..a5dd1ac44 100644 --- a/roles/subscription-manager/README.md +++ b/roles/subscription-manager/README.md @@ -18,7 +18,9 @@ Default: none ### rhsm_password -Subscription Manager password. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this. +Subscription Manager password. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this. + +NOTE: This variable is prompted for at the start of the playbook run. This is for security purposes so the password is not left in the command history. If specified on the command-line or set in a variable file it will be ignored and the value captured from the prompt will overwrite it instead. Default: none @@ -50,7 +52,25 @@ rhsm_repos='["rhel-7-server-rpms", "rhel-7-server-ose-3.1-rpms", "rhel-7-server- Default: none -## Pre-tasks +## Calling This Role +Calling this role requires adding a **vars_prompt**, **pre_tasks**, and **roles** section of a play + +### vars_prompt +Unfortunately **vars_prompt** can only be used at the play level before role tasks are executed, so this is the only place it can go. See http://stackoverflow.com/questions/25466675/ansible-to-conditionally-prompt-for-a-variable + +Add a prompt to capture **rhsm_password** + +``` +- hosts: localhost + vars_prompt: + # Unfortunately vars_prompt can only be used at the play level before role tasks, so this is the only place it can go. See http://stackoverflow.com/questions/25466675/ansible-to-conditionally-prompt-for-a-variable + - name: "rhsm_password" + prompt: "Subscription Manager password (enter blank if using rhsm_activationkey or to disable registration)" + confirm: yes + private: yes +``` + +### pre-tasks A number of variable checks are performed before any tasks to ensure the proper parameters are set. To include these checks call the pre_task yaml before any roles: @@ -59,7 +79,7 @@ A number of variable checks are performed before any tasks to ensure the proper - include: roles/subscription-manager/pre_tasks/pre_tasks.yml ``` -## Tasks +### roles The bulk of the work is performed in the main.yml for this role. The pre-task play will set a variable which can be checked to contitionally include this role as such: @@ -73,7 +93,7 @@ The bulk of the work is performed in the main.yml for this role. 
The pre-task pl To register to RHSM Hosted with username and password: ``` -ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_username=vvaldez rhsm_password=hunter2" +ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_username=vvaldez" ``` To register to a Satellite server with an activation key: @@ -82,4 +102,4 @@ To register to a Satellite server with an activation key: ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_satellite=satellite.example.com rhsm_org=example_org rhsm_activationkey=rhel-7-ose-3-1" ``` -To ignore any Subscription Manager activities, simply do not set any parameters. +To ignore any Subscription Manager activities, simply do not set any parameters. When prompted for the password, hit **Enter** to set a blank password. diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml index dcd56b2b9..31441785e 100644 --- a/roles/subscription-manager/pre_tasks/pre_tasks.yml +++ b/roles/subscription-manager/pre_tasks/pre_tasks.yml @@ -1,4 +1,13 @@ --- +- name: Set password fact + set_fact: + rhsm_password: "{{ rhsm_password }}" + no_log: true + when: + - rhsm_password is defined + - rhsm_password is not none + - rhsm_password|trim != '' + - name: Initialize Subscription Manager fact set_fact: rhsm_register: true diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index 414bf8f7a..6e51be7e4 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -1,5 +1,12 @@ --- - +- name: Initialize rhsm_password variable if vars_prompt was used + set_fact: + rhsm_password: "{{ hostvars.localhost.rhsm_password }}" + when: + - rhsm_password is defined + - rhsm_password is not none + - rhsm_password|trim != '' + - name: Initializing Subscription Manager authenticaiton method set_fact: rhsm_authentication: false @@ -19,8 +26,12 @@ set_fact: rhsm_authentication: "password" when: - - rhsm_username is defined and rhsm_username is not none and rhsm_username|trim != '' - - rhsm_password is defined and rhsm_password is not none and rhsm_password|trim != '' + - rhsm_username is defined + - rhsm_username is not none + - rhsm_username|trim != '' + - rhsm_password is defined + - rhsm_password is not none + - rhsm_password|trim != '' - not rhsm_authentication - name: Initializing registration status @@ -62,18 +73,17 @@ - rhsm_satellite|trim != '' # This can apply to either Hosted or Satellite - - name: Register using username and password command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }}" + no_log: true when: - not registered - - rhsm_authentication != "key" + - rhsm_authentication == "password" - name: Auto-attach to Subscription Manager Pool command: "/usr/bin/subscription-manager attach --auto" when: - not registered - - rhsm_authentication != "key" - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' - name: Attach to a specific pool @@ -83,7 +93,6 @@ - rhsm_pool is not none - rhsm_pool|trim != '' - not registered - - rhsm_authentication != "key" - name: Disable all repositories command: "/usr/bin/subscription-manager repos --disable=*" -- cgit v1.2.1 From 71f4817263a21b6e2062b35928ebfab373d26278 Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Fri, 8 Apr 2016 11:02:57 -0500 Subject: Cosmetic changes to task names and move yum clean all to prereqs --- roles/subscription-manager/tasks/main.yml | 33 
++++++++++++++----------------- 1 file changed, 15 insertions(+), 18 deletions(-) (limited to 'roles') diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index 6e51be7e4..adf3a8e85 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: Initialize rhsm_password variable if vars_prompt was used +- name: "Initialize rhsm_password variable if vars_prompt was used" set_fact: rhsm_password: "{{ hostvars.localhost.rhsm_password }}" when: @@ -7,12 +7,12 @@ - rhsm_password is not none - rhsm_password|trim != '' -- name: Initializing Subscription Manager authenticaiton method +- name: "Initializing Subscription Manager authenticaiton method" set_fact: rhsm_authentication: false # 'rhsm_activationkey' will take precedence even if 'rhsm_username' and 'rhsm_password' are also set -- name: Setting Subscription Manager Activation Key Fact +- name: "Setting Subscription Manager Activation Key Fact" set_fact: rhsm_authentication: "key" when: @@ -22,7 +22,7 @@ - not rhsm_authentication # If 'rhsm_username' and 'rhsm_password' are set but not 'rhsm_activationkey', set 'rhsm_authentication' to password -- name: Setting Subscription Manager Username and Password Fact +- name: "Setting Subscription Manager Username and Password Fact" set_fact: rhsm_authentication: "password" when: @@ -34,28 +34,28 @@ - rhsm_password|trim != '' - not rhsm_authentication -- name: Initializing registration status +- name: "Initializing registration status" set_fact: registered: false -- name: Checking subscription status (a failure means it is not registered and will be) +- name: "Checking subscription status (a failure means it is not registered and will be)" command: "/usr/bin/subscription-manager status" ignore_errors: yes changed_when: no register: check_if_registered -- name: Set registration fact if system is already registered +- name: "Set registration fact if system is already registered" set_fact: registered: true when: check_if_registered.rc == 0 -- name: Cleaning any old subscriptions +- name: "Cleaning any old subscriptions" command: "/usr/bin/subscription-manager clean" when: - not registered - rhsm_authentication is defined -- name: Install Satellite certificate +- name: "Install Satellite certificate" command: "rpm -Uvh --force http://{{ rhsm_satellite }}/pub/katello-ca-consumer-latest.noarch.rpm" when: - not registered @@ -63,7 +63,7 @@ - rhsm_satellite is not none - rhsm_satellite|trim != '' -- name: Register to Satellite using activation key +- name: "Register to Satellite using activation key" command: "/usr/bin/subscription-manager register --activationkey={{ rhsm_activationkey }} --org={{ rhsm_org }}" when: - not registered @@ -73,20 +73,20 @@ - rhsm_satellite|trim != '' # This can apply to either Hosted or Satellite -- name: Register using username and password +- name: "Register using username and password" command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }}" no_log: true when: - not registered - rhsm_authentication == "password" -- name: Auto-attach to Subscription Manager Pool +- name: "Auto-attach to Subscription Manager Pool" command: "/usr/bin/subscription-manager attach --auto" when: - not registered - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' -- name: Attach to a specific pool +- name: "Attach to a specific pool" command: "/usr/bin/subscription-manager attach --pool={{ rhsm_pool }}" when: - 
rhsm_pool is defined @@ -94,7 +94,7 @@ - rhsm_pool|trim != '' - not registered -- name: Disable all repositories +- name: "Disable all repositories" command: "/usr/bin/subscription-manager repos --disable=*" when: - not registered @@ -102,7 +102,7 @@ - rhsm_repos is not none - rhsm_repos|trim != '' -- name: Enable specified repositories +- name: "Enable specified repositories" command: "/usr/bin/subscription-manager repos --enable={{ item }}" with_items: rhsm_repos when: @@ -110,6 +110,3 @@ - rhsm_repos is defined - rhsm_repos is not none - rhsm_repos|trim != '' - -- name: Cleaning yum repositories - command: "yum clean all" -- cgit v1.2.1 From 39f973fcfd40fde18f5e92259d05e4ba6b30e22e Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Fri, 8 Apr 2016 18:44:23 -0500 Subject: Remove vars_prompt, add info to README to re-enable and for ansible-vault --- roles/subscription-manager/README.md | 91 +++++++++++++++++----- roles/subscription-manager/pre_tasks/pre_tasks.yml | 14 ++-- roles/subscription-manager/tasks/main.yml | 4 +- 3 files changed, 79 insertions(+), 30 deletions(-) (limited to 'roles') diff --git a/roles/subscription-manager/README.md b/roles/subscription-manager/README.md index a5dd1ac44..748de282c 100644 --- a/roles/subscription-manager/README.md +++ b/roles/subscription-manager/README.md @@ -20,7 +20,48 @@ Default: none Subscription Manager password. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this. -NOTE: This variable is prompted for at the start of the playbook run. This is for security purposes so the password is not left in the command history. If specified on the command-line or set in a variable file it will be ignored and the value captured from the prompt will overwrite it instead. +NOTE: If this variable is specified on the command-line or set in a variable file it may leave your password exposed. For this reason you may perfer to use an Activation Key if using Satellite. For RHSM Hosted, your password must be specified. There are two ways to provide the password to the Ansible playbook without exposing it to prying eyes. + +1. The first method is to use a **vars_prompt** to collect the password up front one time for the playbook. Ansible will not display the password if the prompt is configured as **private** and the task will not display the password on the CLI. This is the a good method as it supports automating the task to every host with only one password entry. To enable **vars_prompt** add the following to the very top of your playbook after the **hosts** declaration and before any **pre_tasks** section: + + ``` + - hosts: localhost + # Add the following lines after a -hosts: declaration and before pre_tasks: + # Start of vars_prompt code block + vars_prompt: + - name: "rhsm_password" + prompt: "Subscription Manager password" + confirm: yes + private: yes + # End of vars_prompt code block + pre_tasks: + ``` + +2. A second method is to use an encrypted file via **ansible-vault**. This does does not require modifying any code as the previous method, but does require more work to create and encrypt the file. To accomplish this, first create a file containing at least the **rhsm_password** variable (it is also possible to specify additional variables to encrypt them all as well): + 1. Create a file to contain the variable such as **secrets.yml**: + + ``` + --- + rhsm_password: "my_secret_password" + # other variables can optionally be placed here as well + ``` + + 2. 
Encrypt the file with **ansible-vault**: + + ``` + $ ansible-vault encrypt secrets.yml + Vault password: + Confirm Vault password: + Encryption successful + ``` + + 3. When executing **ansible-playbook** specify **--ask-vault-pass** to be prompted for the decryption password, and also specify the location of the **secrets.yml** as such: + + ``` + $ ansible-playbook --ask-vault-pass --extra-vars=@secrets.yml --extra-vars="rhsm_username=myusername" + ``` + + NOTE: Optionally the file containing the encrypted variables can be decrypted with **ansible-vault** and the **--ask-vault-pass** option omitted to prevent any password prompting (for automated runs) and the file can be encrypted after the run. This can be used if an external system such as Jenkins would handle the decryption/encryption outside of Ansible. Default: none @@ -53,21 +94,24 @@ rhsm_repos='["rhel-7-server-rpms", "rhel-7-server-ose-3.1-rpms", "rhel-7-server- Default: none ## Calling This Role -Calling this role requires adding a **vars_prompt**, **pre_tasks**, and **roles** section of a play +Calling this role is done at both **pre_tasks** and **roles** sections of a playbook and optionally a **vars_prompt**. ### vars_prompt -Unfortunately **vars_prompt** can only be used at the play level before role tasks are executed, so this is the only place it can go. See http://stackoverflow.com/questions/25466675/ansible-to-conditionally-prompt-for-a-variable +Unfortunately **vars_prompt** can only be used at the play level before role tasks are executed, so this is the only place it can go. It also cannot be shown conditionally. For this reason it is not included in this role by default. A better method may be using a file containing the password variable encrypted with **ansible-vault**. See the **rhsm_password** section for more details. -Add a prompt to capture **rhsm_password** +To Add a prompt to capture **rhsm_password**: ``` - hosts: localhost + # Add the following lines after a -hosts: declaration and before pre_tasks: + # Start of vars_prompt code block vars_prompt: - # Unfortunately vars_prompt can only be used at the play level before role tasks, so this is the only place it can go. See http://stackoverflow.com/questions/25466675/ansible-to-conditionally-prompt-for-a-variable - name: "rhsm_password" - prompt: "Subscription Manager password (enter blank if using rhsm_activationkey or to disable registration)" + prompt: "Subscription Manager password" confirm: yes private: yes + # End of vars_prompt code block + pre_tasks: ``` ### pre-tasks @@ -75,8 +119,8 @@ Add a prompt to capture **rhsm_password** A number of variable checks are performed before any tasks to ensure the proper parameters are set. To include these checks call the pre_task yaml before any roles: ``` - pre_tasks: - - include: roles/subscription-manager/pre_tasks/pre_tasks.yml +pre_tasks: +- include: roles/subscription-manager/pre_tasks/pre_tasks.yml ``` ### roles @@ -84,22 +128,29 @@ A number of variable checks are performed before any tasks to ensure the proper The bulk of the work is performed in the main.yml for this role. 
The pre-task play will set a variable which can be checked to contitionally include this role as such: ``` - roles: - - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager' } +roles: + - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager' } ``` -## Running the Playbook +## Running Playbooks with this Role -To register to RHSM Hosted with username and password: +- To register to RHSM Hosted or Satellite with a username and plain text password (NOTE: This may retain your password in your CLI history): -``` -ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_username=vvaldez" -``` + ``` + $ ansible-playbook --extra-vars="rhsm_username=vvaldez rhsm_password=my_secret_password " + ``` -To register to a Satellite server with an activation key: +- To register to RHSM Hosted or Satellite with username and an encrypted file containing the password: -``` -ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_satellite=satellite.example.com rhsm_org=example_org rhsm_activationkey=rhel-7-ose-3-1" -``` + ``` + $ ansible-playbook --ask-vault-pass --extra-vars=@secrets.yml --extra-vars="rhsm_username=myusername" + + ``` + +- To register to a Satellite server with an activation key: + + ``` + $ ansible-playbook --extra-vars="rhsm_satellite=satellite.example.com rhsm_org=example_org rhsm_activationkey=rhel-7-ose-3-1 " -To ignore any Subscription Manager activities, simply do not set any parameters. When prompted for the password, hit **Enter** to set a blank password. + ``` +- To ignore any Subscription Manager activities, simply do not set any parameters. diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml index 31441785e..8a4d8d06d 100644 --- a/roles/subscription-manager/pre_tasks/pre_tasks.yml +++ b/roles/subscription-manager/pre_tasks/pre_tasks.yml @@ -1,5 +1,5 @@ --- -- name: Set password fact +- name: "Set password fact" set_fact: rhsm_password: "{{ rhsm_password }}" no_log: true @@ -8,11 +8,11 @@ - rhsm_password is not none - rhsm_password|trim != '' -- name: Initialize Subscription Manager fact +- name: "Initialize Subscription Manager fact" set_fact: rhsm_register: true -- name: Determine if Subscription Manager should be used +- name: "Determine if Subscription Manager should be used" set_fact: rhsm_register: false when: @@ -23,7 +23,7 @@ - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == '' - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' -- name: Validate Subscription Manager organization is set +- name: "Validate Subscription Manager organization is set" fail: msg="Cannot register to a Satellite server without a value for the Organization via 'rhsm_org'" when: - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' @@ -32,14 +32,14 @@ - rhsm_satellite|trim != '' - rhsm_register -- name: Validate Subscription Manager authentication is defined - fail: msg="Cannot register without ('rhsm_username' and 'rhsm_password') or 'rhsm_activationkey' variables set" +- name: "Validate Subscription Manager authentication is defined" + fail: msg="Cannot register without ('rhsm_username' and 'rhsm_password') or 'rhsm_activationkey' variables set. 
See the README.md for details on securely prompting for a password" when: - (rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '') or (rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '') - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == '' - rhsm_register -- name: Validate activation key and Hosted are not requested together +- name: "Validate activation key and Hosted are not requested together" fail: msg="Cannot register to RHSM Hosted with 'rhsm_activationkey'" when: - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == '' diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index adf3a8e85..bdb8ca7c4 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -3,9 +3,7 @@ set_fact: rhsm_password: "{{ hostvars.localhost.rhsm_password }}" when: - - rhsm_password is defined - - rhsm_password is not none - - rhsm_password|trim != '' + - rhsm_password is not defined or rhsm_password is none or rhsm_password|trim == '' - name: "Initializing Subscription Manager authenticaiton method" set_fact: -- cgit v1.2.1 From 305140bfaeb6cd1bbe34279cbd6750d1136816d6 Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Sat, 23 Apr 2016 12:50:25 -0500 Subject: Add org parameter to Satellite with user/pass --- roles/subscription-manager/tasks/main.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'roles') diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index bdb8ca7c4..9bc430665 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -77,6 +77,18 @@ when: - not registered - rhsm_authentication == "password" + - rhsm_org is not defined or rhsm_org is none or rhsm_org|trim == '' + +# This can apply to either Hosted or Satellite +- name: "Register using username, password and organization" + command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }} --org={{ rhsm_org }}" + no_log: true + when: + - not registered + - rhsm_authentication == "password" + - rhsm_org is defined + - rhsm_org is not none + - rhsm_org|trim != '' - name: "Auto-attach to Subscription Manager Pool" command: "/usr/bin/subscription-manager attach --auto" -- cgit v1.2.1 From 150b709052688c1cf1ab435c9775501154c7e35a Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Wed, 27 Apr 2016 17:14:42 -0500 Subject: Fix typo in task name --- roles/subscription-manager/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'roles') diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index bdb8ca7c4..f3bd8b656 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -5,7 +5,7 @@ when: - rhsm_password is not defined or rhsm_password is none or rhsm_password|trim == '' -- name: "Initializing Subscription Manager authenticaiton method" +- name: "Initializing Subscription Manager authentication method" set_fact: rhsm_authentication: false -- cgit v1.2.1 From ca1b17aeeb8ed4f4db0a90a11bccd9ea009f9eac Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Fri, 13 May 2016 16:25:19 -0400 Subject: Changes by JayKayy for a full provision of OpenShift on OpenStack --- roles/common/pre_tasks/pre_tasks.yml | 2 +- roles/hostnames/tasks/main.yaml | 17 +++++++++++++++ 
roles/hostnames/templates/records.template.yaml | 28 +++++++++++++++++++++++++ roles/hostnames/test/inv | 12 +++++++++++ roles/hostnames/test/roles | 1 + roles/hostnames/test/test.retry | 3 +++ roles/hostnames/test/test.yaml | 21 +++++++++++++++++++ roles/hostnames/vars/main.yaml | 2 ++ roles/hostnames/vars/records.yaml | 28 +++++++++++++++++++++++++ 9 files changed, 113 insertions(+), 1 deletion(-) create mode 100644 roles/hostnames/tasks/main.yaml create mode 100644 roles/hostnames/templates/records.template.yaml create mode 100644 roles/hostnames/test/inv create mode 120000 roles/hostnames/test/roles create mode 100644 roles/hostnames/test/test.retry create mode 100644 roles/hostnames/test/test.yaml create mode 100644 roles/hostnames/vars/main.yaml create mode 100644 roles/hostnames/vars/records.yaml (limited to 'roles') diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml index c573bff8c..9dd14c30c 100644 --- a/roles/common/pre_tasks/pre_tasks.yml +++ b/roles/common/pre_tasks/pre_tasks.yml @@ -1,4 +1,4 @@ --- - name: Generate Environment ID shell: echo "$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | head -c 8)" - register: env_random_id \ No newline at end of file + register: env_random_id diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml new file mode 100644 index 000000000..921cd664b --- /dev/null +++ b/roles/hostnames/tasks/main.yaml @@ -0,0 +1,17 @@ +--- + - name: Setting master(s) hostname + hostname: name="{% for thishost in groups['openshift_masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'openshift_masters' in group_names" + + - name: Setting node(s) hostname + hostname: name="{% for thishost in groups['openshift_nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'openshift_nodes' in group_names" + + - name: "Templating records" + become: false + remote_user: cloud-user + template: + src: "{{ role_path }}/templates/records.template.yaml" + dest: "/tmp/records.yaml" + force: yes + delegate_to: localhost diff --git a/roles/hostnames/templates/records.template.yaml b/roles/hostnames/templates/records.template.yaml new file mode 100644 index 000000000..a916fd2b3 --- /dev/null +++ b/roles/hostnames/templates/records.template.yaml @@ -0,0 +1,28 @@ +--- +dns_records_add: + - view: private + zone: {{ dns_domain }} + entries: +{% for mst in groups['openshift_masters'] %} + - type: A + hostname: {{ hostvars[mst]['ansible_hostname'] }} + ip: {{ hostvars[mst]['dns_private_ip'] }} +{% endfor %} +{% for node in groups['openshift_nodes'] %} + - type: A + hostname: {{ hostvars[node]['ansible_hostname'] }} + ip: {{ hostvars[node]['dns_private_ip'] }} +{% endfor %} + - view: public + zone: {{ dns_domain}} + entries: +{% for mst in groups['openshift_masters']%} + - type: A + hostname: {{ hostvars[mst]['ansible_hostname'] }} + ip: {{ hostvars[mst]['dns_public_ip'] }} +{% endfor %} +{% for node in groups['openshift_nodes'] %} + - type: A + hostname: {{ hostvars[node]['ansible_hostname'] }} + ip: {{ hostvars[node]['dns_public_ip'] }} +{% endfor %} diff --git a/roles/hostnames/test/inv b/roles/hostnames/test/inv new file mode 100644 index 000000000..ffbe6e03d --- /dev/null +++ b/roles/hostnames/test/inv @@ -0,0 +1,12 @@ +[all:vars] +dns_domain=example.com + +[openshift_masters] +192.168.124.41 dns_private_ip=1.1.1.41 
dns_public_ip=192.168.124.41 +192.168.124.117 dns_private_ip=1.1.1.117 dns_public_ip=192.168.124.117 + +[openshift_nodes] +192.168.124.40 dns_private_ip=1.1.1.40 dns_public_ip=192.168.124.40 + +#[dns] +#192.168.124.117 dns_private_ip=1.1.1.117 diff --git a/roles/hostnames/test/roles b/roles/hostnames/test/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/roles/hostnames/test/roles @@ -0,0 +1 @@ +../../../roles/ \ No newline at end of file diff --git a/roles/hostnames/test/test.retry b/roles/hostnames/test/test.retry new file mode 100644 index 000000000..63fc08e4c --- /dev/null +++ b/roles/hostnames/test/test.retry @@ -0,0 +1,3 @@ +192.168.124.117 +192.168.124.40 +192.168.124.41 diff --git a/roles/hostnames/test/test.yaml b/roles/hostnames/test/test.yaml new file mode 100644 index 000000000..34bf37942 --- /dev/null +++ b/roles/hostnames/test/test.yaml @@ -0,0 +1,21 @@ +--- +- hosts: all + roles: + - role: hostnames + +# - debug: +# +# - hosts: dns +# roles: +# - role: dns-server +# named_config_views: +# - name: private +# acl_entry: +# - 192.168.124.40/32 +# - 192.168.124.40/32 +# zone: +# - dns_domain: example.com +# - name: public +# zone: +# - dns_domain: example.com +# - role: dns diff --git a/roles/hostnames/vars/main.yaml b/roles/hostnames/vars/main.yaml new file mode 100644 index 000000000..3eecb8dc4 --- /dev/null +++ b/roles/hostnames/vars/main.yaml @@ -0,0 +1,2 @@ +--- +counter: 1 diff --git a/roles/hostnames/vars/records.yaml b/roles/hostnames/vars/records.yaml new file mode 100644 index 000000000..3bf12ae2b --- /dev/null +++ b/roles/hostnames/vars/records.yaml @@ -0,0 +1,28 @@ +--- + - name: "Building Records" + set_fact: + dns_records_add: + - view: private + zone: example.com + entries: + - type: A + hostname: master1.example.com + ip: 172.16.15.94 + - type: A + hostname: node1.example.com + ip: 172.16.15.86 + - type: A + hostname: node2.example.com + ip: 172.16.15.87 + - view: public + zone: example.com + entries: + - type: A + hostname: master1.example.com + ip: 10.3.10.116 + - type: A + hostname: node1.example.com + ip: 10.3.11.46 + - type: A + hostname: node2.example.com + ip: 10.3.12.6 -- cgit v1.2.1 From c8f84c0aebe1fe9c00498921c5f83022a2e873c3 Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Fri, 3 Jun 2016 14:01:22 -0400 Subject: Changes to allow runs from inside a container. 
Also allows for running upstream openshift-ansible installer --- roles/hostnames/tasks/main.yaml | 3 +++ 1 file changed, 3 insertions(+) (limited to 'roles') diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml index 921cd664b..c34d07915 100644 --- a/roles/hostnames/tasks/main.yaml +++ b/roles/hostnames/tasks/main.yaml @@ -15,3 +15,6 @@ dest: "/tmp/records.yaml" force: yes delegate_to: localhost + + - name: "Updating hostname facts" + setup: filter=ansible_hostname -- cgit v1.2.1 From e4c6ba27a5fe784143831e02e5181794c1b953b2 Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Fri, 3 Jun 2016 18:01:05 -0400 Subject: Reverting previous commit and making template adjustments --- roles/hostnames/tasks/main.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'roles') diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml index c34d07915..700845e47 100644 --- a/roles/hostnames/tasks/main.yaml +++ b/roles/hostnames/tasks/main.yaml @@ -3,10 +3,18 @@ hostname: name="{% for thishost in groups['openshift_masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" when: "'openshift_masters' in group_names" + - name: Setting facts for masters + set_fact: ansible_hostname="{% for thishost in groups['openshift_masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'openshift_masters' in group_names" + - name: Setting node(s) hostname hostname: name="{% for thishost in groups['openshift_nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" when: "'openshift_nodes' in group_names" + - name: Setting facts for nodes + set_fact: ansible_hostname="{% for thishost in groups['openshift_nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'openshift_nodes' in group_names" + - name: "Templating records" become: false remote_user: cloud-user -- cgit v1.2.1 From d827e1796c6a3705007365cb58aa6b36a92d3b6e Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Fri, 3 Jun 2016 19:10:27 -0400 Subject: Subscription manager role should accomodate orgs with spaces --- roles/subscription-manager/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'roles') diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index f3bd8b656..c73204a29 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -62,7 +62,7 @@ - rhsm_satellite|trim != '' - name: "Register to Satellite using activation key" - command: "/usr/bin/subscription-manager register --activationkey={{ rhsm_activationkey }} --org={{ rhsm_org }}" + command: "/usr/bin/subscription-manager register --activationkey={{ rhsm_activationkey }} --org='{{ rhsm_org }}'" when: - not registered - rhsm_authentication == 'key' -- cgit v1.2.1 From e2181a706679666a6fff2e2aaca648ed982060bd Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Wed, 8 Jun 2016 14:58:36 -0400 Subject: Channging hard coded host groups to match openshift-ansible expected host groups. Importing byo playbook now instead of nested ansible run. Need to refactor how we generate hostnames to make it fit this. 
--- roles/hostnames/tasks/main.yaml | 17 ++++++++--------- roles/hostnames/templates/records.template.yaml | 8 ++++---- 2 files changed, 12 insertions(+), 13 deletions(-) (limited to 'roles') diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml index 700845e47..bf2fafb97 100644 --- a/roles/hostnames/tasks/main.yaml +++ b/roles/hostnames/tasks/main.yaml @@ -1,23 +1,22 @@ --- - name: Setting master(s) hostname - hostname: name="{% for thishost in groups['openshift_masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'openshift_masters' in group_names" + hostname: name="{% for thishost in groups['masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'masters' in group_names" - name: Setting facts for masters - set_fact: ansible_hostname="{% for thishost in groups['openshift_masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'openshift_masters' in group_names" + set_fact: ansible_hostname="{% for thishost in groups['masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'masters' in group_names" - name: Setting node(s) hostname - hostname: name="{% for thishost in groups['openshift_nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'openshift_nodes' in group_names" + hostname: name="{% for thishost in groups['nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'nodes' in group_names" - name: Setting facts for nodes - set_fact: ansible_hostname="{% for thishost in groups['openshift_nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'openshift_nodes' in group_names" + set_fact: ansible_hostname="{% for thishost in groups['nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'nodes' in group_names" - name: "Templating records" become: false - remote_user: cloud-user template: src: "{{ role_path }}/templates/records.template.yaml" dest: "/tmp/records.yaml" diff --git a/roles/hostnames/templates/records.template.yaml b/roles/hostnames/templates/records.template.yaml index a916fd2b3..2f2420464 100644 --- a/roles/hostnames/templates/records.template.yaml +++ b/roles/hostnames/templates/records.template.yaml @@ -3,12 +3,12 @@ dns_records_add: - view: private zone: {{ dns_domain }} entries: -{% for mst in groups['openshift_masters'] %} +{% for mst in groups['masters'] %} - type: A hostname: {{ hostvars[mst]['ansible_hostname'] }} ip: {{ hostvars[mst]['dns_private_ip'] }} {% endfor %} -{% for node in groups['openshift_nodes'] %} +{% for node in groups['nodes'] %} - type: A hostname: {{ hostvars[node]['ansible_hostname'] }} ip: {{ hostvars[node]['dns_private_ip'] }} @@ -16,12 +16,12 @@ dns_records_add: - view: public zone: {{ dns_domain}} entries: -{% for mst in groups['openshift_masters']%} +{% for mst in groups['masters']%} - type: A hostname: {{ hostvars[mst]['ansible_hostname'] }} ip: {{ hostvars[mst]['dns_public_ip'] }} {% endfor %} -{% for node in groups['openshift_nodes'] 
%} +{% for node in groups['nodes'] %} - type: A hostname: {{ hostvars[node]['ansible_hostname'] }} ip: {{ hostvars[node]['dns_public_ip'] }} -- cgit v1.2.1 From 4d6eb644d78f4b972154ade3d12c23b28dbe19e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Thu, 9 Jun 2016 11:34:07 -0400 Subject: Updated to run as root rather than cloud-user, for now... --- roles/common/pre_tasks/pre_tasks.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'roles') diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml index 9dd14c30c..ed57a2993 100644 --- a/roles/common/pre_tasks/pre_tasks.yml +++ b/roles/common/pre_tasks/pre_tasks.yml @@ -1,4 +1,4 @@ --- - name: Generate Environment ID - shell: echo "$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | head -c 8)" + shell: echo "$(date +%s)" register: env_random_id -- cgit v1.2.1 From 3866232daed8ce1a48aa2db6f2f6c541e90756ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Fri, 17 Jun 2016 14:48:37 -0400 Subject: Cleande up hostname role to make it more generic --- roles/hostnames/tasks/main.yaml | 43 ++++++++++++------------- roles/hostnames/templates/records.template.yaml | 28 ---------------- 2 files changed, 21 insertions(+), 50 deletions(-) delete mode 100644 roles/hostnames/templates/records.template.yaml (limited to 'roles') diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml index bf2fafb97..bb45445f5 100644 --- a/roles/hostnames/tasks/main.yaml +++ b/roles/hostnames/tasks/main.yaml @@ -1,27 +1,26 @@ --- - - name: Setting master(s) hostname - hostname: name="{% for thishost in groups['masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'masters' in group_names" +- name: Setting Hostname Fact + set_fact: + new_hostname: "{{ custom_hostname | default(inventory_hostname) }}" - - name: Setting facts for masters - set_fact: ansible_hostname="{% for thishost in groups['masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'masters' in group_names" +- name: Setting FQDN Fact + set_fact: + new_fqdn: "{{ new_hostname }}.{{ dns_domain }}" - - name: Setting node(s) hostname - hostname: name="{% for thishost in groups['nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'nodes' in group_names" +- name: Setting hostname and DNS domain + hostname: name="{{ new_fqdn }}" - - name: Setting facts for nodes - set_fact: ansible_hostname="{% for thishost in groups['nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'nodes' in group_names" +- name: Check for cloud.cfg + stat: path=/etc/cloud/cloud.cfg + register: cloud_cfg - - name: "Templating records" - become: false - template: - src: "{{ role_path }}/templates/records.template.yaml" - dest: "/tmp/records.yaml" - force: yes - delegate_to: localhost - - - name: "Updating hostname facts" - setup: filter=ansible_hostname +- name: Prevent cloud-init updates of hostname/fqdn (if applicable) + lineinfile: + dest: /etc/cloud/cloud.cfg + state: present + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: '^ - set_hostname', line: '# - set_hostname' } + - { regexp: '^ - update_hostname', line: '# - update_hostname' } + when: 
cloud_cfg.stat.exists == True diff --git a/roles/hostnames/templates/records.template.yaml b/roles/hostnames/templates/records.template.yaml deleted file mode 100644 index 2f2420464..000000000 --- a/roles/hostnames/templates/records.template.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- -dns_records_add: - - view: private - zone: {{ dns_domain }} - entries: -{% for mst in groups['masters'] %} - - type: A - hostname: {{ hostvars[mst]['ansible_hostname'] }} - ip: {{ hostvars[mst]['dns_private_ip'] }} -{% endfor %} -{% for node in groups['nodes'] %} - - type: A - hostname: {{ hostvars[node]['ansible_hostname'] }} - ip: {{ hostvars[node]['dns_private_ip'] }} -{% endfor %} - - view: public - zone: {{ dns_domain}} - entries: -{% for mst in groups['masters']%} - - type: A - hostname: {{ hostvars[mst]['ansible_hostname'] }} - ip: {{ hostvars[mst]['dns_public_ip'] }} -{% endfor %} -{% for node in groups['nodes'] %} - - type: A - hostname: {{ hostvars[node]['ansible_hostname'] }} - ip: {{ hostvars[node]['dns_public_ip'] }} -{% endfor %} -- cgit v1.2.1 From fbf2f35080f666f68994e30174a590b8308b59f3 Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Fri, 15 Jul 2016 14:05:13 -0500 Subject: Fixes Issue #163 if rhsm_password is not defined --- roles/subscription-manager/pre_tasks/pre_tasks.yml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'roles') diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml index 8a4d8d06d..b21356cf2 100644 --- a/roles/subscription-manager/pre_tasks/pre_tasks.yml +++ b/roles/subscription-manager/pre_tasks/pre_tasks.yml @@ -1,12 +1,8 @@ --- - name: "Set password fact" set_fact: - rhsm_password: "{{ rhsm_password }}" + rhsm_password: "{{ rhsm_password | default(None) }}" no_log: true - when: - - rhsm_password is defined - - rhsm_password is not none - - rhsm_password|trim != '' - name: "Initialize Subscription Manager fact" set_fact: -- cgit v1.2.1 From c757fd690d24865ef3b5b9a1b536120299b39a6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Sun, 21 Aug 2016 02:12:53 -0400 Subject: Updated env_id to be a sub-domain + make the logic a bit more flexible --- roles/common/pre_tasks/pre_tasks.yml | 21 +++++++++++++++++++-- roles/hostnames/tasks/main.yaml | 4 ++-- 2 files changed, 21 insertions(+), 4 deletions(-) (limited to 'roles') diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml index ed57a2993..1ba1ea55d 100644 --- a/roles/common/pre_tasks/pre_tasks.yml +++ b/roles/common/pre_tasks/pre_tasks.yml @@ -1,4 +1,21 @@ --- - name: Generate Environment ID - shell: echo "$(date +%s)" - register: env_random_id + set_fact: + env_random_id: "{{ ansible_date_time.epoch }}" + run_once: true + delegate_to: localhost + +- name: Set default Environment ID + set_fact: + default_env_id: "casl-{{ lookup('env','OS_USERNAME') }}-{{ env_random_id }}" + delegate_to: localhost + +- name: Setting Common Facts + set_fact: + env_id: "{{ env_id | default(default_env_id) }}" + delegate_to: localhost + +- name: Updating DNS domain to include env_id (if not empty) + set_fact: + full_dns_domain: "{{ (env_id|trim == '') | ternary(dns_domain, env_id + '.' 
+ dns_domain) }}" + delegate_to: localhost diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml index bb45445f5..bf142d653 100644 --- a/roles/hostnames/tasks/main.yaml +++ b/roles/hostnames/tasks/main.yaml @@ -1,11 +1,11 @@ --- - name: Setting Hostname Fact set_fact: - new_hostname: "{{ custom_hostname | default(inventory_hostname) }}" + new_hostname: "{{ custom_hostname | default(inventory_hostname_short) }}" - name: Setting FQDN Fact set_fact: - new_fqdn: "{{ new_hostname }}.{{ dns_domain }}" + new_fqdn: "{{ new_hostname }}.{{ full_dns_domain }}" - name: Setting hostname and DNS domain hostname: name="{{ new_fqdn }}" -- cgit v1.2.1 From fbda334b6797eb0109cd9c13afb99a47e3916b36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Tue, 15 Nov 2016 22:26:58 -0500 Subject: Fixing ansible impl to work with OSP9 and ansible 2.2 --- roles/subscription-manager/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'roles') diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index 0b3aa351f..2dd14b48e 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -114,7 +114,7 @@ - name: "Enable specified repositories" command: "/usr/bin/subscription-manager repos --enable={{ item }}" - with_items: rhsm_repos + with_items: "{{ rhsm_repos }}" when: - not registered - rhsm_repos is defined -- cgit v1.2.1 From 11b48fe4e237950f9d9e9a0e66d8b15f48be1ea0 Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Wed, 21 Dec 2016 10:37:40 -0500 Subject: Openstack heat (#2) * Adding a role to invoke openstack heat * Adding readme * Pulling parameters out to inventory file * start of end-to-end playbook * More enhancements and refactoring to make dynamic inventory the driver for an openshift install * Switching to variable substituted path to config.yaml playbook * Changes to allow defining of number of nodes/infranodes. 
* Added labels to inventory * Start of end-to-end functionality * Enhancements to support openstack heat provisioning * Updating inventory sample to remove some deprecation warnings * Working towards making the secure-registry role 'become' aware * Fixing node labels and removing secure-registry as it's no longer needed * No longer need insecure registry line, as installer will secure our registry * Adjusted dynamic inventory to filter by clusterid * Minor updates to dynamic inventory bug * Adding a refactored sample inventory directory * Refactoring playbooks for better directory structure, and to narrow down host groups * Adding volume mounts to heat template * Moving dns playbooks back to original location * Fixing incorrect file path * Cleaning up inventory samples * One more hostname to clean up * Changing var name * changed openshift-provision to openshift-prep * Adjusting current provision script to avoid breakage by new openstack-heat code --- roles/common/pre_tasks/pre_tasks.yml | 5 + roles/openshift-prep/tasks/main.yml | 4 + roles/openshift-prep/tasks/prerequisites.yml | 36 ++ roles/openstack-stack/README.md | 9 + roles/openstack-stack/files/heat_stack.yaml | 684 +++++++++++++++++++++ roles/openstack-stack/files/heat_stack_server.yaml | 156 +++++ roles/openstack-stack/files/user-data | 13 + roles/openstack-stack/tasks/main.yml | 31 + roles/openstack-stack/test/roles | 1 + roles/openstack-stack/test/stack-create-test.yml | 17 + 10 files changed, 956 insertions(+) create mode 100644 roles/openshift-prep/tasks/main.yml create mode 100644 roles/openshift-prep/tasks/prerequisites.yml create mode 100644 roles/openstack-stack/README.md create mode 100644 roles/openstack-stack/files/heat_stack.yaml create mode 100644 roles/openstack-stack/files/heat_stack_server.yaml create mode 100644 roles/openstack-stack/files/user-data create mode 100644 roles/openstack-stack/tasks/main.yml create mode 120000 roles/openstack-stack/test/roles create mode 100644 roles/openstack-stack/test/stack-create-test.yml (limited to 'roles') diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml index 1ba1ea55d..71a989b30 100644 --- a/roles/common/pre_tasks/pre_tasks.yml +++ b/roles/common/pre_tasks/pre_tasks.yml @@ -15,6 +15,11 @@ env_id: "{{ env_id | default(default_env_id) }}" delegate_to: localhost +- name: Set Dynamic Inventory Filters + shell: > + export OS_INV_FILTER_KEY=clusterid && OS_INV_FILTER_VALUE={{ env_id }} + delegate_to: localhost + - name: Updating DNS domain to include env_id (if not empty) set_fact: full_dns_domain: "{{ (env_id|trim == '') | ternary(dns_domain, env_id + '.' 
+ dns_domain) }}" diff --git a/roles/openshift-prep/tasks/main.yml b/roles/openshift-prep/tasks/main.yml new file mode 100644 index 000000000..5e484e75f --- /dev/null +++ b/roles/openshift-prep/tasks/main.yml @@ -0,0 +1,4 @@ +--- +# Starting Point for OpenShift Installation and Configuration +- include: prerequisites.yml + tags: [prerequisites] diff --git a/roles/openshift-prep/tasks/prerequisites.yml b/roles/openshift-prep/tasks/prerequisites.yml new file mode 100644 index 000000000..1286905f4 --- /dev/null +++ b/roles/openshift-prep/tasks/prerequisites.yml @@ -0,0 +1,36 @@ +--- +- name: "Cleaning yum repositories" + command: "yum clean all" + +- name: "Install required packages" + yum: + name: "{{ item }}" + state: latest + with_items: + - wget + - git + - net-tools + - bind-utils + - bridge-utils + - bash-completion + - atomic-openshift-utils + - vim-enhanced + +- name: "Update all packages (this can take a very long time)" + yum: + name: "*" + state: latest + +- name: "Verify hostname" + shell: hostnamectl status | awk "/Static hostname/"'{ print $3 }' + register: hostname_fqdn + +- name: "Set hostname if required" + hostname: + name: "{{ ansible_fqdn }}" + when: hostname_fqdn.stdout != ansible_fqdn + +- name: "Verify SELinux is enforcing" + fail: + msg: "SELinux is required for OpenShift and has been detected as '{{ ansible_selinux.config_mode }}'" + when: ansible_selinux.config_mode != "enforcing" diff --git a/roles/openstack-stack/README.md b/roles/openstack-stack/README.md new file mode 100644 index 000000000..509c9de6c --- /dev/null +++ b/roles/openstack-stack/README.md @@ -0,0 +1,9 @@ +# Role openstack-stack + +Role for spinning up instances using OpenStack Heat. + +## To Test + +``` +ansible-playbook casl-ansible/roles/openstack-stack/test/stack-create-test.yml +``` diff --git a/roles/openstack-stack/files/heat_stack.yaml b/roles/openstack-stack/files/heat_stack.yaml new file mode 100644 index 000000000..058f7a7ad --- /dev/null +++ b/roles/openstack-stack/files/heat_stack.yaml @@ -0,0 +1,684 @@ +heat_template_version: 2014-10-16 + +description: OpenShift cluster + +parameters: + + cluster_env: + type: string + label: Cluster environment + description: Environment of the cluster + + cluster_id: + type: string + label: Cluster ID + description: Identifier of the cluster + + subnet_24_prefix: + type: string + label: subnet /24 prefix + description: /24 subnet prefix of the network of the cluster (dot separated number triplet) + + dns_nameservers: + type: comma_delimited_list + label: DNS nameservers list + description: List of DNS nameservers + + external_net: + type: string + label: External network + description: Name of the external network + default: external + + ssh_public_key: + type: string + label: SSH public key + description: SSH public key + hidden: true + + ssh_incoming: + type: string + label: Source of ssh connections + description: Source of legitimate ssh connections + default: 0.0.0.0/0 + + node_port_incoming: + type: string + label: Source of node port connections + description: Authorized sources targetting node ports + default: 0.0.0.0/0 + + num_etcd: + type: number + label: Number of etcd nodes + description: Number of etcd nodes + + num_masters: + type: number + label: Number of masters + description: Number of masters + + num_nodes: + type: number + label: Number of compute nodes + description: Number of compute nodes + + num_infra: + type: number + label: Number of infrastructure nodes + description: Number of infrastructure nodes + + num_dns: + type: number 
+ label: Number of dns servers + description: Number of dns servers + + etcd_image: + type: string + label: Etcd image + description: Name of the image for the etcd servers + + master_image: + type: string + label: Master image + description: Name of the image for the master servers + + node_image: + type: string + label: Node image + description: Name of the image for the compute node servers + + infra_image: + type: string + label: Infra image + description: Name of the image for the infra node servers + + dns_image: + type: string + label: DNS image + description: Name of the image for the DNS server + + etcd_flavor: + type: string + label: Etcd flavor + description: Flavor of the etcd servers + + master_flavor: + type: string + label: Master flavor + description: Flavor of the master servers + + node_flavor: + type: string + label: Node flavor + description: Flavor of the compute node servers + + infra_flavor: + type: string + label: Infra flavor + description: Flavor of the infra node servers + + dns_flavor: + type: string + label: DNS flavor + description: Flavor of the DNS server + + master_volume_size: + type: number + description: Size of the volume to be created. + default: 5 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + + app_volume_size: + type: number + description: Size of the volume to be created. + default: 5 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + + infra_volume_size: + type: number + description: Size of the volume to be created. + default: 5 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + + dns_volume_size: + type: number + description: Size of the volume to be created. + default: 5 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + + etcd_volume_size: + type: number + description: Size of the volume to be created. + default: 5 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. 
+ +outputs: + + etcd_names: + description: Name of the etcds + value: { get_attr: [ etcd, name ] } + + etcd_ips: + description: IPs of the etcds + value: { get_attr: [ etcd, private_ip ] } + + etcd_floating_ips: + description: Floating IPs of the etcds + value: { get_attr: [ etcd, floating_ip ] } + + master_names: + description: Name of the masters + value: { get_attr: [ masters, name ] } + + master_ips: + description: IPs of the masters + value: { get_attr: [ masters, private_ip ] } + + master_floating_ips: + description: Floating IPs of the masters + value: { get_attr: [ masters, floating_ip ] } + + node_names: + description: Name of the nodes + value: { get_attr: [ compute_nodes, name ] } + + node_ips: + description: IPs of the nodes + value: { get_attr: [ compute_nodes, private_ip ] } + + node_floating_ips: + description: Floating IPs of the nodes + value: { get_attr: [ compute_nodes, floating_ip ] } + + infra_names: + description: Name of the nodes + value: { get_attr: [ infra_nodes, name ] } + + infra_ips: + description: IPs of the nodes + value: { get_attr: [ infra_nodes, private_ip ] } + + infra_floating_ips: + description: Floating IPs of the nodes + value: { get_attr: [ infra_nodes, floating_ip ] } + + dns_name: + description: Name of the DNS + value: + get_attr: + - dns + - name + + dns_floating_ip: + description: Floating IP of the DNS + value: + get_attr: + - dns + - addresses + - str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + - 1 + - addr + +resources: + + net: + type: OS::Neutron::Net + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + + subnet: + type: OS::Neutron::Subnet + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-subnet + params: + cluster_id: { get_param: cluster_id } + network: { get_resource: net } + cidr: + str_replace: + template: subnet_24_prefix.0/24 + params: + subnet_24_prefix: { get_param: subnet_24_prefix } + allocation_pools: + - start: + str_replace: + template: subnet_24_prefix.3 + params: + subnet_24_prefix: { get_param: subnet_24_prefix } + end: + str_replace: + template: subnet_24_prefix.254 + params: + subnet_24_prefix: { get_param: subnet_24_prefix } + dns_nameservers: + - 10.9.48.31 +# - { get_param: dns_nameservers } +# repeat: +# for_each: +# <%nameserver%>: { get_param: dns_nameservers } +# template: <%nameserver%> + + router: + type: OS::Neutron::Router + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-router + params: + cluster_id: { get_param: cluster_id } + external_gateway_info: + network: { get_param: external_net } + + interface: + type: OS::Neutron::RouterInterface + properties: + router_id: { get_resource: router } + subnet_id: { get_resource: subnet } + +# keypair: +# type: OS::Nova::KeyPair +# properties: +# name: +# str_replace: +# template: openshift-ansible-cluster_id-keypair +# params: +# cluster_id: { get_param: cluster_id } +# public_key: { get_param: ssh_public_key } + + master-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-master-secgrp + params: + cluster_id: { get_param: cluster_id } + description: + str_replace: + template: Security group for cluster_id OpenShift cluster master + params: + cluster_id: { get_param: cluster_id } + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: { get_param: 
ssh_incoming } + - direction: ingress + protocol: tcp + port_range_min: 4001 + port_range_max: 4001 + - direction: ingress + protocol: tcp + port_range_min: 8443 + port_range_max: 8443 + - direction: ingress + protocol: tcp + port_range_min: 8444 + port_range_max: 8444 + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: tcp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: udp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: tcp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: udp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: tcp + port_range_min: 2224 + port_range_max: 2224 + - direction: ingress + protocol: udp + port_range_min: 5404 + port_range_max: 5404 + - direction: ingress + protocol: udp + port_range_min: 5405 + port_range_max: 5405 + - direction: ingress + protocol: tcp + port_range_min: 9090 + port_range_max: 9090 + + etcd-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-etcd-secgrp + params: + cluster_id: { get_param: cluster_id } + description: + str_replace: + template: Security group for cluster_id etcd cluster + params: + cluster_id: { get_param: cluster_id } + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: { get_param: ssh_incoming } + - direction: ingress + protocol: tcp + port_range_min: 2379 + port_range_max: 2379 + remote_mode: remote_group_id + remote_group_id: { get_resource: master-secgrp } + - direction: ingress + protocol: tcp + port_range_min: 2380 + port_range_max: 2380 + remote_mode: remote_group_id + + node-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-node-secgrp + params: + cluster_id: { get_param: cluster_id } + description: + str_replace: + template: Security group for cluster_id OpenShift cluster nodes + params: + cluster_id: { get_param: cluster_id } + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: { get_param: ssh_incoming } + - direction: ingress + protocol: tcp + port_range_min: 10250 + port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 4789 + port_range_max: 4789 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: { get_param: node_port_incoming } + + infra-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-infra-secgrp + params: + cluster_id: { get_param: cluster_id } + description: + str_replace: + template: Security group for cluster_id OpenShift infrastructure cluster nodes + params: + cluster_id: { get_param: cluster_id } + rules: + - direction: ingress + protocol: tcp + port_range_min: 80 + port_range_max: 80 + - direction: ingress + protocol: tcp + port_range_min: 443 + port_range_max: 443 + + dns-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + 
str_replace: + template: openshift-ansible-cluster_id-dns-secgrp + params: + cluster_id: { get_param: cluster_id } + description: + str_replace: + template: Security group for cluster_id cluster DNS + params: + cluster_id: { get_param: cluster_id } + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: { get_param: ssh_incoming } + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: { get_param: node_port_incoming } + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: { get_param: node_port_incoming } + + etcd: + type: OS::Heat::ResourceGroup + properties: + count: { get_param: num_etcd } + resource_def: + type: heat_stack_server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id.cluster_env + params: + cluster_id: { get_param: cluster_id } + k8s_type: etcd + cluster_env: { get_param: cluster_env } + cluster_env: { get_param: cluster_env } + cluster_id: { get_param: cluster_id } + type: etcd + image: { get_param: etcd_image } + flavor: { get_param: etcd_flavor } + key_name: { get_param: ssh_public_key } + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: etcd-secgrp } + floating_network: { get_param: external_net } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + volume_size: { get_param: etcd_volume_size } + depends_on: + - interface + + masters: + type: OS::Heat::ResourceGroup + properties: + count: { get_param: num_masters } + resource_def: + type: heat_stack_server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id.cluster_env + params: + cluster_id: { get_param: cluster_id } + k8s_type: master + cluster_env: { get_param: cluster_env } + cluster_env: { get_param: cluster_env } + cluster_id: { get_param: cluster_id } + type: master + image: { get_param: master_image } + flavor: { get_param: master_flavor } + key_name: { get_param: ssh_public_key } + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: master-secgrp } + - { get_resource: node-secgrp } + floating_network: { get_param: external_net } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + volume_size: { get_param: master_volume_size } + depends_on: + - interface + + compute_nodes: + type: OS::Heat::ResourceGroup + properties: + count: { get_param: num_nodes } + resource_def: + type: heat_stack_server.yaml + properties: + name: + str_replace: + template: subtype-k8s_type-%index%.cluster_id.cluster_env + params: + cluster_id: { get_param: cluster_id } + k8s_type: node + subtype: app + cluster_env: { get_param: cluster_env } + cluster_env: { get_param: cluster_env } + cluster_id: { get_param: cluster_id } + type: node + subtype: app + image: { get_param: node_image } + flavor: { get_param: node_flavor } + key_name: { get_param: ssh_public_key } + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: node-secgrp } + floating_network: { get_param: external_net } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + volume_size: { get_param: app_volume_size } + depends_on: + - interface + + infra_nodes: + type: OS::Heat::ResourceGroup + properties: + count: { get_param: num_infra } + 
resource_def: + type: heat_stack_server.yaml + properties: + name: + str_replace: + template: subtypek8s_type-%index%.cluster_id.cluster_env + params: + cluster_id: { get_param: cluster_id } + k8s_type: node + subtype: infra + cluster_env: { get_param: cluster_env } + cluster_env: { get_param: cluster_env } + cluster_id: { get_param: cluster_id } + type: node + subtype: infra + image: { get_param: infra_image } + flavor: { get_param: infra_flavor } + key_name: { get_param: ssh_public_key } + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: node-secgrp } + - { get_resource: infra-secgrp } + floating_network: { get_param: external_net } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + volume_size: { get_param: infra_volume_size } + depends_on: + - interface + + dns: + type: OS::Heat::ResourceGroup + properties: + count: { get_param: num_dns } + resource_def: + type: heat_stack_server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id.cluster_env + params: + cluster_id: { get_param: cluster_id } + k8s_type: dns + cluster_env: { get_param: cluster_env } + cluster_env: { get_param: cluster_env } + cluster_id: { get_param: cluster_id } + type: dns + image: { get_param: dns_image } + flavor: { get_param: dns_flavor } + key_name: { get_param: ssh_public_key } + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: node-secgrp } + - { get_resource: dns-secgrp } + floating_network: { get_param: external_net } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + volume_size: { get_param: dns_volume_size } + depends_on: + - interface + diff --git a/roles/openstack-stack/files/heat_stack_server.yaml b/roles/openstack-stack/files/heat_stack_server.yaml new file mode 100644 index 000000000..978da4f0b --- /dev/null +++ b/roles/openstack-stack/files/heat_stack_server.yaml @@ -0,0 +1,156 @@ +heat_template_version: 2014-10-16 + +description: OpenShift cluster server + +parameters: + + name: + type: string + label: Name + description: Name + + cluster_env: + type: string + label: Cluster environment + description: Environment of the cluster + + cluster_id: + type: string + label: Cluster ID + description: Identifier of the cluster + + type: + type: string + label: Type + description: Type master or node + + subtype: + type: string + label: Sub-type + description: Sub-type compute or infra for nodes, default otherwise + default: default + + key_name: + type: string + label: Key name + description: Key name of keypair + + image: + type: string + label: Image + description: Name of the image + + flavor: + type: string + label: Flavor + description: Name of the flavor + + net: + type: string + label: Net ID + description: Net resource + + net_name: + type: string + label: Net name + description: Net name + + subnet: + type: string + label: Subnet ID + description: Subnet resource + + secgrp: + type: comma_delimited_list + label: Security groups + description: Security group resources + + floating_network: + type: string + label: Floating network + description: Network to allocate floating IP from + + availability_zone: + type: string + description: The Availability Zone to launch the instance. + default: nova + + volume_size: + type: number + description: Size of the volume to be created. 
+ default: 1 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + +outputs: + + name: + description: Name of the server + value: { get_attr: [ server, name ] } + + private_ip: + description: Private IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } + - 0 + - addr + + floating_ip: + description: Floating IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } + - 1 + - addr + +resources: + + server: + type: OS::Nova::Server + properties: + name: { get_param: name } + key_name: { get_param: key_name } + image: { get_param: image } + flavor: { get_param: flavor } + networks: + - port: { get_resource: port } + user_data: { get_file: user-data } + user_data_format: RAW + metadata: + environment: { get_param: cluster_env } + clusterid: { get_param: cluster_id } + host-type: { get_param: type } + sub-host-type: { get_param: subtype } + + port: + type: OS::Neutron::Port + properties: + network: { get_param: net } + fixed_ips: + - subnet: { get_param: subnet } + security_groups: { get_param: secgrp } + + floating-ip: + type: OS::Neutron::FloatingIP + properties: + floating_network: { get_param: floating_network } + port_id: { get_resource: port } + + cinder_volume: + type: OS::Cinder::Volume + properties: + size: { get_param: volume_size } + availability_zone: { get_param: availability_zone } + + volume_attachment: + type: OS::Cinder::VolumeAttachment + properties: + volume_id: { get_resource: cinder_volume } + instance_uuid: { get_resource: server } + mountpoint: /dev/sdb diff --git a/roles/openstack-stack/files/user-data b/roles/openstack-stack/files/user-data new file mode 100644 index 000000000..eb65f7cec --- /dev/null +++ b/roles/openstack-stack/files/user-data @@ -0,0 +1,13 @@ +#cloud-config +disable_root: true + +system_info: + default_user: + name: openshift + sudo: ["ALL=(ALL) NOPASSWD: ALL"] + +write_files: + - path: /etc/sudoers.d/00-openshift-no-requiretty + permissions: 440 + content: | + Defaults:openshift !requiretty diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml new file mode 100644 index 000000000..c953cb603 --- /dev/null +++ b/roles/openstack-stack/tasks/main.yml @@ -0,0 +1,31 @@ +--- +- name: create stack + ignore_errors: False + register: stack_create + os_stack: + name: "{{ stack_name }}" + state: present + template: 'roles/openstack-stack/files/heat_stack.yaml' + wait: yes + parameters: + cluster_env: "{{ dns_domain }}" + cluster_id: "{{ stack_name }}" + subnet_24_prefix: "{{ subnet_prefix }}" + dns_nameservers: "{{ dns_nameservers }}" + external_net: "{{ external_network }}" + ssh_public_key: "{{ ssh_public_key }}" + num_etcd: "{{ num_etcd }}" + num_masters: "{{ num_masters }}" + num_nodes: "{{ num_nodes }}" + num_infra: "{{ num_infra }}" + num_dns: "{{ num_dns }}" + etcd_image: "{{ openstack_image }}" + master_image: "{{ openstack_image }}" + node_image: "{{ openstack_image }}" + infra_image: "{{ openstack_image }}" + dns_image: "{{ openstack_image }}" + etcd_flavor: "{{ etcd_flavor }}" + master_flavor: "{{ master_flavor }}" + node_flavor: "{{ node_flavor }}" + infra_flavor: "{{ infra_flavor }}" + dns_flavor: "{{ dns_flavor }}" diff --git a/roles/openstack-stack/test/roles b/roles/openstack-stack/test/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/roles/openstack-stack/test/roles @@ -0,0 +1 @@ +../../../roles/ \ No newline at end of file diff --git 
a/roles/openstack-stack/test/stack-create-test.yml b/roles/openstack-stack/test/stack-create-test.yml new file mode 100644 index 000000000..94e312ee3 --- /dev/null +++ b/roles/openstack-stack/test/stack-create-test.yml @@ -0,0 +1,17 @@ +--- +- hosts: localhost + roles: + - role: openstack-stack + stack_name: test-stack + dns_domain: "{{ openstack_dns_domain }}" + dns_nameservers: "{{ openstack_nameservers }}" + subnet_prefix: "{{ openstack_subnet_prefix }}" + ssh_public_key: "{{ openstack_ssh_public_key }}" + openstack_image: "{{ openstack_default_image_name }}" + etcd_flavor: "{{ openstack_default_flavor }}" + master_flavor: "{{ openstack_default_flavor }}" + node_flavor: "{{ openstack_default_flavor }}" + infra_flavor: "{{ openstack_default_flavor }}" + dns_flavor: "{{ openstack_default_flavor }}" + external_network: "{{ openstack_external_network_name }}" + -- cgit v1.2.1 From f0ca54ac5c4408284105fe877d81e8afbfbc2991 Mon Sep 17 00:00:00 2001 From: Ryan Cook Date: Fri, 13 Jan 2017 15:47:50 -0500 Subject: Making providers common (#126) * Making providers common * moving directory locations * using links and removal of vars file callout * rename of file * went block crazy * cleanup * add to remove * missing Pyyaml package in README * let docker actually setup docker storage and start the service * name change * Fix for vmware. Will variablize in the future * catchup to test common providers against master * should only be schedulable nodes --- roles/docker-storage-setup/files/docker-storage-setup | 4 ++++ roles/docker-storage-setup/tasks/main.yaml | 8 ++++++++ 2 files changed, 12 insertions(+) create mode 100644 roles/docker-storage-setup/files/docker-storage-setup create mode 100644 roles/docker-storage-setup/tasks/main.yaml (limited to 'roles') diff --git a/roles/docker-storage-setup/files/docker-storage-setup b/roles/docker-storage-setup/files/docker-storage-setup new file mode 100644 index 000000000..5e9d494a1 --- /dev/null +++ b/roles/docker-storage-setup/files/docker-storage-setup @@ -0,0 +1,4 @@ +DEVS=/dev/sdb +VG=docker-vol +DATA_SIZE=95%VG +EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize=3G" diff --git a/roles/docker-storage-setup/tasks/main.yaml b/roles/docker-storage-setup/tasks/main.yaml new file mode 100644 index 000000000..32f79fff9 --- /dev/null +++ b/roles/docker-storage-setup/tasks/main.yaml @@ -0,0 +1,8 @@ +--- +- name: create the docker-storage-setup config file + copy: + src: docker-storage-setup + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0644 -- cgit v1.2.1 From 986d04922446da75879ce5a9064bd0db1477ac7f Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Thu, 26 Jan 2017 17:37:06 -0500 Subject: update for yamllint errors --- roles/docker-storage-setup/tasks/main.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'roles') diff --git a/roles/docker-storage-setup/tasks/main.yaml b/roles/docker-storage-setup/tasks/main.yaml index 32f79fff9..17b13f27f 100644 --- a/roles/docker-storage-setup/tasks/main.yaml +++ b/roles/docker-storage-setup/tasks/main.yaml @@ -1,8 +1,8 @@ --- - name: create the docker-storage-setup config file copy: - src: docker-storage-setup - dest: /etc/sysconfig/docker-storage-setup - owner: root - group: root - mode: 0644 + src: docker-storage-setup + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0644 -- cgit v1.2.1 From 3bf8df1a873785a09bf3c1827bfb5097955c5e44 Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Tue, 7 Feb 2017 01:12:58 -0500 Subject: 
Fixing two significant bugs in the HEAT deployment (#13) --- roles/openstack-stack/tasks/main.yml | 3 +++ 1 file changed, 3 insertions(+) (limited to 'roles') diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml index c953cb603..efee08c0e 100644 --- a/roles/openstack-stack/tasks/main.yml +++ b/roles/openstack-stack/tasks/main.yml @@ -29,3 +29,6 @@ node_flavor: "{{ node_flavor }}" infra_flavor: "{{ infra_flavor }}" dns_flavor: "{{ dns_flavor }}" + master_volume_size: "{{ master_volume_size }}" + app_volume_size: "{{ app_volume_size }}" + infra_volume_size: "{{ infra_volume_size }}" -- cgit v1.2.1 From fdac6976d4b48c11b8de253ef8afa34af0da8cdb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Mon, 20 Feb 2017 12:56:21 -0500 Subject: Ensure DNS configuration has wildcards set for infra nodes (#24) * Ensure DNS configuration has wildcards set for infra nodes * Updated to include all cluster hosts for DNS entries --- roles/common/pre_tasks/pre_tasks.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'roles') diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml index 71a989b30..06a56605d 100644 --- a/roles/common/pre_tasks/pre_tasks.yml +++ b/roles/common/pre_tasks/pre_tasks.yml @@ -24,3 +24,13 @@ set_fact: full_dns_domain: "{{ (env_id|trim == '') | ternary(dns_domain, env_id + '.' + dns_domain) }}" delegate_to: localhost + +- name: Set the APP domain for OpenShift use + set_fact: + openshift_app_domain: "{{ openshift_app_domain | default('apps') }}" + delegate_to: localhost + +- name: Set the default app domain for routing purposes + set_fact: + openshift_master_default_subdomain: "{{ openshift_app_domain }}.{{ full_dns_domain }}" + delegate_to: localhost -- cgit v1.2.1 From c90d5323afc575246df2f50e9125069f3c12e81e Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Tue, 25 Apr 2017 23:17:38 -0400 Subject: Stack refactor (#38) * Refactored openstack-stack role to: - Convert static heat template files to ansible templates - Include native ansible groups via openstack metadata. This removes the need for a playbook to map host groups - Some code cleanup * Deleting commentd out code and irrelevant plays * Refactored openstack-stack role to: - Convert static heat template files to ansible templates - Include native ansible groups via openstack metadata. This removes the need for a playbook to map host groups - Some code cleanup * Deleting commentd out code and irrelevant plays * Replacing stack parameters with jinja expressions * Updating sample inventory to work with latest dynamic inventory changes * updating inventory with host group mapping. 
making sync keys optional * Missing cluster_hosts group * Updating to add infra_hosts * Updating inventory per comments from oybed and sabre1041 --- roles/openstack-stack/defaults/main.yml | 10 + roles/openstack-stack/files/heat_stack.yaml | 684 --------------------- roles/openstack-stack/files/heat_stack_server.yaml | 156 ----- roles/openstack-stack/files/user-data | 13 - roles/openstack-stack/tasks/main.yml | 59 +- roles/openstack-stack/templates/heat_stack.yaml.j2 | 551 +++++++++++++++++ .../templates/heat_stack_server.yaml.j2 | 170 +++++ roles/openstack-stack/templates/user_data.j2 | 13 + 8 files changed, 777 insertions(+), 879 deletions(-) create mode 100644 roles/openstack-stack/defaults/main.yml delete mode 100644 roles/openstack-stack/files/heat_stack.yaml delete mode 100644 roles/openstack-stack/files/heat_stack_server.yaml delete mode 100644 roles/openstack-stack/files/user-data create mode 100644 roles/openstack-stack/templates/heat_stack.yaml.j2 create mode 100644 roles/openstack-stack/templates/heat_stack_server.yaml.j2 create mode 100644 roles/openstack-stack/templates/user_data.j2 (limited to 'roles') diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml new file mode 100644 index 000000000..8aefe039d --- /dev/null +++ b/roles/openstack-stack/defaults/main.yml @@ -0,0 +1,10 @@ +--- +dns_volume_size: 1 +ssh_ingress_cidr: 0.0.0.0/0 +node_ingress_cidr: 0.0.0.0/0 +num_etcd: 0 +num_masters: 1 +num_nodes: 1 +num_dns: 1 +num_infra: 1 +etcd_volume_size: 2 diff --git a/roles/openstack-stack/files/heat_stack.yaml b/roles/openstack-stack/files/heat_stack.yaml deleted file mode 100644 index 058f7a7ad..000000000 --- a/roles/openstack-stack/files/heat_stack.yaml +++ /dev/null @@ -1,684 +0,0 @@ -heat_template_version: 2014-10-16 - -description: OpenShift cluster - -parameters: - - cluster_env: - type: string - label: Cluster environment - description: Environment of the cluster - - cluster_id: - type: string - label: Cluster ID - description: Identifier of the cluster - - subnet_24_prefix: - type: string - label: subnet /24 prefix - description: /24 subnet prefix of the network of the cluster (dot separated number triplet) - - dns_nameservers: - type: comma_delimited_list - label: DNS nameservers list - description: List of DNS nameservers - - external_net: - type: string - label: External network - description: Name of the external network - default: external - - ssh_public_key: - type: string - label: SSH public key - description: SSH public key - hidden: true - - ssh_incoming: - type: string - label: Source of ssh connections - description: Source of legitimate ssh connections - default: 0.0.0.0/0 - - node_port_incoming: - type: string - label: Source of node port connections - description: Authorized sources targetting node ports - default: 0.0.0.0/0 - - num_etcd: - type: number - label: Number of etcd nodes - description: Number of etcd nodes - - num_masters: - type: number - label: Number of masters - description: Number of masters - - num_nodes: - type: number - label: Number of compute nodes - description: Number of compute nodes - - num_infra: - type: number - label: Number of infrastructure nodes - description: Number of infrastructure nodes - - num_dns: - type: number - label: Number of dns servers - description: Number of dns servers - - etcd_image: - type: string - label: Etcd image - description: Name of the image for the etcd servers - - master_image: - type: string - label: Master image - description: Name of the image for the master 
servers - - node_image: - type: string - label: Node image - description: Name of the image for the compute node servers - - infra_image: - type: string - label: Infra image - description: Name of the image for the infra node servers - - dns_image: - type: string - label: DNS image - description: Name of the image for the DNS server - - etcd_flavor: - type: string - label: Etcd flavor - description: Flavor of the etcd servers - - master_flavor: - type: string - label: Master flavor - description: Flavor of the master servers - - node_flavor: - type: string - label: Node flavor - description: Flavor of the compute node servers - - infra_flavor: - type: string - label: Infra flavor - description: Flavor of the infra node servers - - dns_flavor: - type: string - label: DNS flavor - description: Flavor of the DNS server - - master_volume_size: - type: number - description: Size of the volume to be created. - default: 5 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - - app_volume_size: - type: number - description: Size of the volume to be created. - default: 5 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - - infra_volume_size: - type: number - description: Size of the volume to be created. - default: 5 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - - dns_volume_size: - type: number - description: Size of the volume to be created. - default: 5 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - - etcd_volume_size: - type: number - description: Size of the volume to be created. - default: 5 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. 
- -outputs: - - etcd_names: - description: Name of the etcds - value: { get_attr: [ etcd, name ] } - - etcd_ips: - description: IPs of the etcds - value: { get_attr: [ etcd, private_ip ] } - - etcd_floating_ips: - description: Floating IPs of the etcds - value: { get_attr: [ etcd, floating_ip ] } - - master_names: - description: Name of the masters - value: { get_attr: [ masters, name ] } - - master_ips: - description: IPs of the masters - value: { get_attr: [ masters, private_ip ] } - - master_floating_ips: - description: Floating IPs of the masters - value: { get_attr: [ masters, floating_ip ] } - - node_names: - description: Name of the nodes - value: { get_attr: [ compute_nodes, name ] } - - node_ips: - description: IPs of the nodes - value: { get_attr: [ compute_nodes, private_ip ] } - - node_floating_ips: - description: Floating IPs of the nodes - value: { get_attr: [ compute_nodes, floating_ip ] } - - infra_names: - description: Name of the nodes - value: { get_attr: [ infra_nodes, name ] } - - infra_ips: - description: IPs of the nodes - value: { get_attr: [ infra_nodes, private_ip ] } - - infra_floating_ips: - description: Floating IPs of the nodes - value: { get_attr: [ infra_nodes, floating_ip ] } - - dns_name: - description: Name of the DNS - value: - get_attr: - - dns - - name - - dns_floating_ip: - description: Floating IP of the DNS - value: - get_attr: - - dns - - addresses - - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - - 1 - - addr - -resources: - - net: - type: OS::Neutron::Net - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - - subnet: - type: OS::Neutron::Subnet - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-subnet - params: - cluster_id: { get_param: cluster_id } - network: { get_resource: net } - cidr: - str_replace: - template: subnet_24_prefix.0/24 - params: - subnet_24_prefix: { get_param: subnet_24_prefix } - allocation_pools: - - start: - str_replace: - template: subnet_24_prefix.3 - params: - subnet_24_prefix: { get_param: subnet_24_prefix } - end: - str_replace: - template: subnet_24_prefix.254 - params: - subnet_24_prefix: { get_param: subnet_24_prefix } - dns_nameservers: - - 10.9.48.31 -# - { get_param: dns_nameservers } -# repeat: -# for_each: -# <%nameserver%>: { get_param: dns_nameservers } -# template: <%nameserver%> - - router: - type: OS::Neutron::Router - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-router - params: - cluster_id: { get_param: cluster_id } - external_gateway_info: - network: { get_param: external_net } - - interface: - type: OS::Neutron::RouterInterface - properties: - router_id: { get_resource: router } - subnet_id: { get_resource: subnet } - -# keypair: -# type: OS::Nova::KeyPair -# properties: -# name: -# str_replace: -# template: openshift-ansible-cluster_id-keypair -# params: -# cluster_id: { get_param: cluster_id } -# public_key: { get_param: ssh_public_key } - - master-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-master-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id OpenShift cluster master - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: { get_param: 
ssh_incoming } - - direction: ingress - protocol: tcp - port_range_min: 4001 - port_range_max: 4001 - - direction: ingress - protocol: tcp - port_range_min: 8443 - port_range_max: 8443 - - direction: ingress - protocol: tcp - port_range_min: 8444 - port_range_max: 8444 - - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - - direction: ingress - protocol: tcp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: udp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: tcp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: udp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: tcp - port_range_min: 2224 - port_range_max: 2224 - - direction: ingress - protocol: udp - port_range_min: 5404 - port_range_max: 5404 - - direction: ingress - protocol: udp - port_range_min: 5405 - port_range_max: 5405 - - direction: ingress - protocol: tcp - port_range_min: 9090 - port_range_max: 9090 - - etcd-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-etcd-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id etcd cluster - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: { get_param: ssh_incoming } - - direction: ingress - protocol: tcp - port_range_min: 2379 - port_range_max: 2379 - remote_mode: remote_group_id - remote_group_id: { get_resource: master-secgrp } - - direction: ingress - protocol: tcp - port_range_min: 2380 - port_range_max: 2380 - remote_mode: remote_group_id - - node-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-node-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id OpenShift cluster nodes - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: { get_param: ssh_incoming } - - direction: ingress - protocol: tcp - port_range_min: 10250 - port_range_max: 10250 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 4789 - port_range_max: 4789 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 30000 - port_range_max: 32767 - remote_ip_prefix: { get_param: node_port_incoming } - - infra-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-infra-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id OpenShift infrastructure cluster nodes - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 80 - port_range_max: 80 - - direction: ingress - protocol: tcp - port_range_min: 443 - port_range_max: 443 - - dns-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - 
str_replace: - template: openshift-ansible-cluster_id-dns-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id cluster DNS - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: { get_param: ssh_incoming } - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: { get_param: node_port_incoming } - - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: { get_param: node_port_incoming } - - etcd: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_etcd } - resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id.cluster_env - params: - cluster_id: { get_param: cluster_id } - k8s_type: etcd - cluster_env: { get_param: cluster_env } - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: etcd - image: { get_param: etcd_image } - flavor: { get_param: etcd_flavor } - key_name: { get_param: ssh_public_key } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: etcd-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - volume_size: { get_param: etcd_volume_size } - depends_on: - - interface - - masters: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_masters } - resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id.cluster_env - params: - cluster_id: { get_param: cluster_id } - k8s_type: master - cluster_env: { get_param: cluster_env } - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: master - image: { get_param: master_image } - flavor: { get_param: master_flavor } - key_name: { get_param: ssh_public_key } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: master-secgrp } - - { get_resource: node-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - volume_size: { get_param: master_volume_size } - depends_on: - - interface - - compute_nodes: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_nodes } - resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: subtype-k8s_type-%index%.cluster_id.cluster_env - params: - cluster_id: { get_param: cluster_id } - k8s_type: node - subtype: app - cluster_env: { get_param: cluster_env } - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: node - subtype: app - image: { get_param: node_image } - flavor: { get_param: node_flavor } - key_name: { get_param: ssh_public_key } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: node-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - volume_size: { get_param: app_volume_size } - depends_on: - - interface - - infra_nodes: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_infra } - 
resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: subtypek8s_type-%index%.cluster_id.cluster_env - params: - cluster_id: { get_param: cluster_id } - k8s_type: node - subtype: infra - cluster_env: { get_param: cluster_env } - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: node - subtype: infra - image: { get_param: infra_image } - flavor: { get_param: infra_flavor } - key_name: { get_param: ssh_public_key } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: node-secgrp } - - { get_resource: infra-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - volume_size: { get_param: infra_volume_size } - depends_on: - - interface - - dns: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_dns } - resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id.cluster_env - params: - cluster_id: { get_param: cluster_id } - k8s_type: dns - cluster_env: { get_param: cluster_env } - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: dns - image: { get_param: dns_image } - flavor: { get_param: dns_flavor } - key_name: { get_param: ssh_public_key } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: node-secgrp } - - { get_resource: dns-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - volume_size: { get_param: dns_volume_size } - depends_on: - - interface - diff --git a/roles/openstack-stack/files/heat_stack_server.yaml b/roles/openstack-stack/files/heat_stack_server.yaml deleted file mode 100644 index 978da4f0b..000000000 --- a/roles/openstack-stack/files/heat_stack_server.yaml +++ /dev/null @@ -1,156 +0,0 @@ -heat_template_version: 2014-10-16 - -description: OpenShift cluster server - -parameters: - - name: - type: string - label: Name - description: Name - - cluster_env: - type: string - label: Cluster environment - description: Environment of the cluster - - cluster_id: - type: string - label: Cluster ID - description: Identifier of the cluster - - type: - type: string - label: Type - description: Type master or node - - subtype: - type: string - label: Sub-type - description: Sub-type compute or infra for nodes, default otherwise - default: default - - key_name: - type: string - label: Key name - description: Key name of keypair - - image: - type: string - label: Image - description: Name of the image - - flavor: - type: string - label: Flavor - description: Name of the flavor - - net: - type: string - label: Net ID - description: Net resource - - net_name: - type: string - label: Net name - description: Net name - - subnet: - type: string - label: Subnet ID - description: Subnet resource - - secgrp: - type: comma_delimited_list - label: Security groups - description: Security group resources - - floating_network: - type: string - label: Floating network - description: Network to allocate floating IP from - - availability_zone: - type: string - description: The Availability Zone to launch the instance. - default: nova - - volume_size: - type: number - description: Size of the volume to be created. 
- default: 1 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - -outputs: - - name: - description: Name of the server - value: { get_attr: [ server, name ] } - - private_ip: - description: Private IP of the server - value: - get_attr: - - server - - addresses - - { get_param: net_name } - - 0 - - addr - - floating_ip: - description: Floating IP of the server - value: - get_attr: - - server - - addresses - - { get_param: net_name } - - 1 - - addr - -resources: - - server: - type: OS::Nova::Server - properties: - name: { get_param: name } - key_name: { get_param: key_name } - image: { get_param: image } - flavor: { get_param: flavor } - networks: - - port: { get_resource: port } - user_data: { get_file: user-data } - user_data_format: RAW - metadata: - environment: { get_param: cluster_env } - clusterid: { get_param: cluster_id } - host-type: { get_param: type } - sub-host-type: { get_param: subtype } - - port: - type: OS::Neutron::Port - properties: - network: { get_param: net } - fixed_ips: - - subnet: { get_param: subnet } - security_groups: { get_param: secgrp } - - floating-ip: - type: OS::Neutron::FloatingIP - properties: - floating_network: { get_param: floating_network } - port_id: { get_resource: port } - - cinder_volume: - type: OS::Cinder::Volume - properties: - size: { get_param: volume_size } - availability_zone: { get_param: availability_zone } - - volume_attachment: - type: OS::Cinder::VolumeAttachment - properties: - volume_id: { get_resource: cinder_volume } - instance_uuid: { get_resource: server } - mountpoint: /dev/sdb diff --git a/roles/openstack-stack/files/user-data b/roles/openstack-stack/files/user-data deleted file mode 100644 index eb65f7cec..000000000 --- a/roles/openstack-stack/files/user-data +++ /dev/null @@ -1,13 +0,0 @@ -#cloud-config -disable_root: true - -system_info: - default_user: - name: openshift - sudo: ["ALL=(ALL) NOPASSWD: ALL"] - -write_files: - - path: /etc/sudoers.d/00-openshift-no-requiretty - permissions: 440 - content: | - Defaults:openshift !requiretty diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml index efee08c0e..71c7bbe0d 100644 --- a/roles/openstack-stack/tasks/main.yml +++ b/roles/openstack-stack/tasks/main.yml @@ -1,34 +1,41 @@ --- +- name: create HOT stack template prefix + register: stack_template_pre + tempfile: + state: directory + prefix: casl-ansible + +- name: set template paths + set_fact: + stack_template_path: "{{ stack_template_pre.path }}/stack.yaml" + server_template_path: "{{ stack_template_pre.path }}/server.yaml" + user_data_template_path: "{{ stack_template_pre.path }}/user-data" + +- name: generate HOT stack template from jinja2 template + template: + src: heat_stack.yaml.j2 + dest: "{{ stack_template_path }}" + +- name: generate HOT server template from jinja2 template + template: + src: heat_stack_server.yaml.j2 + dest: "{{ server_template_path }}" + +- name: generate user_data from jinja2 template + template: + src: user_data.j2 + dest: "{{ user_data_template_path }}" + - name: create stack ignore_errors: False register: stack_create os_stack: name: "{{ stack_name }}" state: present - template: 'roles/openstack-stack/files/heat_stack.yaml' + template: "{{ stack_template_path }}" wait: yes - parameters: - cluster_env: "{{ dns_domain }}" - cluster_id: "{{ stack_name }}" - subnet_24_prefix: "{{ subnet_prefix }}" - dns_nameservers: "{{ dns_nameservers }}" - external_net: "{{ external_network }}" - ssh_public_key: "{{ ssh_public_key 
}}" - num_etcd: "{{ num_etcd }}" - num_masters: "{{ num_masters }}" - num_nodes: "{{ num_nodes }}" - num_infra: "{{ num_infra }}" - num_dns: "{{ num_dns }}" - etcd_image: "{{ openstack_image }}" - master_image: "{{ openstack_image }}" - node_image: "{{ openstack_image }}" - infra_image: "{{ openstack_image }}" - dns_image: "{{ openstack_image }}" - etcd_flavor: "{{ etcd_flavor }}" - master_flavor: "{{ master_flavor }}" - node_flavor: "{{ node_flavor }}" - infra_flavor: "{{ infra_flavor }}" - dns_flavor: "{{ dns_flavor }}" - master_volume_size: "{{ master_volume_size }}" - app_volume_size: "{{ app_volume_size }}" - infra_volume_size: "{{ infra_volume_size }}" + +- name: cleanup temp files + file: + path: "{{ stack_template_pre.path }}" + state: absent diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 new file mode 100644 index 000000000..bc9547f66 --- /dev/null +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -0,0 +1,551 @@ +heat_template_version: 2016-10-14 + +description: OpenShift cluster + +parameters: + +outputs: + + etcd_names: + description: Name of the etcds + value: { get_attr: [ etcd, name ] } + + etcd_ips: + description: IPs of the etcds + value: { get_attr: [ etcd, private_ip ] } + + etcd_floating_ips: + description: Floating IPs of the etcds + value: { get_attr: [ etcd, floating_ip ] } + + master_names: + description: Name of the masters + value: { get_attr: [ masters, name ] } + + master_ips: + description: IPs of the masters + value: { get_attr: [ masters, private_ip ] } + + master_floating_ips: + description: Floating IPs of the masters + value: { get_attr: [ masters, floating_ip ] } + + node_names: + description: Name of the nodes + value: { get_attr: [ compute_nodes, name ] } + + node_ips: + description: IPs of the nodes + value: { get_attr: [ compute_nodes, private_ip ] } + + node_floating_ips: + description: Floating IPs of the nodes + value: { get_attr: [ compute_nodes, floating_ip ] } + + infra_names: + description: Name of the nodes + value: { get_attr: [ infra_nodes, name ] } + + infra_ips: + description: IPs of the nodes + value: { get_attr: [ infra_nodes, private_ip ] } + + infra_floating_ips: + description: Floating IPs of the nodes + value: { get_attr: [ infra_nodes, floating_ip ] } + + dns_name: + description: Name of the DNS + value: + get_attr: + - dns + - name + + dns_floating_ip: + description: Floating IP of the DNS + value: + get_attr: + - dns + - addresses + - str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + - 1 + - addr + +resources: + + net: + type: OS::Neutron::Net + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + + subnet: + type: OS::Neutron::Subnet + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-subnet + params: + cluster_id: {{ stack_name }} + network: { get_resource: net } + cidr: + str_replace: + template: subnet_24_prefix.0/24 + params: + subnet_24_prefix: {{ subnet_prefix }} + allocation_pools: + - start: + str_replace: + template: subnet_24_prefix.3 + params: + subnet_24_prefix: {{ subnet_prefix }} + end: + str_replace: + template: subnet_24_prefix.254 + params: + subnet_24_prefix: {{ subnet_prefix }} + dns_nameservers: + {% for nameserver in dns_nameservers %} + - {{ nameserver }} + {% endfor %} + + router: + type: OS::Neutron::Router + properties: + name: + str_replace: + template: 
openshift-ansible-cluster_id-router + params: + cluster_id: {{ stack_name }} + external_gateway_info: + network: {{ external_network }} + + interface: + type: OS::Neutron::RouterInterface + properties: + router_id: { get_resource: router } + subnet_id: { get_resource: subnet } + +# keypair: +# type: OS::Nova::KeyPair +# properties: +# name: +# str_replace: +# template: openshift-ansible-cluster_id-keypair +# params: +# cluster_id: {{ stack_name }} +# public_key: {{ ssh_public_key }} + + master-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-master-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster master + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 4001 + port_range_max: 4001 + - direction: ingress + protocol: tcp + port_range_min: 8443 + port_range_max: 8443 + - direction: ingress + protocol: tcp + port_range_min: 8444 + port_range_max: 8444 + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: tcp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: udp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: tcp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: udp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: tcp + port_range_min: 2224 + port_range_max: 2224 + - direction: ingress + protocol: udp + port_range_min: 5404 + port_range_max: 5404 + - direction: ingress + protocol: udp + port_range_min: 5405 + port_range_max: 5405 + - direction: ingress + protocol: tcp + port_range_min: 9090 + port_range_max: 9090 + + etcd-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-etcd-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id etcd cluster + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 2379 + port_range_max: 2379 + remote_mode: remote_group_id + remote_group_id: { get_resource: master-secgrp } + - direction: ingress + protocol: tcp + port_range_min: 2380 + port_range_max: 2380 + remote_mode: remote_group_id + + node-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-node-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster nodes + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 10250 + port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10255 + port_range_max: 10255 + 
remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 4789 + port_range_max: 4789 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: {{ node_ingress_cidr }} + + infra-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-infra-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift infrastructure cluster nodes + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 80 + port_range_max: 80 + - direction: ingress + protocol: tcp + port_range_min: 443 + port_range_max: 443 + + dns-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-dns-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id cluster DNS + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: {{ node_ingress_cidr }} + + etcd: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_etcd }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: etcd + cluster_env: {{ dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: etcds + cluster_id: {{ stack_name }} + type: etcd + image: {{ openstack_image }} + flavor: {{ etcd_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: etcd-secgrp } + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: {{ etcd_volume_size }} + depends_on: + - interface + + masters: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_masters }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: master + cluster_env: {{ dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: masters + cluster_id: {{ stack_name }} + type: master + image: {{ openstack_image }} + flavor: {{ master_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: master-secgrp } + - { get_resource: node-secgrp } + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: {{ master_volume_size }} + depends_on: + - interface + + compute_nodes: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_nodes }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: subtype-k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: node + subtype: app + cluster_env: 
{{ dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: nodes + cluster_id: {{ stack_name }} + type: node + subtype: app + node_labels: + region: primary + image: {{ openstack_image }} + flavor: {{ node_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: node-secgrp } + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: {{ app_volume_size }} + depends_on: + - interface + + infra_nodes: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_infra }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: subtypek8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: node + subtype: infra + cluster_env: {{ dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: infra + cluster_id: {{ stack_name }} + type: node + subtype: infra + node_labels: + region: infra + image: {{ openstack_image }} + flavor: {{ infra_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: node-secgrp } + - { get_resource: infra-secgrp } + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: {{ infra_volume_size }} + depends_on: + - interface + + dns: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_dns }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: dns + cluster_env: {{ dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: dns + cluster_id: {{ stack_name }} + type: dns + image: {{ openstack_image }} + flavor: {{ dns_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: node-secgrp } + - { get_resource: dns-secgrp } + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: {{ dns_volume_size }} + depends_on: + - interface + diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 new file mode 100644 index 000000000..5851d3b9b --- /dev/null +++ b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 @@ -0,0 +1,170 @@ +heat_template_version: 2016-10-14 + +description: OpenShift cluster server + +parameters: + + name: + type: string + label: Name + description: Name + + group: + type: string + label: Host Group + description: The Primary Ansible Host Group + default: host + + cluster_env: + type: string + label: Cluster environment + description: Environment of the cluster + + cluster_id: + type: string + label: Cluster ID + description: Identifier of the cluster + + type: + type: string + label: Type + description: Type master or node + + subtype: + type: string + label: Sub-type + description: Sub-type compute or infra for nodes, default otherwise + default: default + + key_name: + type: string + label: Key name + description: Key name of keypair + + image: + type: 
string + label: Image + description: Name of the image + + flavor: + type: string + label: Flavor + description: Name of the flavor + + net: + type: string + label: Net ID + description: Net resource + + net_name: + type: string + label: Net name + description: Net name + + subnet: + type: string + label: Subnet ID + description: Subnet resource + + secgrp: + type: comma_delimited_list + label: Security groups + description: Security group resources + + floating_network: + type: string + label: Floating network + description: Network to allocate floating IP from + + availability_zone: + type: string + description: The Availability Zone to launch the instance. + default: nova + + volume_size: + type: number + description: Size of the volume to be created. + default: 1 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + + node_labels: + type: json + description: OpenShift Node Labels + default: {"region": "default" } + +outputs: + + name: + description: Name of the server + value: { get_attr: [ server, name ] } + + private_ip: + description: Private IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } + - 0 + - addr + + floating_ip: + description: Floating IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } + - 1 + - addr + +resources: + + server: + type: OS::Nova::Server + properties: + name: { get_param: name } + key_name: { get_param: key_name } + image: { get_param: image } + flavor: { get_param: flavor } + networks: + - port: { get_resource: port } + user_data: + get_file: user-data + user_data_format: RAW + metadata: + group: { get_param: group } + environment: { get_param: cluster_env } + clusterid: { get_param: cluster_id } + host-type: { get_param: type } + sub-host-type: { get_param: subtype } + node_labels: { get_param: node_labels } + + port: + type: OS::Neutron::Port + properties: + network: { get_param: net } + fixed_ips: + - subnet: { get_param: subnet } + security_groups: { get_param: secgrp } + + floating-ip: + type: OS::Neutron::FloatingIP + properties: + floating_network: { get_param: floating_network } + port_id: { get_resource: port } + + cinder_volume: + type: OS::Cinder::Volume + properties: + size: { get_param: volume_size } + availability_zone: { get_param: availability_zone } + + volume_attachment: + type: OS::Cinder::VolumeAttachment + properties: + volume_id: { get_resource: cinder_volume } + instance_uuid: { get_resource: server } + mountpoint: /dev/sdb diff --git a/roles/openstack-stack/templates/user_data.j2 b/roles/openstack-stack/templates/user_data.j2 new file mode 100644 index 000000000..eb65f7cec --- /dev/null +++ b/roles/openstack-stack/templates/user_data.j2 @@ -0,0 +1,13 @@ +#cloud-config +disable_root: true + +system_info: + default_user: + name: openshift + sudo: ["ALL=(ALL) NOPASSWD: ALL"] + +write_files: + - path: /etc/sudoers.d/00-openshift-no-requiretty + permissions: 440 + content: | + Defaults:openshift !requiretty -- cgit v1.2.1 From 7304ed4611192f6daa88f84d8b47d3e76514a03b Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Thu, 27 Apr 2017 16:58:41 -0400 Subject: First attempt at a simple multi-master support (#39) * First attempt at a simple multi-master support * Removing unneeded inventory * adding default number of masters and lower number of nodes --- roles/openstack-stack/defaults/main.yml | 2 + roles/openstack-stack/templates/heat_stack.yaml.j2 | 69 ++++++++++++++++++++++ 2 files changed, 71 insertions(+) (limited to 
'roles') diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml index 8aefe039d..2a4ef3a45 100644 --- a/roles/openstack-stack/defaults/main.yml +++ b/roles/openstack-stack/defaults/main.yml @@ -2,6 +2,8 @@ dns_volume_size: 1 ssh_ingress_cidr: 0.0.0.0/0 node_ingress_cidr: 0.0.0.0/0 +master_ingress_cidr: 0.0.0.0/0 +lb_ingress_cidr: 0.0.0.0/0 num_etcd: 0 num_masters: 1 num_nodes: 1 diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index bc9547f66..c367aabe7 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -342,6 +342,31 @@ resources: port_range_min: 53 port_range_max: 53 remote_ip_prefix: {{ node_ingress_cidr }} +{% if num_masters is greaterthan 1 %} + lb-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: openshift-ansible-{{ stack_name }}-lb-secgrp + description: Security group for {{ stack_name }} cluster Load Balancer + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_api_port | default(8443) }} + port_range_max: {{ openshift_master_api_port | default(8443) }} + remote_ip_prefix: {{ lb_ingress_cidr }} + {% if openshift_master_console_port is defined and openshift_master_console_port is not equalto openshift_master_api_port %} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_console_port | default(8443) }} + port_range_max: {{ openshift_master_console_port | default(8443) }} + remote_ip_prefix: {{ lb_ingress_cidr }} + {% endif %} +{% endif %} etcd: type: OS::Heat::ResourceGroup @@ -382,6 +407,47 @@ resources: depends_on: - interface +{% if num_masters is greaterthan 1 %} + loadbalancer: + type: OS::Heat::ResourceGroup + properties: + count: 1 + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: lb + cluster_env: {{ dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: lb + cluster_id: {{ stack_name }} + type: lb + image: {{ openstack_image }} + flavor: {{ lb_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: lb-secgrp } + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: 5 + depends_on: + - interface +{% endif %} + masters: type: OS::Heat::ResourceGroup properties: @@ -412,6 +478,9 @@ resources: secgrp: - { get_resource: master-secgrp } - { get_resource: node-secgrp } +{% if num_etcd is equalto 0 %} + - { get_resource: etcd-secgrp } +{% endif %} floating_network: {{ external_network }} net_name: str_replace: -- cgit v1.2.1 From 215807f53977bc561b3791e37136f140383605ef Mon Sep 17 00:00:00 2001 From: Eduardo Minguez Perez Date: Tue, 16 May 2017 16:21:10 +0200 Subject: Removed hardcoded values from ansible roles --- roles/docker-storage-setup/defaults/main.yaml | 5 +++++ roles/docker-storage-setup/files/docker-storage-setup | 4 ---- roles/docker-storage-setup/tasks/main.yaml | 4 ++-- roles/docker-storage-setup/templates/docker-storage-setup.j2 | 4 ++++ 4 files changed, 11 insertions(+), 6 deletions(-) create mode 100644 
roles/docker-storage-setup/defaults/main.yaml delete mode 100644 roles/docker-storage-setup/files/docker-storage-setup create mode 100644 roles/docker-storage-setup/templates/docker-storage-setup.j2 (limited to 'roles') diff --git a/roles/docker-storage-setup/defaults/main.yaml b/roles/docker-storage-setup/defaults/main.yaml new file mode 100644 index 000000000..e36f1b85a --- /dev/null +++ b/roles/docker-storage-setup/defaults/main.yaml @@ -0,0 +1,5 @@ +--- +docker_dev: "/dev/sdb" +docker_vg: "docker-vol" +docker_data_size: "95%VG" +docker_dm_basesize: "3G" diff --git a/roles/docker-storage-setup/files/docker-storage-setup b/roles/docker-storage-setup/files/docker-storage-setup deleted file mode 100644 index 5e9d494a1..000000000 --- a/roles/docker-storage-setup/files/docker-storage-setup +++ /dev/null @@ -1,4 +0,0 @@ -DEVS=/dev/sdb -VG=docker-vol -DATA_SIZE=95%VG -EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize=3G" diff --git a/roles/docker-storage-setup/tasks/main.yaml b/roles/docker-storage-setup/tasks/main.yaml index 17b13f27f..7202bc46b 100644 --- a/roles/docker-storage-setup/tasks/main.yaml +++ b/roles/docker-storage-setup/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: create the docker-storage-setup config file - copy: - src: docker-storage-setup + template: + src: "{{ role_path }}/templates/docker-storage-setup.j2" dest: /etc/sysconfig/docker-storage-setup owner: root group: root diff --git a/roles/docker-storage-setup/templates/docker-storage-setup.j2 b/roles/docker-storage-setup/templates/docker-storage-setup.j2 new file mode 100644 index 000000000..a5203d7e4 --- /dev/null +++ b/roles/docker-storage-setup/templates/docker-storage-setup.j2 @@ -0,0 +1,4 @@ +DEVS="{{ docker_devs }}" +VG="{{ docker_vg }}" +DATA_SIZE="{{ docker_data_size }}" +EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}" -- cgit v1.2.1 From c9305e842efb4098477e249d2bea87a73a989f0c Mon Sep 17 00:00:00 2001 From: Peter Schiffer Date: Thu, 18 May 2017 19:14:11 +0200 Subject: More ansible migration and deploy OCP from local workstation (#376) * Create registry bucket with deployment manager * Migrate ssh proxy to Ansible * Update gce dynamic inventory script, use instance name for ssh * Fix variable name in docker storage setup role * Deploy OCP from local workstation, and not from the bastion host --- roles/docker-storage-setup/templates/docker-storage-setup.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'roles') diff --git a/roles/docker-storage-setup/templates/docker-storage-setup.j2 b/roles/docker-storage-setup/templates/docker-storage-setup.j2 index a5203d7e4..b5869feff 100644 --- a/roles/docker-storage-setup/templates/docker-storage-setup.j2 +++ b/roles/docker-storage-setup/templates/docker-storage-setup.j2 @@ -1,4 +1,4 @@ -DEVS="{{ docker_devs }}" +DEVS="{{ docker_dev }}" VG="{{ docker_vg }}" DATA_SIZE="{{ docker_data_size }}" EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}" -- cgit v1.2.1 From 469a88f6d7609df5ffaab812093e0c58baa3be29 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Mon, 5 Jun 2017 16:47:13 -0400 Subject: Conditionally set the openshift_master_default_subdomain to avoid overriding it unecessary (#47) --- roles/common/pre_tasks/pre_tasks.yml | 2 ++ 1 file changed, 2 insertions(+) (limited to 'roles') diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml index 06a56605d..cc4e64a0f 100644 --- a/roles/common/pre_tasks/pre_tasks.yml +++ 
b/roles/common/pre_tasks/pre_tasks.yml @@ -34,3 +34,5 @@ set_fact: openshift_master_default_subdomain: "{{ openshift_app_domain }}.{{ full_dns_domain }}" delegate_to: localhost + when: + - openshift_master_default_subdomain is undefined -- cgit v1.2.1 From 22e88c9ce8f81cb13c3d050455d332161a1acd83 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Tue, 13 Jun 2017 15:35:22 -0400 Subject: Update CASL to use nsupdate for DNS records (#48) * Updated to use nsupdate for DNS records * Updated formatting of dict * Updating descriptive text * Support for external DNS config * Upgrading jinja2 to work correctly with latest templates * Latest update for nsupdate * Updated to use nsupdate for DNS records * Updated formatting of dict * Updating descriptive text * Support for external DNS config * Latest update for nsupdate * Updated to support external public/private DNS server(s) * Updated DNS server handling * Updated DNS server handling * Updated DNS server handling * Eliminated the from the sample inventories * Updated sample inventory to point to 2 separate DNS servers for private/public * Playbook clean-up * Adding 'python-dns' * splitting subscription manager calls to allow for a clean pre-install playbook --- roles/common/pre_tasks/pre_tasks.yml | 2 +- roles/dns-server-detect/defaults/main.yml | 3 ++ roles/dns-server-detect/tasks/main.yml | 38 ++++++++++++++++++++++ roles/openstack-stack/templates/heat_stack.yaml.j2 | 12 +++---- roles/openstack-stack/test/stack-create-test.yml | 4 +-- 5 files changed, 50 insertions(+), 9 deletions(-) create mode 100644 roles/dns-server-detect/defaults/main.yml create mode 100644 roles/dns-server-detect/tasks/main.yml (limited to 'roles') diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml index cc4e64a0f..c5e79e89c 100644 --- a/roles/common/pre_tasks/pre_tasks.yml +++ b/roles/common/pre_tasks/pre_tasks.yml @@ -22,7 +22,7 @@ - name: Updating DNS domain to include env_id (if not empty) set_fact: - full_dns_domain: "{{ (env_id|trim == '') | ternary(dns_domain, env_id + '.' + dns_domain) }}" + full_dns_domain: "{{ (env_id|trim == '') | ternary(public_dns_domain, env_id + '.' 
+ public_dns_domain) }}" delegate_to: localhost - name: Set the APP domain for OpenShift use diff --git a/roles/dns-server-detect/defaults/main.yml b/roles/dns-server-detect/defaults/main.yml new file mode 100644 index 000000000..58bd861cd --- /dev/null +++ b/roles/dns-server-detect/defaults/main.yml @@ -0,0 +1,3 @@ +--- + +external_nsupdate_keys: {} diff --git a/roles/dns-server-detect/tasks/main.yml b/roles/dns-server-detect/tasks/main.yml new file mode 100644 index 000000000..e8dd0acf0 --- /dev/null +++ b/roles/dns-server-detect/tasks/main.yml @@ -0,0 +1,38 @@ +--- + +- fail: + msg: 'Missing required private DNS server(s)' + when: + - external_nsupdate_keys['private'] is undefined + - hostvars[groups['dns'][0]] is undefined + +- fail: + msg: 'Missing required public DNS server(s)' + when: + - external_nsupdate_keys['public'] is undefined + - hostvars[groups['dns'][0]] is undefined + +- name: "Set the private DNS server to use the external value (if provided)" + set_fact: + private_dns_server: "{{ external_nsupdate_keys['private']['server'] }}" + when: + - external_nsupdate_keys['private'] is defined + +- name: "Set the private DNS server to use the provisioned value" + set_fact: + private_dns_server: "{{ hostvars[groups['dns'][0]].openstack.private_v4 }}" + when: + - private_dns_server is undefined + +- name: "Set the public DNS server to use the external value (if provided)" + set_fact: + public_dns_server: "{{ external_nsupdate_keys['public']['server'] }}" + when: + - external_nsupdate_keys['public'] is defined + +- name: "Set the public DNS server to use the provisioned value" + set_fact: + public_dns_server: "{{ hostvars[groups['dns'][0]].openstack.public_v4 }}" + when: + - public_dns_server is undefined + diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index c367aabe7..09b62cba7 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -381,7 +381,7 @@ resources: params: cluster_id: {{ stack_name }} k8s_type: etcd - cluster_env: {{ dns_domain }} + cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: str_replace: @@ -421,7 +421,7 @@ resources: params: cluster_id: {{ stack_name }} k8s_type: lb - cluster_env: {{ dns_domain }} + cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: str_replace: @@ -461,7 +461,7 @@ resources: params: cluster_id: {{ stack_name }} k8s_type: master - cluster_env: {{ dns_domain }} + cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: str_replace: @@ -505,7 +505,7 @@ resources: cluster_id: {{ stack_name }} k8s_type: node subtype: app - cluster_env: {{ dns_domain }} + cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: str_replace: @@ -548,7 +548,7 @@ resources: cluster_id: {{ stack_name }} k8s_type: node subtype: infra - cluster_env: {{ dns_domain }} + cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: str_replace: @@ -591,7 +591,7 @@ resources: params: cluster_id: {{ stack_name }} k8s_type: dns - cluster_env: {{ dns_domain }} + cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: str_replace: diff --git a/roles/openstack-stack/test/stack-create-test.yml b/roles/openstack-stack/test/stack-create-test.yml index 94e312ee3..6cbd7ff30 100644 --- a/roles/openstack-stack/test/stack-create-test.yml +++ b/roles/openstack-stack/test/stack-create-test.yml @@ -3,8 +3,8 @@ roles: - role: openstack-stack stack_name: 
test-stack - dns_domain: "{{ openstack_dns_domain }}" - dns_nameservers: "{{ openstack_nameservers }}" + dns_domain: "{{ public_dns_domain }}" + dns_nameservers: "{{ public_dns_nameservers }}" subnet_prefix: "{{ openstack_subnet_prefix }}" ssh_public_key: "{{ openstack_ssh_public_key }}" openstack_image: "{{ openstack_default_image_name }}" -- cgit v1.2.1 From c3cefa9996fb67b846f44eed78644a0f52d76df1 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 2 Jun 2017 14:39:21 +0200 Subject: Move pre_tasks from to the openstack provisioner We should probably not pollute the role namespace with a name as common as "common". Moving the pre_task.yml to provisioners/openstack instead. --- roles/common/pre_tasks/pre_tasks.yml | 38 ------------------------------------ 1 file changed, 38 deletions(-) delete mode 100644 roles/common/pre_tasks/pre_tasks.yml (limited to 'roles') diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml deleted file mode 100644 index c5e79e89c..000000000 --- a/roles/common/pre_tasks/pre_tasks.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -- name: Generate Environment ID - set_fact: - env_random_id: "{{ ansible_date_time.epoch }}" - run_once: true - delegate_to: localhost - -- name: Set default Environment ID - set_fact: - default_env_id: "casl-{{ lookup('env','OS_USERNAME') }}-{{ env_random_id }}" - delegate_to: localhost - -- name: Setting Common Facts - set_fact: - env_id: "{{ env_id | default(default_env_id) }}" - delegate_to: localhost - -- name: Set Dynamic Inventory Filters - shell: > - export OS_INV_FILTER_KEY=clusterid && OS_INV_FILTER_VALUE={{ env_id }} - delegate_to: localhost - -- name: Updating DNS domain to include env_id (if not empty) - set_fact: - full_dns_domain: "{{ (env_id|trim == '') | ternary(public_dns_domain, env_id + '.' + public_dns_domain) }}" - delegate_to: localhost - -- name: Set the APP domain for OpenShift use - set_fact: - openshift_app_domain: "{{ openshift_app_domain | default('apps') }}" - delegate_to: localhost - -- name: Set the default app domain for routing purposes - set_fact: - openshift_master_default_subdomain: "{{ openshift_app_domain }}.{{ full_dns_domain }}" - delegate_to: localhost - when: - - openshift_master_default_subdomain is undefined -- cgit v1.2.1 From a7300e6b7ace3098aa05794d4ac2f9e5a4cef64a Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 2 Jun 2017 13:28:00 +0200 Subject: Always let the openshift nodes access the DNS When `node_ingress_cidr` to limit the IP range for the DNS server, this can prevent the actual openshift nodes from accessing it as well. This commit makes the access from the `openstack_subnet_prefix` always pass through and uses `node_ingress_cidr` for additional access control. 
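To make the intent of this change concrete, here is a minimal group_vars sketch; both variables exist in this role, but the address values below are purely illustrative.

```
# Sketch only: hosts in the cluster subnet can always reach the DNS and node
# ports, while node_ingress_cidr just grants one additional external range.
openstack_subnet_prefix: "172.18.10"   # hypothetical cluster subnet (172.18.10.0/24)
node_ingress_cidr: "203.0.113.0/24"    # hypothetical extra range allowed in
```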
--- roles/openstack-stack/templates/heat_stack.yaml.j2 | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'roles') diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 09b62cba7..c10b1d90f 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -289,6 +289,11 @@ resources: port_range_min: 30000 port_range_max: 32767 remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" infra-secgrp: type: OS::Neutron::SecurityGroup @@ -337,11 +342,21 @@ resources: port_range_min: 53 port_range_max: 53 remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" - direction: ingress protocol: tcp port_range_min: 53 port_range_max: 53 remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" {% if num_masters is greaterthan 1 %} lb-secgrp: type: OS::Neutron::SecurityGroup -- cgit v1.2.1 From 4bb2f005bc6cdeb8e656c2b42ac54db8fbd67fb9 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Mon, 5 Jun 2017 16:41:09 +0200 Subject: Add a flat sec group for openstack provider Add a openstack_flat_secgroup, defaults to False. When set, merges sec rules for master, node, etcd, infra nodes into a single group. Less secure, but might help to mitigate quota limitations. Update docs. Use timeout 30s to mitigate the error: Timeout (12s) waiting for privilege escalation prompt. 
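As a usage note for the toggle described above, enabling it from an inventory or group_vars file might look like the sketch below; the variable defaults to False, which keeps the separate per-role security groups.

```
# Sketch only: merge the master/node/etcd/infra rules into a single security
# group, which can help stay within per-project security group and rule quotas.
openstack_flat_secgrp: True
```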
Signed-off-by: Bogdan Dobrelya --- roles/openstack-stack/templates/heat_stack.yaml.j2 | 138 +++++++++++++++++++-- 1 file changed, 128 insertions(+), 10 deletions(-) (limited to 'roles') diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index c10b1d90f..c750865a5 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -142,6 +142,119 @@ resources: # cluster_id: {{ stack_name }} # public_key: {{ ssh_public_key }} +{% if openstack_flat_secgrp|bool %} + flat-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-flat-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 4001 + port_range_max: 4001 + - direction: ingress + protocol: tcp + port_range_min: 8443 + port_range_max: 8444 + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: tcp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: udp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: tcp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: udp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: tcp + port_range_min: 2224 + port_range_max: 2224 + - direction: ingress + protocol: udp + port_range_min: 5404 + port_range_max: 5405 + - direction: ingress + protocol: tcp + port_range_min: 9090 + port_range_max: 9090 + - direction: ingress + protocol: tcp + port_range_min: 2379 + port_range_max: 2380 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10250 + port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10250 + port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 4789 + port_range_max: 4789 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" + - direction: ingress + protocol: tcp + port_range_min: 80 + port_range_max: 80 + - direction: ingress + protocol: tcp + port_range_min: 443 + port_range_max: 443 +{% else %} master-secgrp: type: OS::Neutron::SecurityGroup properties: @@ -168,10 +281,6 @@ resources: - direction: ingress protocol: tcp port_range_min: 8443 - port_range_max: 8443 - - direction: ingress - protocol: tcp - port_range_min: 8444 port_range_max: 8444 - direction: ingress protocol: tcp @@ -204,10 +313,6 @@ resources: - direction: ingress protocol: udp port_range_min: 5404 - port_range_max: 5404 - - direction: ingress - protocol: 
udp - port_range_min: 5405 port_range_max: 5405 - direction: ingress protocol: tcp @@ -317,6 +422,7 @@ resources: protocol: tcp port_range_min: 443 port_range_max: 443 +{% endif %} dns-secgrp: type: OS::Neutron::SecurityGroup @@ -411,7 +517,7 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: - - { get_resource: etcd-secgrp } + - { get_resource: {% if openstack_flat_secgrp|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } floating_network: {{ external_network }} net_name: str_replace: @@ -491,10 +597,14 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: +{% if openstack_flat_secgrp|bool %} + - { get_resource: flat-secgrp } +{% else %} - { get_resource: master-secgrp } - { get_resource: node-secgrp } {% if num_etcd is equalto 0 %} - { get_resource: etcd-secgrp } +{% endif %} {% endif %} floating_network: {{ external_network }} net_name: @@ -538,7 +648,7 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: - - { get_resource: node-secgrp } + - { get_resource: {% if openstack_flat_secgrp|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } floating_network: {{ external_network }} net_name: str_replace: @@ -581,8 +691,12 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: +{% if openstack_flat_secgrp|bool %} + - { get_resource: flat-secgrp } +{% else %} - { get_resource: node-secgrp } - { get_resource: infra-secgrp } +{% endif %} floating_network: {{ external_network }} net_name: str_replace: @@ -621,7 +735,11 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: +{% if openstack_flat_secgrp|bool %} + - { get_resource: flat-secgrp } +{% else %} - { get_resource: node-secgrp } +{% endif %} - { get_resource: dns-secgrp } floating_network: {{ external_network }} net_name: -- cgit v1.2.1 From b884e6a9c77ae2d86b2de3c4ae6e8de558444610 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Mon, 12 Jun 2017 12:02:41 +0200 Subject: Drop atomic-openshift-utils, update docs for origin TODO use with when: ansible_distribution == 'CentOS' Also update docs for origin Signed-off-by: Bogdan Dobrelya --- roles/openshift-prep/tasks/prerequisites.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'roles') diff --git a/roles/openshift-prep/tasks/prerequisites.yml b/roles/openshift-prep/tasks/prerequisites.yml index 1286905f4..60507636f 100644 --- a/roles/openshift-prep/tasks/prerequisites.yml +++ b/roles/openshift-prep/tasks/prerequisites.yml @@ -1,7 +1,7 @@ --- - name: "Cleaning yum repositories" command: "yum clean all" - + - name: "Install required packages" yum: name: "{{ item }}" @@ -13,7 +13,6 @@ - bind-utils - bridge-utils - bash-completion - - atomic-openshift-utils - vim-enhanced - name: "Update all packages (this can take a very long time)" -- cgit v1.2.1 From bf7e5e82872684088995cc55559f8e51fe35d4a9 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 16 Jun 2017 17:52:37 +0200 Subject: Fix yamllint errors --- roles/dns-server-detect/tasks/main.yml | 2 - roles/hostnames/tasks/main.yaml | 4 +- roles/hostnames/test/test.yaml | 17 ------- roles/hostnames/vars/records.yaml | 54 +++++++++++----------- roles/openstack-stack/test/stack-create-test.yml | 1 - roles/subscription-manager/pre_tasks/pre_tasks.yml | 4 +- roles/subscription-manager/tasks/main.yml | 2 +- 7 files changed, 32 insertions(+), 52 deletions(-) (limited to 'roles') diff --git a/roles/dns-server-detect/tasks/main.yml b/roles/dns-server-detect/tasks/main.yml index 
e8dd0acf0..183c0a0ca 100644 --- a/roles/dns-server-detect/tasks/main.yml +++ b/roles/dns-server-detect/tasks/main.yml @@ -1,5 +1,4 @@ --- - - fail: msg: 'Missing required private DNS server(s)' when: @@ -35,4 +34,3 @@ public_dns_server: "{{ hostvars[groups['dns'][0]].openstack.public_v4 }}" when: - public_dns_server is undefined - diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml index bf142d653..c49852210 100644 --- a/roles/hostnames/tasks/main.yaml +++ b/roles/hostnames/tasks/main.yaml @@ -15,8 +15,8 @@ register: cloud_cfg - name: Prevent cloud-init updates of hostname/fqdn (if applicable) - lineinfile: - dest: /etc/cloud/cloud.cfg + lineinfile: + dest: /etc/cloud/cloud.cfg state: present regexp: "{{ item.regexp }}" line: "{{ item.line }}" diff --git a/roles/hostnames/test/test.yaml b/roles/hostnames/test/test.yaml index 34bf37942..0c56aea51 100644 --- a/roles/hostnames/test/test.yaml +++ b/roles/hostnames/test/test.yaml @@ -2,20 +2,3 @@ - hosts: all roles: - role: hostnames - -# - debug: -# -# - hosts: dns -# roles: -# - role: dns-server -# named_config_views: -# - name: private -# acl_entry: -# - 192.168.124.40/32 -# - 192.168.124.40/32 -# zone: -# - dns_domain: example.com -# - name: public -# zone: -# - dns_domain: example.com -# - role: dns diff --git a/roles/hostnames/vars/records.yaml b/roles/hostnames/vars/records.yaml index 3bf12ae2b..0cadc8181 100644 --- a/roles/hostnames/vars/records.yaml +++ b/roles/hostnames/vars/records.yaml @@ -1,28 +1,28 @@ --- - - name: "Building Records" - set_fact: - dns_records_add: - - view: private - zone: example.com - entries: - - type: A - hostname: master1.example.com - ip: 172.16.15.94 - - type: A - hostname: node1.example.com - ip: 172.16.15.86 - - type: A - hostname: node2.example.com - ip: 172.16.15.87 - - view: public - zone: example.com - entries: - - type: A - hostname: master1.example.com - ip: 10.3.10.116 - - type: A - hostname: node1.example.com - ip: 10.3.11.46 - - type: A - hostname: node2.example.com - ip: 10.3.12.6 +- name: "Building Records" + set_fact: + dns_records_add: + - view: private + zone: example.com + entries: + - type: A + hostname: master1.example.com + ip: 172.16.15.94 + - type: A + hostname: node1.example.com + ip: 172.16.15.86 + - type: A + hostname: node2.example.com + ip: 172.16.15.87 + - view: public + zone: example.com + entries: + - type: A + hostname: master1.example.com + ip: 10.3.10.116 + - type: A + hostname: node1.example.com + ip: 10.3.11.46 + - type: A + hostname: node2.example.com + ip: 10.3.12.6 diff --git a/roles/openstack-stack/test/stack-create-test.yml b/roles/openstack-stack/test/stack-create-test.yml index 6cbd7ff30..0fbf66f34 100644 --- a/roles/openstack-stack/test/stack-create-test.yml +++ b/roles/openstack-stack/test/stack-create-test.yml @@ -14,4 +14,3 @@ infra_flavor: "{{ openstack_default_flavor }}" dns_flavor: "{{ openstack_default_flavor }}" external_network: "{{ openstack_external_network_name }}" - diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml index b21356cf2..464670fc0 100644 --- a/roles/subscription-manager/pre_tasks/pre_tasks.yml +++ b/roles/subscription-manager/pre_tasks/pre_tasks.yml @@ -11,7 +11,7 @@ - name: "Determine if Subscription Manager should be used" set_fact: rhsm_register: false - when: + when: - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == '' - rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '' - 
rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '' @@ -21,7 +21,7 @@ - name: "Validate Subscription Manager organization is set" fail: msg="Cannot register to a Satellite server without a value for the Organization via 'rhsm_org'" - when: + when: - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' - rhsm_satellite is defined - rhsm_satellite is not none diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index 2dd14b48e..8c1ae697a 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -4,7 +4,7 @@ rhsm_password: "{{ hostvars.localhost.rhsm_password }}" when: - rhsm_password is not defined or rhsm_password is none or rhsm_password|trim == '' - + - name: "Initializing Subscription Manager authentication method" set_fact: rhsm_authentication: false -- cgit v1.2.1 From 0908b25d45b9a5297ed341f136f8d42e59438553 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Wed, 21 Jun 2017 15:22:09 +0200 Subject: Use cached facts, do not become for localhost (#484) Prohibit sudoing for localhost played tasks, like DNS setup. Re-use cached facts to speed up deployment. Signed-off-by: Bogdan Dobrelya --- roles/openstack-stack/test/stack-create-test.yml | 2 ++ 1 file changed, 2 insertions(+) (limited to 'roles') diff --git a/roles/openstack-stack/test/stack-create-test.yml b/roles/openstack-stack/test/stack-create-test.yml index 0fbf66f34..d80472193 100644 --- a/roles/openstack-stack/test/stack-create-test.yml +++ b/roles/openstack-stack/test/stack-create-test.yml @@ -1,5 +1,7 @@ --- - hosts: localhost + gather_facts: True + become: False roles: - role: openstack-stack stack_name: test-stack -- cgit v1.2.1 From 8219f17503e16620b4881faefc78023c696ed2e5 Mon Sep 17 00:00:00 2001 From: Tzu-Mainn Chen Date: Wed, 21 Jun 2017 18:01:48 -0400 Subject: Add node_removal_policies variable to allow for scaling down --- roles/openstack-stack/defaults/main.yml | 1 + roles/openstack-stack/templates/heat_stack.yaml.j2 | 2 ++ 2 files changed, 3 insertions(+) (limited to 'roles') diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml index 2a4ef3a45..4f859585f 100644 --- a/roles/openstack-stack/defaults/main.yml +++ b/roles/openstack-stack/defaults/main.yml @@ -9,4 +9,5 @@ num_masters: 1 num_nodes: 1 num_dns: 1 num_infra: 1 +node_removal_policies: [] etcd_volume_size: 2 diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index c750865a5..3916eec02 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -620,6 +620,8 @@ resources: type: OS::Heat::ResourceGroup properties: count: {{ num_nodes }} + removal_policies: + - resource_list: {{ node_removal_policies }} resource_def: type: server.yaml properties: -- cgit v1.2.1 From 3f10c266aab0881ab294513d4ef93a1528d33c6b Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Wed, 21 Jun 2017 13:32:48 +0200 Subject: Fix flat sec group and infra/dns sec rules Make flat sec group to only merge node/master/etcd sec rules. Add basic dns/ssh sec group and assign it to all but dns node groups. Assign only dns sec group for dns nodes. Assign only infra (and basic) sec groups for ingra nodes. Add security notes for openstack provider. 
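As a usage note for the scale-down support added a few commits above: the list is passed directly into the resource group's removal policies, so a sketch such as the following (member indices are illustrative) removes those specific nodes first when the node count is lowered; a later commit below renames the variable to nodes_to_remove.

```
# Sketch only: when num_nodes is reduced, ResourceGroup members '3' and '4'
# are removed first; remaining members are left untouched.
num_nodes: 2
node_removal_policies: ['3', '4']
```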
Signed-off-by: Bogdan Dobrelya --- roles/openstack-stack/templates/heat_stack.yaml.j2 | 92 ++++++++-------------- 1 file changed, 33 insertions(+), 59 deletions(-) (limited to 'roles') diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index c750865a5..cba03e2ca 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -142,18 +142,17 @@ resources: # cluster_id: {{ stack_name }} # public_key: {{ ssh_public_key }} -{% if openstack_flat_secgrp|bool %} - flat-secgrp: + common-secgrp: type: OS::Neutron::SecurityGroup properties: name: str_replace: - template: openshift-ansible-cluster_id-flat-secgrp + template: openshift-ansible-cluster_id-common-secgrp params: cluster_id: {{ stack_name }} description: str_replace: - template: Security group for cluster_id OpenShift cluster + template: Basic ssh/dns security group for cluster_id OpenShift cluster params: cluster_id: {{ stack_name }} rules: @@ -162,14 +161,6 @@ resources: port_range_min: 22 port_range_max: 22 remote_ip_prefix: {{ ssh_ingress_cidr }} - - direction: ingress - protocol: tcp - port_range_min: 4001 - port_range_max: 4001 - - direction: ingress - protocol: tcp - port_range_min: 8443 - port_range_max: 8444 - direction: ingress protocol: tcp port_range_min: 53 @@ -178,6 +169,30 @@ resources: protocol: udp port_range_min: 53 port_range_max: 53 + +{% if openstack_flat_secgrp|bool %} + flat-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-flat-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 4001 + port_range_max: 4001 + - direction: ingress + protocol: tcp + port_range_min: 8443 + port_range_max: 8444 - direction: ingress protocol: tcp port_range_min: 8053 @@ -246,14 +261,6 @@ resources: port_range_min: 30000 port_range_max: 32767 remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" - - direction: ingress - protocol: tcp - port_range_min: 80 - port_range_max: 80 - - direction: ingress - protocol: tcp - port_range_min: 443 - port_range_max: 443 {% else %} master-secgrp: type: OS::Neutron::SecurityGroup @@ -269,11 +276,6 @@ resources: params: cluster_id: {{ stack_name }} rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: {{ ssh_ingress_cidr }} - direction: ingress protocol: tcp port_range_min: 4001 @@ -282,14 +284,6 @@ resources: protocol: tcp port_range_min: 8443 port_range_max: 8444 - - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - direction: ingress protocol: tcp port_range_min: 8053 @@ -333,11 +327,6 @@ resources: params: cluster_id: {{ stack_name }} rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: {{ ssh_ingress_cidr }} - direction: ingress protocol: tcp port_range_min: 2379 @@ -364,11 +353,6 @@ resources: params: cluster_id: {{ stack_name }} rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: {{ ssh_ingress_cidr }} - direction: ingress protocol: tcp port_range_min: 10250 @@ -399,6 +383,7 @@ resources: port_range_min: 30000 port_range_max: 32767 
remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" +{% endif %} infra-secgrp: type: OS::Neutron::SecurityGroup @@ -422,7 +407,6 @@ resources: protocol: tcp port_range_min: 443 port_range_max: 443 -{% endif %} dns-secgrp: type: OS::Neutron::SecurityGroup @@ -470,11 +454,6 @@ resources: name: openshift-ansible-{{ stack_name }}-lb-secgrp description: Security group for {{ stack_name }} cluster Load Balancer rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: {{ ssh_ingress_cidr }} - direction: ingress protocol: tcp port_range_min: {{ openshift_master_api_port | default(8443) }} @@ -518,6 +497,7 @@ resources: subnet: { get_resource: subnet } secgrp: - { get_resource: {% if openstack_flat_secgrp|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } + - { get_resource: common-secgrp } floating_network: {{ external_network }} net_name: str_replace: @@ -558,6 +538,7 @@ resources: subnet: { get_resource: subnet } secgrp: - { get_resource: lb-secgrp } + - { get_resource: common-secgrp } floating_network: {{ external_network }} net_name: str_replace: @@ -606,6 +587,7 @@ resources: - { get_resource: etcd-secgrp } {% endif %} {% endif %} + - { get_resource: common-secgrp } floating_network: {{ external_network }} net_name: str_replace: @@ -649,6 +631,7 @@ resources: subnet: { get_resource: subnet } secgrp: - { get_resource: {% if openstack_flat_secgrp|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } + - { get_resource: common-secgrp } floating_network: {{ external_network }} net_name: str_replace: @@ -691,12 +674,8 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: -{% if openstack_flat_secgrp|bool %} - - { get_resource: flat-secgrp } -{% else %} - - { get_resource: node-secgrp } - { get_resource: infra-secgrp } -{% endif %} + - { get_resource: common-secgrp } floating_network: {{ external_network }} net_name: str_replace: @@ -735,11 +714,6 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: -{% if openstack_flat_secgrp|bool %} - - { get_resource: flat-secgrp } -{% else %} - - { get_resource: node-secgrp } -{% endif %} - { get_resource: dns-secgrp } floating_network: {{ external_network }} net_name: -- cgit v1.2.1 From 2fa7c112561eca54e0980902bda6920506c96f92 Mon Sep 17 00:00:00 2001 From: Tzu-Mainn Chen Date: Fri, 23 Jun 2017 15:47:17 -0400 Subject: rename node_removal_policies, add some comments and defaults --- roles/openstack-stack/defaults/main.yml | 2 +- roles/openstack-stack/templates/heat_stack.yaml.j2 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'roles') diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml index 4f859585f..4831d6bc4 100644 --- a/roles/openstack-stack/defaults/main.yml +++ b/roles/openstack-stack/defaults/main.yml @@ -9,5 +9,5 @@ num_masters: 1 num_nodes: 1 num_dns: 1 num_infra: 1 -node_removal_policies: [] +nodes_to_remove: [] etcd_volume_size: 2 diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 3916eec02..32ea5ec1d 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -621,7 +621,7 @@ resources: properties: count: {{ num_nodes }} removal_policies: - - resource_list: {{ node_removal_policies }} + - resource_list: {{ nodes_to_remove }} resource_def: type: server.yaml properties: -- cgit v1.2.1 From 8af0a60120689267515d7766c432a414eb55d51c Mon Sep 17 
00:00:00 2001 From: Bogdan Dobrelya Date: Mon, 26 Jun 2017 09:46:41 +0200 Subject: Modify sec groups for provisioned openstack servers Drop ingress DNS rules from the common secgrp. Add an ingress ICMP rule, restricted by the ssh ingress cidr, to the common secgrp. This allows to ping servers from the control node (ansible admin node). Add dns servers into the common secgrp as well. Signed-off-by: Bogdan Dobrelya --- roles/openstack-stack/templates/heat_stack.yaml.j2 | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) (limited to 'roles') diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index cba03e2ca..7fd52e52d 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -152,7 +152,7 @@ resources: cluster_id: {{ stack_name }} description: str_replace: - template: Basic ssh/dns security group for cluster_id OpenShift cluster + template: Basic ssh/icmp security group for cluster_id OpenShift cluster params: cluster_id: {{ stack_name }} rules: @@ -162,13 +162,8 @@ resources: port_range_max: 22 remote_ip_prefix: {{ ssh_ingress_cidr }} - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 + protocol: icmp + remote_ip_prefix: {{ ssh_ingress_cidr }} {% if openstack_flat_secgrp|bool %} flat-secgrp: @@ -422,11 +417,6 @@ resources: params: cluster_id: {{ stack_name }} rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: {{ ssh_ingress_cidr }} - direction: ingress protocol: udp port_range_min: 53 @@ -715,6 +705,7 @@ resources: subnet: { get_resource: subnet } secgrp: - { get_resource: dns-secgrp } + - { get_resource: common-secgrp } floating_network: {{ external_network }} net_name: str_replace: -- cgit v1.2.1 From 2d6b572891e5f9c5f8950f86ae741b386b3d8289 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Tue, 27 Jun 2017 18:38:22 +0200 Subject: Put back node/flat secgrp for infra nodes on openstack Partially undo 2028883e936c8a1a0be031a19d531d0804a32b68 to unblock end-to-end deployments Signed-off-by: Bogdan Dobrelya --- roles/openstack-stack/templates/heat_stack.yaml.j2 | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'roles') diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index cba03e2ca..2d957cc5b 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -674,6 +674,12 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: +# TODO(bogdando) filter only required node rules into infra-secgrp +{% if openstack_flat_secgrp|bool %} + - { get_resource: flat-secgrp } +{% else %} + - { get_resource: node-secgrp } +{% endif %} - { get_resource: infra-secgrp } - { get_resource: common-secgrp } floating_network: {{ external_network }} -- cgit v1.2.1 From b28d6d787fbdc6f242aff77830a85693c148faa7 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Thu, 29 Jun 2017 17:59:22 +0200 Subject: Manage packages to install/update for openstack provider Allow required packages and yum update all steps to be optionally disabled. 
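A sketch of how these switches might be driven from the inventory; the variable names come from the role defaults added below, while the file location is only illustrative:

```
# group_vars/OSEv3.yml (illustrative location)
manage_packages: true       # set to false to skip both the package install and the full "yum update"
required_packages:          # optional override of the role's default list, trimmed as an example
  - wget
  - git
  - net-tools
```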
Signed-off-by: Bogdan Dobrelya --- roles/openshift-prep/defaults/main.yml | 11 +++++++++++ roles/openshift-prep/tasks/prerequisites.yml | 13 ++++--------- 2 files changed, 15 insertions(+), 9 deletions(-) create mode 100644 roles/openshift-prep/defaults/main.yml (limited to 'roles') diff --git a/roles/openshift-prep/defaults/main.yml b/roles/openshift-prep/defaults/main.yml new file mode 100644 index 000000000..fac25dcc1 --- /dev/null +++ b/roles/openshift-prep/defaults/main.yml @@ -0,0 +1,11 @@ +--- +# Defines either to install required packages and update all +manage_packages: true +required_packages: + - wget + - git + - net-tools + - bind-utils + - bridge-utils + - bash-completion + - vim-enhanced diff --git a/roles/openshift-prep/tasks/prerequisites.yml b/roles/openshift-prep/tasks/prerequisites.yml index 60507636f..433c1c4e3 100644 --- a/roles/openshift-prep/tasks/prerequisites.yml +++ b/roles/openshift-prep/tasks/prerequisites.yml @@ -6,19 +6,14 @@ yum: name: "{{ item }}" state: latest - with_items: - - wget - - git - - net-tools - - bind-utils - - bridge-utils - - bash-completion - - vim-enhanced + with_items: "{{ required_packages }}" + when: manage_packages|bool - name: "Update all packages (this can take a very long time)" yum: - name: "*" + name: '*' state: latest + when: manage_packages|bool - name: "Verify hostname" shell: hostnamectl status | awk "/Static hostname/"'{ print $3 }' -- cgit v1.2.1 From 1409e0a52d45b7781b3a23f3f7eaa8fe09d26cd6 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Mon, 19 Jun 2017 12:24:23 +0200 Subject: Persist DNS configuration for nodes for openstack provider * Firstly, provision a Heat stack with given public resolvers. * After the DNS node configured as an authoritative server, switch the Heat stack's Neutron subnet to that resolver (private_dns_server) the way it to become the first entry pushed into the hosts /etc/resolv.conf. It will be serving the cluster domain requests for OpenShift nodes and workloads. * Drop post-provision /etc/reslov.conf nameserver hacks as not needed anymore. * Fix dns floating IPs output and add the priv IPs output as well. * Update docs, clarify localhost vs servers requirements, add required Network Manager setup step. * Use post-provision task names instead of comments. Signed-off-by: Bogdan Dobrelya --- roles/openstack-stack/tasks/main.yml | 5 +++++ .../tasks/subnet_update_dns_servers.yaml | 8 ++++++++ roles/openstack-stack/templates/heat_stack.yaml.j2 | 23 +++++++++------------- 3 files changed, 22 insertions(+), 14 deletions(-) create mode 100644 roles/openstack-stack/tasks/subnet_update_dns_servers.yaml (limited to 'roles') diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml index 71c7bbe0d..a53e6350b 100644 --- a/roles/openstack-stack/tasks/main.yml +++ b/roles/openstack-stack/tasks/main.yml @@ -35,6 +35,11 @@ template: "{{ stack_template_path }}" wait: yes +# NOTE(bogdando) OS::Neutron::Subnet doesn't support live updates for +# dns_nameservers, so we can't do that for the "create stack" task. 
+- include: subnet_update_dns_servers.yaml + when: private_dns_server is defined + - name: cleanup temp files file: path: "{{ stack_template_pre.path }}" diff --git a/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml b/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml new file mode 100644 index 000000000..be4f07b97 --- /dev/null +++ b/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml @@ -0,0 +1,8 @@ +--- +- name: Live update the subnet's DNS servers + os_subnet: + name: openshift-ansible-{{ stack_name }}-subnet + network_name: openshift-ansible-{{ stack_name }}-net + state: present + use_default_subnetpool: yes + dns_nameservers: "{{ [private_dns_server|default(public_dns_nameservers[0])]|union(public_dns_nameservers)|unique }}" diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 00a46896c..8bf76b57c 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -61,18 +61,13 @@ outputs: - dns - name - dns_floating_ip: - description: Floating IP of the DNS - value: - get_attr: - - dns - - addresses - - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} - - 1 - - addr + dns_floating_ips: + description: Floating IPs of the DNS + value: { get_attr: [ dns, floating_ip ] } + + dns_private_ips: + description: Private IPs of the DNS + value: { get_attr: [ dns, private_ip ] } resources: @@ -111,9 +106,9 @@ resources: params: subnet_24_prefix: {{ subnet_prefix }} dns_nameservers: - {% for nameserver in dns_nameservers %} +{% for nameserver in dns_nameservers %} - {{ nameserver }} - {% endfor %} +{% endfor %} router: type: OS::Neutron::Router -- cgit v1.2.1 From a3a61ab4544d97dbc76dcd278c0f17d7a17fa022 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 12 Jul 2017 17:30:00 +0200 Subject: Add defaults values for some openstack vars (#539) * Add defaults values for some openstack vars Ansible shows errors when the `rhsm_register` and `openstack_flat_secgrp` values are not present in the inventory even though they have sensible default values. This makes them both default to false when they're not specified. * Comment out the flat security group option in inv It's no longer required to be there so let's comment it out. 
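With these defaults in place both variables can simply be left out of the inventory; a sketch of setting them only when non-default behaviour is wanted (file location illustrative):

```
# group_vars/all.yml (illustrative)
#rhsm_register: false          # already the effective default when omitted
openstack_flat_secgrp: true    # opt back in to the single flat security group
```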
--- roles/openstack-stack/templates/heat_stack.yaml.j2 | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'roles') diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 8bf76b57c..566b57ef8 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -160,7 +160,7 @@ resources: protocol: icmp remote_ip_prefix: {{ ssh_ingress_cidr }} -{% if openstack_flat_secgrp|bool %} +{% if openstack_flat_secgrp|default(False)|bool %} flat-secgrp: type: OS::Neutron::SecurityGroup properties: @@ -481,7 +481,7 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: - - { get_resource: {% if openstack_flat_secgrp|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } + - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } - { get_resource: common-secgrp } floating_network: {{ external_network }} net_name: @@ -563,7 +563,7 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: -{% if openstack_flat_secgrp|bool %} +{% if openstack_flat_secgrp|default(False)|bool %} - { get_resource: flat-secgrp } {% else %} - { get_resource: master-secgrp } @@ -617,7 +617,7 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: - - { get_resource: {% if openstack_flat_secgrp|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } + - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } - { get_resource: common-secgrp } floating_network: {{ external_network }} net_name: @@ -662,7 +662,7 @@ resources: subnet: { get_resource: subnet } secgrp: # TODO(bogdando) filter only required node rules into infra-secgrp -{% if openstack_flat_secgrp|bool %} +{% if openstack_flat_secgrp|default(False)|bool %} - { get_resource: flat-secgrp } {% else %} - { get_resource: node-secgrp } @@ -718,4 +718,3 @@ resources: volume_size: {{ dns_volume_size }} depends_on: - interface - -- cgit v1.2.1 From bb483b5877a18422d382f2348ad53d4de07a5fd7 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 13 Jul 2017 13:13:40 +0000 Subject: Replace greaterthan and equalto in openstack-stack These two Jinja filters were added in 2.8 which is notably not packaged in CentOS and RHEL. This removes them in favour of the `==` and `>` operators which are available in Jinja 2.7. 
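The Jinja 2.7-safe pattern, shown as a small illustrative template fragment with the resource bodies elided:

```
{% if num_masters > 1 %}        {# was: num_masters is greaterthan 1 #}
  # ... load balancer resources ...
{% endif %}
{% if openshift_master_console_port != openshift_master_api_port %}  {# was: is not equalto #}
  # ... extra console-port rule ...
{% endif %}
```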
--- roles/openstack-stack/templates/heat_stack.yaml.j2 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'roles') diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 566b57ef8..992f6257b 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -432,7 +432,7 @@ resources: port_range_min: 53 port_range_max: 53 remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" -{% if num_masters is greaterthan 1 %} +{% if num_masters > 1 %} lb-secgrp: type: OS::Neutron::SecurityGroup properties: @@ -444,7 +444,7 @@ resources: port_range_min: {{ openshift_master_api_port | default(8443) }} port_range_max: {{ openshift_master_api_port | default(8443) }} remote_ip_prefix: {{ lb_ingress_cidr }} - {% if openshift_master_console_port is defined and openshift_master_console_port is not equalto openshift_master_api_port %} + {% if openshift_master_console_port is defined and openshift_master_console_port != openshift_master_api_port %} - direction: ingress protocol: tcp port_range_min: {{ openshift_master_console_port | default(8443) }} @@ -493,7 +493,7 @@ resources: depends_on: - interface -{% if num_masters is greaterthan 1 %} +{% if num_masters > 1 %} loadbalancer: type: OS::Heat::ResourceGroup properties: @@ -568,7 +568,7 @@ resources: {% else %} - { get_resource: master-secgrp } - { get_resource: node-secgrp } -{% if num_etcd is equalto 0 %} +{% if num_etcd == 0 %} - { get_resource: etcd-secgrp } {% endif %} {% endif %} -- cgit v1.2.1 From fb3d95ff05257906d846562b752fb9258794dc38 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 14 Jul 2017 12:22:51 +0200 Subject: Set up NetworkManager automatically (#542) * Set up NetworkManager automatically This removes the extra step of running the `openshift-ansible/playbooks/byo/openshift-node/network_manager.yml` before installing openshift. In addition, the playbook relies on a host group that the provisioning doesn't provide (oo_all_hosts). Instead, we set up NetworkManager on CentOS nodes automatically. And we restart it on RHEL (which is necessary for the nodes to pick up the new DNS we configured the subnet with). This makes the provisioning easier and more resilient. * Apply the node-network-manager role to every node It makes the code simpler and more consistent across distros. 
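A sketch of applying the new role from a play; the host group name here is an example rather than the project's actual group:

```
---
- hosts: nodes
  become: true
  roles:
    - role: node-network-manager
```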
--- roles/node-network-manager/tasks/main.yml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 roles/node-network-manager/tasks/main.yml (limited to 'roles') diff --git a/roles/node-network-manager/tasks/main.yml b/roles/node-network-manager/tasks/main.yml new file mode 100644 index 000000000..6a17855e7 --- /dev/null +++ b/roles/node-network-manager/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- name: install NetworkManager + package: + name: NetworkManager + state: present + +- name: configure NetworkManager + lineinfile: + dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}" + regexp: '^{{ item }}=' + line: '{{ item }}=yes' + state: present + create: yes + with_items: + - 'USE_PEERDNS' + - 'NM_CONTROLLED' + +- name: enable and start NetworkManager + service: + name: NetworkManager + state: restarted + enabled: yes -- cgit v1.2.1 From bef7807177915fe4861fcef5c4a78884f49b3b0e Mon Sep 17 00:00:00 2001 From: Tlacenka Date: Mon, 17 Jul 2017 12:47:03 +0200 Subject: Retry tasks in the subscription manager role (#552) * subscription manager: added 10 retries after 1 second delay * subscription manager: added untils * sub manager: typo --- roles/subscription-manager/tasks/main.yml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) (limited to 'roles') diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index 8c1ae697a..e4c9fdffb 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -52,6 +52,10 @@ when: - not registered - rhsm_authentication is defined + register: cleaningsubs_result + until: cleaningsubs_result.rc == 0 + retries: 10 + delay: 1 - name: "Install Satellite certificate" command: "rpm -Uvh --force http://{{ rhsm_satellite }}/pub/katello-ca-consumer-latest.noarch.rpm" @@ -69,6 +73,10 @@ - rhsm_satellite is defined - rhsm_satellite is not none - rhsm_satellite|trim != '' + register: register_key_result + until: register_key_result.rc == 0 + retries: 10 + delay: 1 # This can apply to either Hosted or Satellite - name: "Register using username and password" @@ -78,6 +86,10 @@ - not registered - rhsm_authentication == "password" - rhsm_org is not defined or rhsm_org is none or rhsm_org|trim == '' + register: register_userpw_result + until: register_userpw_result.rc == 0 + retries: 10 + delay: 1 # This can apply to either Hosted or Satellite - name: "Register using username, password and organization" @@ -89,12 +101,20 @@ - rhsm_org is defined - rhsm_org is not none - rhsm_org|trim != '' + register: register_userpworg_result + until: register_userpworg_result.rc == 0 + retries: 10 + delay: 1 - name: "Auto-attach to Subscription Manager Pool" command: "/usr/bin/subscription-manager attach --auto" when: - not registered - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' + register: autoattach_result + until: autoattach_result.rc == 0 + retries: 10 + delay: 1 - name: "Attach to a specific pool" command: "/usr/bin/subscription-manager attach --pool={{ rhsm_pool }}" @@ -103,6 +123,10 @@ - rhsm_pool is not none - rhsm_pool|trim != '' - not registered + register: attachpool_result + until: attachpool_result.rc == 0 + retries: 10 + delay: 1 - name: "Disable all repositories" command: "/usr/bin/subscription-manager repos --disable=*" @@ -120,3 +144,7 @@ - rhsm_repos is defined - rhsm_repos is not none - rhsm_repos|trim != '' + register: enablerepos_result + until: enablerepos_result.rc == 0 + retries: 10 + 
delay: 1 -- cgit v1.2.1 From a0d2dd9d29e8622e739870baf172f2b8a7e9c6a0 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Mon, 17 Jul 2017 14:05:42 +0200 Subject: Add a role to generate a static inventory (#540) * Add the static-inventory role that configures the inventory/hosts file by the given path, or creates it for you. Signed-off-by: Bogdan Dobrelya --- roles/static_inventory/defaults/main.yml | 8 +++ roles/static_inventory/tasks/checkpoint.yml | 17 ++++++ roles/static_inventory/tasks/main.yml | 6 +++ roles/static_inventory/tasks/openstack.yml | 47 +++++++++++++++++ roles/static_inventory/templates/inventory.j2 | 76 +++++++++++++++++++++++++++ 5 files changed, 154 insertions(+) create mode 100644 roles/static_inventory/defaults/main.yml create mode 100644 roles/static_inventory/tasks/checkpoint.yml create mode 100644 roles/static_inventory/tasks/main.yml create mode 100644 roles/static_inventory/tasks/openstack.yml create mode 100644 roles/static_inventory/templates/inventory.j2 (limited to 'roles') diff --git a/roles/static_inventory/defaults/main.yml b/roles/static_inventory/defaults/main.yml new file mode 100644 index 000000000..315965cde --- /dev/null +++ b/roles/static_inventory/defaults/main.yml @@ -0,0 +1,8 @@ +--- +# Either to checkpoint the dynamic inventory into a static one +refresh_inventory: True +inventory: static +inventory_path: ~/openstack-inventory + +# SSH key to access nodes +private_ssh_key: ~/.ssh/openshift diff --git a/roles/static_inventory/tasks/checkpoint.yml b/roles/static_inventory/tasks/checkpoint.yml new file mode 100644 index 000000000..c0365bd3d --- /dev/null +++ b/roles/static_inventory/tasks/checkpoint.yml @@ -0,0 +1,17 @@ +--- +- name: check for static inventory dir + stat: + path: "{{ inventory_path }}" + register: stat_inventory_path + +- name: create static inventory dir + file: + path: "{{ inventory_path }}" + state: directory + mode: 0750 + when: not stat_inventory_path.stat.exists + +- name: create inventory from template + template: + src: inventory.j2 + dest: "{{ inventory_path }}/hosts" diff --git a/roles/static_inventory/tasks/main.yml b/roles/static_inventory/tasks/main.yml new file mode 100644 index 000000000..15c81690e --- /dev/null +++ b/roles/static_inventory/tasks/main.yml @@ -0,0 +1,6 @@ +--- +- name: Generate in-memory inventory + include: openstack.yml + +- name: Checkpoint in-memory data into a static inventory + include: checkpoint.yml diff --git a/roles/static_inventory/tasks/openstack.yml b/roles/static_inventory/tasks/openstack.yml new file mode 100644 index 000000000..a25502835 --- /dev/null +++ b/roles/static_inventory/tasks/openstack.yml @@ -0,0 +1,47 @@ +--- +- no_log: true + block: + - name: fetch all nodes from openstack shade dynamic inventory + command: shade-inventory --list + register: registered_nodes_output + when: refresh_inventory|bool + + - name: set fact for openstack inventory cluster nodes + set_fact: + registered_nodes: "{{ (registered_nodes_output.stdout | from_json) | json_query(q) }}" + vars: + q: "[] | [?metadata.clusterid=='{{stack_name}}']" + when: + - refresh_inventory|bool + + - name: set_fact for openstack inventory nodes + set_fact: + registered_nodes_floating: "{{ (registered_nodes_output.stdout | from_json) | json_query(q2) }}" + vars: + q: "[] | [?metadata.group=='infra.{{stack_name}}']" + q2: "[] | [?metadata.clusterid=='{{stack_name}}'] | [?public_v4!='']" + when: + - refresh_inventory|bool + + - name: Add cluster nodes w/o floating IPs to inventory + with_items: "{{ registered_nodes }}" + 
when: not item in registered_nodes_floating + add_host: + name: '{{ item.name }}' + groups: '{{ item.metadata.group }}' + ansible_host: '{{ item.private_v4 }}' + ansible_fqdn: '{{ item.name }}' + ansible_private_key_file: '{{ private_ssh_key }}' + private_v4: '{{ item.private_v4 }}' + + - name: Add cluster nodes with floating IPs to inventory + with_items: "{{ registered_nodes_floating }}" + when: item in registered_nodes_floating + add_host: + name: '{{ item.name }}' + groups: '{{ item.metadata.group }}' + ansible_host: '{{ item.public_v4 }}' + ansible_fqdn: '{{ item.name }}' + ansible_private_key_file: '{{ private_ssh_key }}' + private_v4: '{{ item.private_v4 }}' + public_v4: '{{ item.public_v4 }}' diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 new file mode 100644 index 000000000..aa87e2b11 --- /dev/null +++ b/roles/static_inventory/templates/inventory.j2 @@ -0,0 +1,76 @@ +# BEGIN Autogenerated hosts +{% for host in groups['all'] %} +{% if hostvars[host].get('ansible_connection', '') == 'local' %} +{{ host }} ansible_connection=local +{% else %} + +{{ host }}{% if 'ansible_host' in hostvars[host] +%} ansible_host={{ hostvars[host]['ansible_host'] }}{% endif %} +{% if 'private_v4' in hostvars[host] +%} private_v4={{ hostvars[host]['private_v4'] }}{% endif %} +{% if 'public_v4' in hostvars[host] +%} public_v4={{ hostvars[host]['public_v4'] }}{% endif %} +{% if 'ansible_private_key_file' in hostvars[host] +%} ansible_private_key_file={{ hostvars[host]['ansible_private_key_file'] }}{% endif %} + +{% endif %} +{% endfor %} +# END autogenerated hosts + +#[all:vars] +# For all group_vars, see ./group_vars/all.yml + +# Create an OSEv3 group that contains the master, nodes, etcd, and lb groups. +# The lb group lets Ansible configure HAProxy as the load balancing solution. +# Comment lb out if your load balancer is pre-configured. 
+[cluster_hosts:children] +OSEv3 +dns + +[OSEv3:children] +masters +nodes +etcd + +# Set variables common for all OSEv3 hosts +#[OSEv3:vars] + +# For OSEv3 normal group vars, see ./group_vars/OSEv3.yml + +# Host Groups + +[masters:children] +masters.{{ stack_name }} + +[etcd:children] +etcd.{{ stack_name }} + +[nodes:children] +masters +infra.{{ stack_name }} +nodes.{{ stack_name }} + +[infra_hosts:children] +infra.{{ stack_name }} + +[dns:children] +dns.{{ stack_name }} + +# Empty placeholders for all groups of the cluster nodes +[masters.{{ stack_name }}] +[etcd.{{ stack_name }}] +[infra.{{ stack_name }}] +[nodes.{{ stack_name }}] +[dns.{{ stack_name }}] + +# BEGIN Autogenerated groups +{% for group in groups %} +{% if group not in ['ungrouped', 'all'] %} +[{{ group }}] +{% for host in groups[group] %} +{{ host }} +{% endfor %} + +{% endif %} +{% endfor %} +# END Autogenerated groups -- cgit v1.2.1 From 244d4f2347526e6e7428e81f882793aaca75a770 Mon Sep 17 00:00:00 2001 From: Tlacenka Date: Wed, 19 Jul 2017 17:40:53 +0200 Subject: During provisioning, make unnecessary packages optional under a switch (#561) * openshift-prep: bash-completion and vim-enhanced packages are now optional under install_debug_packages switch * openshift-prep: new line removal --- roles/openshift-prep/defaults/main.yml | 2 ++ roles/openshift-prep/tasks/prerequisites.yml | 7 +++++++ 2 files changed, 9 insertions(+) (limited to 'roles') diff --git a/roles/openshift-prep/defaults/main.yml b/roles/openshift-prep/defaults/main.yml index fac25dcc1..c8c9a00c0 100644 --- a/roles/openshift-prep/defaults/main.yml +++ b/roles/openshift-prep/defaults/main.yml @@ -1,11 +1,13 @@ --- # Defines either to install required packages and update all manage_packages: true +install_debug_packages: false required_packages: - wget - git - net-tools - bind-utils - bridge-utils +debug_packages: - bash-completion - vim-enhanced diff --git a/roles/openshift-prep/tasks/prerequisites.yml b/roles/openshift-prep/tasks/prerequisites.yml index 433c1c4e3..b7601aa48 100644 --- a/roles/openshift-prep/tasks/prerequisites.yml +++ b/roles/openshift-prep/tasks/prerequisites.yml @@ -9,6 +9,13 @@ with_items: "{{ required_packages }}" when: manage_packages|bool +- name: "Install debug packages (optional)" + yum: + name: "{{ item }}" + state: latest + with_items: "{{ debug_packages }}" + when: install_debug_packages|bool + - name: "Update all packages (this can take a very long time)" yum: name: '*' -- cgit v1.2.1 From 018d410c4d441a8f66e8ac71d82f4ce46b508364 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Thu, 20 Jul 2017 14:52:11 +0200 Subject: Include masters into etcd group, when it is empty (#559) Signed-off-by: Bogdan Dobrelya --- roles/static_inventory/templates/inventory.j2 | 1 + 1 file changed, 1 insertion(+) (limited to 'roles') diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 index aa87e2b11..ba806f286 100644 --- a/roles/static_inventory/templates/inventory.j2 +++ b/roles/static_inventory/templates/inventory.j2 @@ -44,6 +44,7 @@ masters.{{ stack_name }} [etcd:children] etcd.{{ stack_name }} +{% if 'etcd' not in groups or groups['etcd']|length == 0 %}masters.{{ stack_name }}{% endif %} [nodes:children] masters -- cgit v1.2.1 From 1975fb57b4ddee77eec6f849f2c7677e2ee3d6df Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Thu, 20 Jul 2017 14:53:01 +0200 Subject: Generate static inventory with shade inventory (#538) * Autogenerate inventory/hosts when 'inventory: static' (Default), with 
the shade-inventory tool. * Drop unused anymore: openstack.py and associated GPL notes, an example static inventory, omit manual updates for the inventory DNS names in the deployment guide. * Switch openstack.py formatted inventory hostvars to the shade-inventory format (omit openstack.* from hostvars). * Populate node labels from inventory vars instead of the heat templates combined with inventory vars. * Add app (k8s minions) nodes group for primary node labels. Signed-off-by: Bogdan Dobrelya --- roles/common/defaults/main.yml | 6 ++++++ roles/dns-server-detect/tasks/main.yml | 20 ++++++++++---------- roles/openstack-stack/meta/main.yml | 3 +++ roles/openstack-stack/templates/heat_stack.yaml.j2 | 8 ++++++-- roles/static_inventory/meta/main.yml | 3 +++ roles/static_inventory/templates/inventory.j2 | 10 +++++++++- 6 files changed, 37 insertions(+), 13 deletions(-) create mode 100644 roles/common/defaults/main.yml create mode 100644 roles/openstack-stack/meta/main.yml create mode 100644 roles/static_inventory/meta/main.yml (limited to 'roles') diff --git a/roles/common/defaults/main.yml b/roles/common/defaults/main.yml new file mode 100644 index 000000000..8db591374 --- /dev/null +++ b/roles/common/defaults/main.yml @@ -0,0 +1,6 @@ +--- +openshift_cluster_node_labels: + app: + region: primary + infra: + region: infra diff --git a/roles/dns-server-detect/tasks/main.yml b/roles/dns-server-detect/tasks/main.yml index 183c0a0ca..cd775814f 100644 --- a/roles/dns-server-detect/tasks/main.yml +++ b/roles/dns-server-detect/tasks/main.yml @@ -2,35 +2,35 @@ - fail: msg: 'Missing required private DNS server(s)' when: - - external_nsupdate_keys['private'] is undefined - - hostvars[groups['dns'][0]] is undefined + - external_nsupdate_keys['private'] is undefined + - hostvars[groups['dns'][0]] is undefined - fail: msg: 'Missing required public DNS server(s)' when: - - external_nsupdate_keys['public'] is undefined - - hostvars[groups['dns'][0]] is undefined + - external_nsupdate_keys['public'] is undefined + - hostvars[groups['dns'][0]] is undefined - name: "Set the private DNS server to use the external value (if provided)" set_fact: private_dns_server: "{{ external_nsupdate_keys['private']['server'] }}" when: - - external_nsupdate_keys['private'] is defined + - external_nsupdate_keys['private'] is defined - name: "Set the private DNS server to use the provisioned value" set_fact: - private_dns_server: "{{ hostvars[groups['dns'][0]].openstack.private_v4 }}" + private_dns_server: "{{ hostvars[groups['dns'][0]].private_v4 }}" when: - - private_dns_server is undefined + - private_dns_server is undefined - name: "Set the public DNS server to use the external value (if provided)" set_fact: public_dns_server: "{{ external_nsupdate_keys['public']['server'] }}" when: - - external_nsupdate_keys['public'] is defined + - external_nsupdate_keys['public'] is defined - name: "Set the public DNS server to use the provisioned value" set_fact: - public_dns_server: "{{ hostvars[groups['dns'][0]].openstack.public_v4 }}" + public_dns_server: "{{ hostvars[groups['dns'][0]].public_v4 }}" when: - - public_dns_server is undefined + - public_dns_server is undefined diff --git a/roles/openstack-stack/meta/main.yml b/roles/openstack-stack/meta/main.yml new file mode 100644 index 000000000..fdda41bb3 --- /dev/null +++ b/roles/openstack-stack/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: common diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 
992f6257b..54941db06 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -610,7 +610,9 @@ resources: type: node subtype: app node_labels: - region: primary +{% for k, v in openshift_cluster_node_labels.app.iteritems() %} + {{ k|e }}: {{ v|e }} +{% endfor %} image: {{ openstack_image }} flavor: {{ node_flavor }} key_name: {{ ssh_public_key }} @@ -654,7 +656,9 @@ resources: type: node subtype: infra node_labels: - region: infra +{% for k, v in openshift_cluster_node_labels.infra.iteritems() %} + {{ k|e }}: {{ v|e }} +{% endfor %} image: {{ openstack_image }} flavor: {{ infra_flavor }} key_name: {{ ssh_public_key }} diff --git a/roles/static_inventory/meta/main.yml b/roles/static_inventory/meta/main.yml new file mode 100644 index 000000000..fdda41bb3 --- /dev/null +++ b/roles/static_inventory/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: common diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 index ba806f286..9d129cf10 100644 --- a/roles/static_inventory/templates/inventory.j2 +++ b/roles/static_inventory/templates/inventory.j2 @@ -19,6 +19,11 @@ #[all:vars] # For all group_vars, see ./group_vars/all.yml +[infra_hosts:vars] +openshift_node_labels={{ openshift_cluster_node_labels.infra | to_json | quote }} + +[app:vars] +openshift_node_labels={{ openshift_cluster_node_labels.app | to_json | quote }} # Create an OSEv3 group that contains the master, nodes, etcd, and lb groups. # The lb group lets Ansible configure HAProxy as the load balancing solution. @@ -28,7 +33,6 @@ OSEv3 dns [OSEv3:children] -masters nodes etcd @@ -54,6 +58,9 @@ nodes.{{ stack_name }} [infra_hosts:children] infra.{{ stack_name }} +[app:children] +nodes.{{ stack_name }} + [dns:children] dns.{{ stack_name }} @@ -62,6 +69,7 @@ dns.{{ stack_name }} [etcd.{{ stack_name }}] [infra.{{ stack_name }}] [nodes.{{ stack_name }}] +[app.{{ stack_name }}] [dns.{{ stack_name }}] # BEGIN Autogenerated groups -- cgit v1.2.1 From cc67080cae89834c1b0a531870b5ee6425b0b2ac Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Tue, 25 Jul 2017 15:17:20 +0200 Subject: Set openshift_hostname explicitly for openstack (#579) This fixes a regression caused by the move to the static inventory. The nodes in `oc get nodes` should be (and had been) identified by their hostnames (e.g. master-0.openshift.example.com), but are now using their internal IP addresses instead. --- roles/static_inventory/templates/inventory.j2 | 1 + 1 file changed, 1 insertion(+) (limited to 'roles') diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 index 9d129cf10..464726a0b 100644 --- a/roles/static_inventory/templates/inventory.j2 +++ b/roles/static_inventory/templates/inventory.j2 @@ -12,6 +12,7 @@ %} public_v4={{ hostvars[host]['public_v4'] }}{% endif %} {% if 'ansible_private_key_file' in hostvars[host] %} ansible_private_key_file={{ hostvars[host]['ansible_private_key_file'] }}{% endif %} + openshift_hostname={{ host }} {% endif %} {% endfor %} -- cgit v1.2.1 From 677fd46cf37cab5f995170b3567939d784ebb07a Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Wed, 5 Jul 2017 12:46:57 +0200 Subject: Add bastion and ssh config for the static inventory role * Autogenerate SSH config for static inventory and bastion. * When using bastion, use FQDN for inventory's ansible_host and SSH config's Hostname. Simplifies accessing nodes by names instead of private IPs. 
Signed-off-by: Bogdan Dobrelya --- roles/static_inventory/defaults/main.yml | 13 +++++++++++ roles/static_inventory/tasks/main.yml | 4 ++++ roles/static_inventory/tasks/openstack.yml | 25 ++++++++++++++++++++-- roles/static_inventory/tasks/sshconfig.yml | 13 +++++++++++ roles/static_inventory/templates/inventory.j2 | 4 ++++ .../templates/openstack_ssh_config.j2 | 21 ++++++++++++++++++ 6 files changed, 78 insertions(+), 2 deletions(-) create mode 100644 roles/static_inventory/tasks/sshconfig.yml create mode 100644 roles/static_inventory/templates/openstack_ssh_config.j2 (limited to 'roles') diff --git a/roles/static_inventory/defaults/main.yml b/roles/static_inventory/defaults/main.yml index 315965cde..63de45646 100644 --- a/roles/static_inventory/defaults/main.yml +++ b/roles/static_inventory/defaults/main.yml @@ -4,5 +4,18 @@ refresh_inventory: True inventory: static inventory_path: ~/openstack-inventory +# Either to configure bastion +use_bastion: true + +# SSH user/key/options to access hosts via bastion +ssh_user: openshift +ssh_options: >- + -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no + -o ConnectTimeout=90 -o ControlMaster=auto -o ControlPersist=270s + -o ServerAliveInterval=30 -o GSSAPIAuthentication=no + # SSH key to access nodes private_ssh_key: ~/.ssh/openshift + +# The patch to store the generated config to access bastion/hosts +ssh_config_path: /tmp/ssh.config.ansible diff --git a/roles/static_inventory/tasks/main.yml b/roles/static_inventory/tasks/main.yml index 15c81690e..b58866017 100644 --- a/roles/static_inventory/tasks/main.yml +++ b/roles/static_inventory/tasks/main.yml @@ -4,3 +4,7 @@ - name: Checkpoint in-memory data into a static inventory include: checkpoint.yml + +- name: Generate SSH config for accessing hosts via bastion + include: sshconfig.yml + when: use_bastion|bool diff --git a/roles/static_inventory/tasks/openstack.yml b/roles/static_inventory/tasks/openstack.yml index a25502835..95d0d172f 100644 --- a/roles/static_inventory/tasks/openstack.yml +++ b/roles/static_inventory/tasks/openstack.yml @@ -16,12 +16,14 @@ - name: set_fact for openstack inventory nodes set_fact: + registered_bastion_nodes: "{{ (registered_nodes_output.stdout | from_json) | json_query(q) }}" registered_nodes_floating: "{{ (registered_nodes_output.stdout | from_json) | json_query(q2) }}" vars: q: "[] | [?metadata.group=='infra.{{stack_name}}']" q2: "[] | [?metadata.clusterid=='{{stack_name}}'] | [?public_v4!='']" when: - refresh_inventory|bool + - use_bastion|bool - name: Add cluster nodes w/o floating IPs to inventory with_items: "{{ registered_nodes }}" @@ -29,9 +31,11 @@ add_host: name: '{{ item.name }}' groups: '{{ item.metadata.group }}' - ansible_host: '{{ item.private_v4 }}' + ansible_host: "{% if use_bastion|bool %}{{ item.name }}{% else %}{{ item.private_v4 }}{% endif %}" ansible_fqdn: '{{ item.name }}' + ansible_user: '{{ ssh_user }}' ansible_private_key_file: '{{ private_ssh_key }}' + ansible_ssh_extra_args: '-F {{ ssh_config_path }}' private_v4: '{{ item.private_v4 }}' - name: Add cluster nodes with floating IPs to inventory @@ -40,8 +44,25 @@ add_host: name: '{{ item.name }}' groups: '{{ item.metadata.group }}' - ansible_host: '{{ item.public_v4 }}' + ansible_host: "{% if use_bastion|bool %}{{ item.name }}{% else %}{{ item.private_v4 }}{% endif %}" ansible_fqdn: '{{ item.name }}' + ansible_user: '{{ ssh_user }}' ansible_private_key_file: '{{ private_ssh_key }}' + ansible_ssh_extra_args: '-F {{ ssh_config_path }}' private_v4: '{{ item.private_v4 }}' public_v4: 
'{{ item.public_v4 }}' + + - name: Add bastion node to inventory + add_host: + name: bastion + groups: bastions + ansible_host: '{{ registered_bastion_nodes[0].public_v4 }}' + ansible_fqdn: '{{ registered_bastion_nodes[0].name }}' + ansible_user: '{{ ssh_user }}' + ansible_private_key_file: '{{ private_ssh_key }}' + ansible_ssh_extra_args: '-F {{ ssh_config_path }}' + private_v4: '{{ registered_bastion_nodes[0].private_v4 }}' + public_v4: '{{ registered_bastion_nodes[0].public_v4 }}' + when: + - registered_bastion_nodes is defined + - use_bastion|bool diff --git a/roles/static_inventory/tasks/sshconfig.yml b/roles/static_inventory/tasks/sshconfig.yml new file mode 100644 index 000000000..7119fe6ff --- /dev/null +++ b/roles/static_inventory/tasks/sshconfig.yml @@ -0,0 +1,13 @@ +--- +- name: set ssh proxy command prefix for accessing nodes via bastion + set_fact: + ssh_proxy_command: >- + ssh {{ ssh_options }} + -i {{ private_ssh_key }} + {{ ssh_user }}@{{ hostvars['bastion'].ansible_host }} + +- name: regenerate ssh config + template: + src: openstack_ssh_config.j2 + dest: "{{ ssh_config_path }}" + mode: 0644 diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 index 464726a0b..ac74db35c 100644 --- a/roles/static_inventory/templates/inventory.j2 +++ b/roles/static_inventory/templates/inventory.j2 @@ -10,8 +10,12 @@ %} private_v4={{ hostvars[host]['private_v4'] }}{% endif %} {% if 'public_v4' in hostvars[host] %} public_v4={{ hostvars[host]['public_v4'] }}{% endif %} +{% if 'ansible_user' in hostvars[host] +%} ansible_user={{ hostvars[host]['ansible_user'] }}{% endif %} {% if 'ansible_private_key_file' in hostvars[host] %} ansible_private_key_file={{ hostvars[host]['ansible_private_key_file'] }}{% endif %} +{% if 'ansible_ssh_extra_args' in hostvars[host] +%} ansible_ssh_extra_args={{ hostvars[host]['ansible_ssh_extra_args']|quote }}{% endif %} openshift_hostname={{ host }} {% endif %} diff --git a/roles/static_inventory/templates/openstack_ssh_config.j2 b/roles/static_inventory/templates/openstack_ssh_config.j2 new file mode 100644 index 000000000..ad5d1253a --- /dev/null +++ b/roles/static_inventory/templates/openstack_ssh_config.j2 @@ -0,0 +1,21 @@ +Host * + IdentitiesOnly yes + +Host bastion + Hostname {{ hostvars['bastion'].ansible_host }} + IdentityFile {{ hostvars['bastion'].ansible_private_key_file }} + User {{ ssh_user }} + StrictHostKeyChecking no + UserKnownHostsFile=/dev/null + +{% for host in groups['all'] | difference(groups['bastions'][0]) %} + +Host {{ host }} + Hostname {{ hostvars[host].ansible_host }} + ProxyCommand {{ ssh_proxy_command }} -W {{ hostvars[host].private_v4 }}:22 + IdentityFile {{ hostvars[host].ansible_private_key_file }} + User {{ ssh_user }} + StrictHostKeyChecking no + UserKnownHostsFile=/dev/null + +{% endfor %} -- cgit v1.2.1 From df8f5f0e251a014ab30dabd62c17e151b7fe36e8 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Wed, 12 Jul 2017 13:09:45 +0200 Subject: Options for bastion, SSH config, static inventory autogeneration * At the provisioning stage, allow users to auto-generate SSH config, when using a static inventory. * Run playbooks to provsion and post-provision as a separate, when using a bastion. This re-applies the SSH config, which ansible can't do on the fly. * Support a pre-installed bastion node, colocated with the 1st infra node. * With a bastion enabled, reduce floating IP footprint to infra and dns nodes only, effectively isolating a cluster in a private network. 
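A hedged sketch of the knobs this and the previous patch expose; the variable names are taken from the role defaults in the diffs, and the group_vars location is assumed:

```
# group_vars/all.yml (assumed location)
use_bastion: true
bastion_ingress_cidr: 0.0.0.0/0           # CIDR allowed to SSH to the nodes (intended for the bastion's subnet)
inventory: static
inventory_path: ~/openstack-inventory
private_ssh_key: ~/.ssh/openshift
ssh_config_path: /tmp/ssh.config.ansible
```

As the message notes, with use_bastion enabled the provisioning and post-provisioning playbooks are run as separate invocations so that the regenerated SSH config is picked up in between.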
Signed-off-by: Bogdan Dobrelya --- roles/openstack-stack/defaults/main.yml | 2 + roles/openstack-stack/tasks/main.yml | 9 +- roles/openstack-stack/templates/heat_stack.yaml.j2 | 25 ++++ .../templates/heat_stack_server_nofloating.yaml.j2 | 149 +++++++++++++++++++++ roles/static_inventory/tasks/openstack.yml | 7 +- roles/static_inventory/templates/inventory.j2 | 5 +- 6 files changed, 187 insertions(+), 10 deletions(-) create mode 100644 roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 (limited to 'roles') diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml index 4831d6bc4..803a96389 100644 --- a/roles/openstack-stack/defaults/main.yml +++ b/roles/openstack-stack/defaults/main.yml @@ -4,6 +4,7 @@ ssh_ingress_cidr: 0.0.0.0/0 node_ingress_cidr: 0.0.0.0/0 master_ingress_cidr: 0.0.0.0/0 lb_ingress_cidr: 0.0.0.0/0 +bastion_ingress_cidr: 0.0.0.0/0 num_etcd: 0 num_masters: 1 num_nodes: 1 @@ -11,3 +12,4 @@ num_dns: 1 num_infra: 1 nodes_to_remove: [] etcd_volume_size: 2 +use_bastion: False diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml index a53e6350b..9b4855294 100644 --- a/roles/openstack-stack/tasks/main.yml +++ b/roles/openstack-stack/tasks/main.yml @@ -8,7 +8,6 @@ - name: set template paths set_fact: stack_template_path: "{{ stack_template_pre.path }}/stack.yaml" - server_template_path: "{{ stack_template_pre.path }}/server.yaml" user_data_template_path: "{{ stack_template_pre.path }}/user-data" - name: generate HOT stack template from jinja2 template @@ -19,7 +18,13 @@ - name: generate HOT server template from jinja2 template template: src: heat_stack_server.yaml.j2 - dest: "{{ server_template_path }}" + dest: "{{ stack_template_pre.path }}/server.yaml" + +- name: generate HOT server w/o floating IPs template from jinja2 template + template: + src: heat_stack_server_nofloating.yaml.j2 + dest: "{{ stack_template_pre.path }}/server_nofloating.yaml" + when: use_bastion|bool - name: generate user_data from jinja2 template template: diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 54941db06..524f466ff 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -156,6 +156,13 @@ resources: port_range_min: 22 port_range_max: 22 remote_ip_prefix: {{ ssh_ingress_cidr }} +{% if use_bastion|bool %} + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ bastion_ingress_cidr }} +{% endif %} - direction: ingress protocol: icmp remote_ip_prefix: {{ ssh_ingress_cidr }} @@ -458,7 +465,11 @@ resources: properties: count: {{ num_etcd }} resource_def: +{% if use_bastion|bool %} + type: server_nofloating.yaml +{% else %} type: server.yaml +{% endif %} properties: name: str_replace: @@ -483,7 +494,9 @@ resources: secgrp: - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } - { get_resource: common-secgrp } +{% if not use_bastion|bool %} floating_network: {{ external_network }} +{% endif %} net_name: str_replace: template: openshift-ansible-cluster_id-net @@ -540,7 +553,11 @@ resources: properties: count: {{ num_masters }} resource_def: +{% if use_bastion|bool %} + type: server_nofloating.yaml +{% else %} type: server.yaml +{% endif %} properties: name: str_replace: @@ -573,7 +590,9 @@ resources: {% endif %} {% endif %} - { get_resource: common-secgrp } +{% if not 
use_bastion|bool %} floating_network: {{ external_network }} +{% endif %} net_name: str_replace: template: openshift-ansible-cluster_id-net @@ -590,7 +609,11 @@ resources: removal_policies: - resource_list: {{ nodes_to_remove }} resource_def: +{% if use_bastion|bool %} + type: server_nofloating.yaml +{% else %} type: server.yaml +{% endif %} properties: name: str_replace: @@ -621,7 +644,9 @@ resources: secgrp: - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } - { get_resource: common-secgrp } +{% if not use_bastion|bool %} floating_network: {{ external_network }} +{% endif %} net_name: str_replace: template: openshift-ansible-cluster_id-net diff --git a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 new file mode 100644 index 000000000..792a8b90c --- /dev/null +++ b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 @@ -0,0 +1,149 @@ +heat_template_version: 2016-10-14 + +description: OpenShift cluster server w/o floating IP + +parameters: + + name: + type: string + label: Name + description: Name + + group: + type: string + label: Host Group + description: The Primary Ansible Host Group + default: host + + cluster_env: + type: string + label: Cluster environment + description: Environment of the cluster + + cluster_id: + type: string + label: Cluster ID + description: Identifier of the cluster + + type: + type: string + label: Type + description: Type master or node + + subtype: + type: string + label: Sub-type + description: Sub-type compute or infra for nodes, default otherwise + default: default + + key_name: + type: string + label: Key name + description: Key name of keypair + + image: + type: string + label: Image + description: Name of the image + + flavor: + type: string + label: Flavor + description: Name of the flavor + + net: + type: string + label: Net ID + description: Net resource + + net_name: + type: string + label: Net name + description: Net name + + subnet: + type: string + label: Subnet ID + description: Subnet resource + + secgrp: + type: comma_delimited_list + label: Security groups + description: Security group resources + + availability_zone: + type: string + description: The Availability Zone to launch the instance. + default: nova + + volume_size: + type: number + description: Size of the volume to be created. + default: 1 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. 
+ + node_labels: + type: json + description: OpenShift Node Labels + default: {"region": "default" } + +outputs: + + name: + description: Name of the server + value: { get_attr: [ server_nofloating, name ] } + + private_ip: + description: Private IP of the server + value: + get_attr: + - server_nofloating + - addresses + - { get_param: net_name } + - 0 + - addr + +resources: + + server_nofloating: + type: OS::Nova::Server + properties: + name: { get_param: name } + key_name: { get_param: key_name } + image: { get_param: image } + flavor: { get_param: flavor } + networks: + - port: { get_resource: port } + user_data: + get_file: user-data + user_data_format: RAW + metadata: + group: { get_param: group } + environment: { get_param: cluster_env } + clusterid: { get_param: cluster_id } + host-type: { get_param: type } + sub-host-type: { get_param: subtype } + node_labels: { get_param: node_labels } + + port: + type: OS::Neutron::Port + properties: + network: { get_param: net } + fixed_ips: + - subnet: { get_param: subnet } + security_groups: { get_param: secgrp } + + cinder_volume: + type: OS::Cinder::Volume + properties: + size: { get_param: volume_size } + availability_zone: { get_param: availability_zone } + + volume_attachment: + type: OS::Cinder::VolumeAttachment + properties: + volume_id: { get_resource: cinder_volume } + instance_uuid: { get_resource: server_nofloating } + mountpoint: /dev/sdb diff --git a/roles/static_inventory/tasks/openstack.yml b/roles/static_inventory/tasks/openstack.yml index 95d0d172f..499adf08c 100644 --- a/roles/static_inventory/tasks/openstack.yml +++ b/roles/static_inventory/tasks/openstack.yml @@ -23,11 +23,9 @@ q2: "[] | [?metadata.clusterid=='{{stack_name}}'] | [?public_v4!='']" when: - refresh_inventory|bool - - use_bastion|bool - name: Add cluster nodes w/o floating IPs to inventory - with_items: "{{ registered_nodes }}" - when: not item in registered_nodes_floating + with_items: "{{ registered_nodes|difference(registered_nodes_floating) }}" add_host: name: '{{ item.name }}' groups: '{{ item.metadata.group }}' @@ -40,11 +38,10 @@ - name: Add cluster nodes with floating IPs to inventory with_items: "{{ registered_nodes_floating }}" - when: item in registered_nodes_floating add_host: name: '{{ item.name }}' groups: '{{ item.metadata.group }}' - ansible_host: "{% if use_bastion|bool %}{{ item.name }}{% else %}{{ item.private_v4 }}{% endif %}" + ansible_host: "{% if use_bastion|bool %}{{ item.name }}{% else %}{{ item.public_v4 }}{% endif %}" ansible_fqdn: '{{ item.name }}' ansible_user: '{{ ssh_user }}' ansible_private_key_file: '{{ private_ssh_key }}' diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 index ac74db35c..24dc9d4a8 100644 --- a/roles/static_inventory/templates/inventory.j2 +++ b/roles/static_inventory/templates/inventory.j2 @@ -14,9 +14,8 @@ %} ansible_user={{ hostvars[host]['ansible_user'] }}{% endif %} {% if 'ansible_private_key_file' in hostvars[host] %} ansible_private_key_file={{ hostvars[host]['ansible_private_key_file'] }}{% endif %} -{% if 'ansible_ssh_extra_args' in hostvars[host] -%} ansible_ssh_extra_args={{ hostvars[host]['ansible_ssh_extra_args']|quote }}{% endif %} - openshift_hostname={{ host }} +{% if use_bastion|bool and 'ansible_ssh_extra_args' in hostvars[host] +%} ansible_ssh_extra_args={{ hostvars[host]['ansible_ssh_extra_args']|quote }}{% endif %} openshift_hostname={{ host }} {% endif %} {% endfor %} -- cgit v1.2.1 From 5820aa4371aec8218426cdceab3360c6955fe018 Mon 
Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Wed, 2 Aug 2017 14:40:08 +0000 Subject: Moving common DNS roles out of the playbook area (#605) --- roles/dns-records/tasks/main.yml | 82 ++++++++++++++++++++++++++++++++++++++++ roles/dns-views/tasks/main.yml | 25 ++++++++++++ 2 files changed, 107 insertions(+) create mode 100644 roles/dns-records/tasks/main.yml create mode 100644 roles/dns-views/tasks/main.yml (limited to 'roles') diff --git a/roles/dns-records/tasks/main.yml b/roles/dns-records/tasks/main.yml new file mode 100644 index 000000000..3672a8ea6 --- /dev/null +++ b/roles/dns-records/tasks/main.yml @@ -0,0 +1,82 @@ +--- +- name: "Generate list of private A records" + set_fact: + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}" + with_items: "{{ groups['cluster_hosts'] }}" + +- name: "Add wildcard records to the private A records for infrahosts" + set_fact: + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['private_v4'] } ] }}" + with_items: "{{ groups['infra_hosts'] }}" + +- name: "Set the private DNS server to use the external value (if provided)" + set_fact: + nsupdate_server_private: "{{ external_nsupdate_keys['private']['server'] }}" + nsupdate_key_secret_private: "{{ external_nsupdate_keys['private']['key_secret'] }}" + nsupdate_key_algorithm_private: "{{ external_nsupdate_keys['private']['key_algorithm'] }}" + when: + - external_nsupdate_keys is defined + - external_nsupdate_keys['private'] is defined + +- name: "Set the private DNS server to use the provisioned value" + set_fact: + nsupdate_server_private: "{{ hostvars[groups['dns'][0]].public_v4 }}" + nsupdate_key_secret_private: "{{ hostvars[groups['dns'][0]].nsupdate_keys['private-' + full_dns_domain].key_secret }}" + nsupdate_key_algorithm_private: "{{ hostvars[groups['dns'][0]].nsupdate_keys['private-' + full_dns_domain].key_algorithm }}" + when: + - nsupdate_server_private is undefined + +- name: "Generate the private Add section for DNS" + set_fact: + private_named_records: + - view: "private" + zone: "{{ full_dns_domain }}" + server: "{{ nsupdate_server_private }}" + key_name: "{{ ( 'private-' + full_dns_domain ) }}" + key_secret: "{{ nsupdate_key_secret_private }}" + key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}" + entries: "{{ private_records }}" + +- name: "Generate list of public A records" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['public_v4'] } ] }}" + with_items: "{{ groups['cluster_hosts'] }}" + when: hostvars[item]['public_v4'] is defined + +- name: "Add wildcard records to the public A records" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' 
+ openshift_app_domain, 'ip': hostvars[item]['public_v4'] } ] }}" + with_items: "{{ groups['infra_hosts'] }}" + when: hostvars[item]['public_v4'] is defined + +- name: "Set the public DNS server details to use the external value (if provided)" + set_fact: + nsupdate_server_public: "{{ external_nsupdate_keys['public']['server'] }}" + nsupdate_key_secret_public: "{{ external_nsupdate_keys['public']['key_secret'] }}" + nsupdate_key_algorithm_public: "{{ external_nsupdate_keys['public']['key_algorithm'] }}" + when: + - external_nsupdate_keys is defined + - external_nsupdate_keys['public'] is defined + +- name: "Set the public DNS server details to use the provisioned value" + set_fact: + nsupdate_server_public: "{{ hostvars[groups['dns'][0]].public_v4 }}" + nsupdate_key_secret_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_secret }}" + nsupdate_key_algorithm_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_algorithm }}" + when: + - nsupdate_server_public is undefined + +- name: "Generate the public Add section for DNS" + set_fact: + public_named_records: + - view: "public" + zone: "{{ full_dns_domain }}" + server: "{{ nsupdate_server_public }}" + key_name: "{{ ( 'public-' + full_dns_domain ) }}" + key_secret: "{{ nsupdate_key_secret_public }}" + key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" + entries: "{{ public_records }}" + +- name: "Generate the final dns_records_add" + set_fact: + dns_records_add: "{{ private_named_records + public_named_records }}" diff --git a/roles/dns-views/tasks/main.yml b/roles/dns-views/tasks/main.yml new file mode 100644 index 000000000..7165b4269 --- /dev/null +++ b/roles/dns-views/tasks/main.yml @@ -0,0 +1,25 @@ +--- +- name: "Generate ACL list for DNS server" + set_fact: + acl_list: "{{ acl_list | default([]) + [ (hostvars[item]['private_v4'] + '/32') ] }}" + with_items: "{{ groups['cluster_hosts'] }}" + +- name: "Generate the private view" + set_fact: + private_named_view: + - name: "private" + acl_entry: "{{ acl_list }}" + zone: + - dns_domain: "{{ full_dns_domain }}" + +- name: "Generate the public view" + set_fact: + public_named_view: + - name: "public" + zone: + - dns_domain: "{{ full_dns_domain }}" + forwarder: "{{ public_dns_nameservers }}" + +- name: "Generate the final named_config_views" + set_fact: + named_config_views: "{{ private_named_view + public_named_view }}" -- cgit v1.2.1 From e4cb854086c845fa301cddaefcba1e3accaa17d8 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 4 Aug 2017 15:26:35 +0200 Subject: Allow using ephemeral volumes for docker storage (#615) For testing cases it's sometimes useful to not create Cinder volumes for the VMs. It can also sometimes be a little faster and more robust (but unfit for production). This adds an option called `ephemeral_volumes` that will use the VM's storage instead of creating volumes when set to true. 
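Opting in is a one-line inventory change; since the templates read the flag with |default(false), leaving it unset keeps the Cinder-backed behaviour:

```
# group_vars/all.yml (illustrative)
ephemeral_volumes: true    # test setups only: use the VM's own disk, create no Cinder volume
```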
--- roles/openstack-stack/templates/heat_stack_server.yaml.j2 | 2 ++ roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 | 2 ++ 2 files changed, 4 insertions(+) (limited to 'roles') diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 index 5851d3b9b..85af311ec 100644 --- a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 @@ -156,6 +156,7 @@ resources: floating_network: { get_param: floating_network } port_id: { get_resource: port } +{% if not ephemeral_volumes|default(false)|bool %} cinder_volume: type: OS::Cinder::Volume properties: @@ -168,3 +169,4 @@ resources: volume_id: { get_resource: cinder_volume } instance_uuid: { get_resource: server } mountpoint: /dev/sdb +{% endif %} diff --git a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 index 792a8b90c..a22b7c6d0 100644 --- a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 @@ -135,6 +135,7 @@ resources: - subnet: { get_param: subnet } security_groups: { get_param: secgrp } +{% if not ephemeral_volumes|default(false)|bool %} cinder_volume: type: OS::Cinder::Volume properties: @@ -147,3 +148,4 @@ resources: volume_id: { get_resource: cinder_volume } instance_uuid: { get_resource: server_nofloating } mountpoint: /dev/sdb +{% endif %} -- cgit v1.2.1 From 784443b0d88597b988c3d5c58bc6358f5c73675e Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Tue, 15 Aug 2017 17:48:58 +0200 Subject: Support multiple private networks for static inventory (#604) Add openstack_private_network_name to filter by a wanted private network. 
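A minimal sketch of how this might be used (hedged: the role default added below is `openstack_private_network: private`, so the exact override shown here is an assumption based on this patch, not a documented interface):

```
# Hypothetical inventory/group_vars override: pick which of several
# private networks the static inventory should take node addresses from.
openstack_private_network: my-openshift-private-net
```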
Signed-off-by: Bogdan Dobrelya --- roles/static_inventory/defaults/main.yml | 2 ++ roles/static_inventory/tasks/openstack.yml | 20 ++++++++++++++++---- 2 files changed, 18 insertions(+), 4 deletions(-) (limited to 'roles') diff --git a/roles/static_inventory/defaults/main.yml b/roles/static_inventory/defaults/main.yml index 63de45646..5b8aacf5c 100644 --- a/roles/static_inventory/defaults/main.yml +++ b/roles/static_inventory/defaults/main.yml @@ -19,3 +19,5 @@ private_ssh_key: ~/.ssh/openshift # The patch to store the generated config to access bastion/hosts ssh_config_path: /tmp/ssh.config.ansible + +openstack_private_network: private diff --git a/roles/static_inventory/tasks/openstack.yml b/roles/static_inventory/tasks/openstack.yml index 499adf08c..75d0ee6d5 100644 --- a/roles/static_inventory/tasks/openstack.yml +++ b/roles/static_inventory/tasks/openstack.yml @@ -29,12 +29,20 @@ add_host: name: '{{ item.name }}' groups: '{{ item.metadata.group }}' - ansible_host: "{% if use_bastion|bool %}{{ item.name }}{% else %}{{ item.private_v4 }}{% endif %}" + ansible_host: >- + {% if use_bastion|bool -%} + {{ item.name }} + {%- else -%} + {%- set node = registered_nodes | json_query("[?name=='" + item.name + "']") -%} + {{ node[0].addresses[openstack_private_network|quote][0].addr }} + {%- endif %} ansible_fqdn: '{{ item.name }}' ansible_user: '{{ ssh_user }}' ansible_private_key_file: '{{ private_ssh_key }}' ansible_ssh_extra_args: '-F {{ ssh_config_path }}' - private_v4: '{{ item.private_v4 }}' + private_v4: >- + {% set node = registered_nodes | json_query("[?name=='" + item.name + "']") -%} + {{ node[0].addresses[openstack_private_network|quote][0].addr }} - name: Add cluster nodes with floating IPs to inventory with_items: "{{ registered_nodes_floating }}" @@ -46,7 +54,9 @@ ansible_user: '{{ ssh_user }}' ansible_private_key_file: '{{ private_ssh_key }}' ansible_ssh_extra_args: '-F {{ ssh_config_path }}' - private_v4: '{{ item.private_v4 }}' + private_v4: >- + {% set node = registered_nodes | json_query("[?name=='" + item.name + "']") -%} + {{ node[0].addresses[openstack_private_network|quote][0].addr }} public_v4: '{{ item.public_v4 }}' - name: Add bastion node to inventory @@ -58,7 +68,9 @@ ansible_user: '{{ ssh_user }}' ansible_private_key_file: '{{ private_ssh_key }}' ansible_ssh_extra_args: '-F {{ ssh_config_path }}' - private_v4: '{{ registered_bastion_nodes[0].private_v4 }}' + private_v4: >- + {% set node = registered_nodes | json_query("[?name=='" + registered_bastion_nodes[0].name + "']") -%} + {{ node[0].addresses[openstack_private_network|quote][0].addr }} public_v4: '{{ registered_bastion_nodes[0].public_v4 }}' when: - registered_bastion_nodes is defined -- cgit v1.2.1 From 3d9676911df8eb0fc4ce03c5ccfab049b430f87b Mon Sep 17 00:00:00 2001 From: Tlacenka Date: Tue, 15 Aug 2017 19:17:59 +0200 Subject: Specify different image names for roles (#637) * all.yml: set up new variables for specifying images for roles * stack_params.yaml: add image name variables for different roles * more roles added * heat_stack.yaml.j2: openstack_image changed to updated image names * README: updated documentation for specifying image names --- roles/openstack-stack/templates/heat_stack.yaml.j2 | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'roles') diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 524f466ff..c41bf15be 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ 
b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -486,7 +486,7 @@ resources: k8s_type: etcds cluster_id: {{ stack_name }} type: etcd - image: {{ openstack_image }} + image: {{ openstack_etcd_image }} flavor: {{ etcd_flavor }} key_name: {{ ssh_public_key }} net: { get_resource: net } @@ -529,7 +529,7 @@ resources: k8s_type: lb cluster_id: {{ stack_name }} type: lb - image: {{ openstack_image }} + image: {{ openstack_lb_image }} flavor: {{ lb_flavor }} key_name: {{ ssh_public_key }} net: { get_resource: net } @@ -574,7 +574,7 @@ resources: k8s_type: masters cluster_id: {{ stack_name }} type: master - image: {{ openstack_image }} + image: {{ openstack_master_image }} flavor: {{ master_flavor }} key_name: {{ ssh_public_key }} net: { get_resource: net } @@ -636,7 +636,7 @@ resources: {% for k, v in openshift_cluster_node_labels.app.iteritems() %} {{ k|e }}: {{ v|e }} {% endfor %} - image: {{ openstack_image }} + image: {{ openstack_node_image }} flavor: {{ node_flavor }} key_name: {{ ssh_public_key }} net: { get_resource: net } @@ -684,7 +684,7 @@ resources: {% for k, v in openshift_cluster_node_labels.infra.iteritems() %} {{ k|e }}: {{ v|e }} {% endfor %} - image: {{ openstack_image }} + image: {{ openstack_infra_image }} flavor: {{ infra_flavor }} key_name: {{ ssh_public_key }} net: { get_resource: net } @@ -730,7 +730,7 @@ resources: k8s_type: dns cluster_id: {{ stack_name }} type: dns - image: {{ openstack_image }} + image: {{ openstack_dns_image }} flavor: {{ dns_flavor }} key_name: {{ ssh_public_key }} net: { get_resource: net } -- cgit v1.2.1 From 6ebad037254b0c254638f6e6dfbd48e451a1ceeb Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Wed, 16 Aug 2017 09:14:06 +0200 Subject: Access UI via a bastion node (#596) When using a bastion and a single master, use the lb-secgrp to access UI port allowed from the ingress bastion node cidr. For HA (masters>1), UI still should be accessed via the LB node's ingress cidr, omitting the bastion. 
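For illustration, a single-master deployment might opt into this with something like the following (a sketch only; both toggles default to False in this patch, and the group_vars placement plus the example CIDR are assumptions):

```
# Hypothetical group_vars override for a single-master cluster
use_bastion: True
ui_ssh_tunnel: True
# CIDR allowed to reach the tunnelled UI port (matches ssh_ingress_cidr use here)
ssh_ingress_cidr: 203.0.113.0/24
```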
Signed-off-by: Bogdan Dobrelya --- roles/openstack-stack/defaults/main.yml | 1 + roles/openstack-stack/templates/heat_stack.yaml.j2 | 20 +++++++++++++++----- roles/static_inventory/defaults/main.yml | 6 ++++++ roles/static_inventory/tasks/main.yml | 7 +++++++ roles/static_inventory/tasks/sshtun.yml | 15 +++++++++++++++ .../static_inventory/templates/ssh-tunnel.service.j2 | 20 ++++++++++++++++++++ 6 files changed, 64 insertions(+), 5 deletions(-) create mode 100644 roles/static_inventory/tasks/sshtun.yml create mode 100644 roles/static_inventory/templates/ssh-tunnel.service.j2 (limited to 'roles') diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml index 803a96389..c8529612e 100644 --- a/roles/openstack-stack/defaults/main.yml +++ b/roles/openstack-stack/defaults/main.yml @@ -13,3 +13,4 @@ num_infra: 1 nodes_to_remove: [] etcd_volume_size: 2 use_bastion: False +ui_ssh_tunnel: False diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index c41bf15be..a670ff0e3 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -439,7 +439,7 @@ resources: port_range_min: 53 port_range_max: 53 remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" -{% if num_masters > 1 %} +{% if num_masters > 1 or ui_ssh_tunnel|bool %} lb-secgrp: type: OS::Neutron::SecurityGroup properties: @@ -450,14 +450,21 @@ resources: protocol: tcp port_range_min: {{ openshift_master_api_port | default(8443) }} port_range_max: {{ openshift_master_api_port | default(8443) }} - remote_ip_prefix: {{ lb_ingress_cidr }} - {% if openshift_master_console_port is defined and openshift_master_console_port != openshift_master_api_port %} + remote_ip_prefix: {{ lb_ingress_cidr | default(bastion_ingress_cidr) }} +{% if ui_ssh_tunnel|bool %} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_api_port | default(8443) }} + port_range_max: {{ openshift_master_api_port | default(8443) }} + remote_ip_prefix: {{ ssh_ingress_cidr }} +{% endif %} +{% if openshift_master_console_port is defined and openshift_master_console_port != openshift_master_api_port %} - direction: ingress protocol: tcp port_range_min: {{ openshift_master_console_port | default(8443) }} port_range_max: {{ openshift_master_console_port | default(8443) }} - remote_ip_prefix: {{ lb_ingress_cidr }} - {% endif %} + remote_ip_prefix: {{ lb_ingress_cidr | default(bastion_ingress_cidr) }} +{% endif %} {% endif %} etcd: @@ -695,6 +702,9 @@ resources: - { get_resource: flat-secgrp } {% else %} - { get_resource: node-secgrp } +{% endif %} +{% if ui_ssh_tunnel|bool and num_masters < 2 %} + - { get_resource: lb-secgrp } {% endif %} - { get_resource: infra-secgrp } - { get_resource: common-secgrp } diff --git a/roles/static_inventory/defaults/main.yml b/roles/static_inventory/defaults/main.yml index 5b8aacf5c..871700f8c 100644 --- a/roles/static_inventory/defaults/main.yml +++ b/roles/static_inventory/defaults/main.yml @@ -20,4 +20,10 @@ private_ssh_key: ~/.ssh/openshift # The patch to store the generated config to access bastion/hosts ssh_config_path: /tmp/ssh.config.ansible +# The IP:port to make an SSH tunnel to access UI on the 1st master +# via bastion node (requires sudo on the ansible control node) +ui_ssh_tunnel: False +ui_port: "{{ openshift_master_api_port | default(8443) }}" +target_ip: "{{ hostvars[groups['masters.' 
+ stack_name|quote][0]].private_v4 }}" + openstack_private_network: private diff --git a/roles/static_inventory/tasks/main.yml b/roles/static_inventory/tasks/main.yml index b58866017..24e11beb6 100644 --- a/roles/static_inventory/tasks/main.yml +++ b/roles/static_inventory/tasks/main.yml @@ -8,3 +8,10 @@ - name: Generate SSH config for accessing hosts via bastion include: sshconfig.yml when: use_bastion|bool + +- name: Configure SSH tunneling to access UI + include: sshtun.yml + become: true + when: + - use_bastion|bool + - ui_ssh_tunnel|bool diff --git a/roles/static_inventory/tasks/sshtun.yml b/roles/static_inventory/tasks/sshtun.yml new file mode 100644 index 000000000..b0e4c832c --- /dev/null +++ b/roles/static_inventory/tasks/sshtun.yml @@ -0,0 +1,15 @@ +--- +- name: Create ssh tunnel systemd service + template: + src: ssh-tunnel.service.j2 + dest: /etc/systemd/system/ssh-tunnel.service + mode: 0644 + +- name: reload the systemctl daemon after file update + command: systemctl daemon-reload + +- name: Enable ssh tunnel service + service: + name: ssh-tunnel + enabled: true + state: restarted diff --git a/roles/static_inventory/templates/ssh-tunnel.service.j2 b/roles/static_inventory/templates/ssh-tunnel.service.j2 new file mode 100644 index 000000000..0d1cf8f79 --- /dev/null +++ b/roles/static_inventory/templates/ssh-tunnel.service.j2 @@ -0,0 +1,20 @@ +[Unit] +Description=Set up ssh tunneling for OpenShift cluster UI +After=network.target + +[Service] +ExecStart=/usr/bin/ssh -NT -o \ + ServerAliveInterval=60 -o \ + UserKnownHostsFile=/dev/null -o \ + StrictHostKeyChecking=no -o \ + ExitOnForwardFailure=no -i \ + {{ private_ssh_key }} {{ ssh_user }}@{{ hostvars['bastion'].ansible_host }} \ + -L 0.0.0.0:{{ ui_port }}:{{ target_ip }}:{{ ui_port }} + + +# Restart every >2 seconds to avoid StartLimitInterval failure +RestartSec=5 +Restart=always + +[Install] +WantedBy=multi-user.target -- cgit v1.2.1 From d41308f238b1c8dac35682e64f661c2e4b01c317 Mon Sep 17 00:00:00 2001 From: Tlacenka Date: Wed, 16 Aug 2017 11:09:02 +0200 Subject: Set custom hostnames for servers (#643) * README, all.yml, stack_params.yml, heat_stack.yaml.j2: hostname customisation added * hostnames customisation: default set in stack_params * heat_stack: bug fix * fixed commented defaults in group_vars/all.yml --- roles/openstack-stack/templates/heat_stack.yaml.j2 | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) (limited to 'roles') diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index a670ff0e3..3623035c6 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -483,7 +483,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: etcd + k8s_type: {{ etcd_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -526,7 +526,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: lb + k8s_type: {{ lb_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -571,7 +571,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: master + k8s_type: {{ master_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -624,11 +624,10 @@ resources: properties: name: str_replace: - template: subtype-k8s_type-%index%.cluster_id + template: 
sub_type_k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: node - subtype: app + sub_type_k8s_type: {{ node_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -672,11 +671,10 @@ resources: properties: name: str_replace: - template: subtypek8s_type-%index%.cluster_id + template: sub_type_k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: node - subtype: infra + sub_type_k8s_type: {{ infra_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -730,7 +728,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: dns + k8s_type: {{ dns_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: -- cgit v1.2.1 From 998634ffd25a17ff581a124396fd1183706f2478 Mon Sep 17 00:00:00 2001 From: ioggstream Date: Wed, 16 Aug 2017 16:43:58 +0200 Subject: Avoid server recreation in case of user_data modification. (#651) --- roles/openstack-stack/templates/heat_stack_server.yaml.j2 | 1 + roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 | 1 + 2 files changed, 2 insertions(+) (limited to 'roles') diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 index 85af311ec..32fb166f6 100644 --- a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 @@ -134,6 +134,7 @@ resources: user_data: get_file: user-data user_data_format: RAW + user_data_update_policy: IGNORE metadata: group: { get_param: group } environment: { get_param: cluster_env } diff --git a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 index a22b7c6d0..638fc8b45 100644 --- a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 @@ -119,6 +119,7 @@ resources: user_data: get_file: user-data user_data_format: RAW + user_data_update_policy: IGNORE metadata: group: { get_param: group } environment: { get_param: cluster_env } -- cgit v1.2.1 From 6a528d5803619f93c734c23be44a2021f1d35ee9 Mon Sep 17 00:00:00 2001 From: Tlacenka Date: Thu, 17 Aug 2017 13:48:20 +0200 Subject: Configure different Docker volume sizes for different roles (#644) * README, all.yml, stack_params.yaml, openstack-stack: added docker volume size customisation - app_volume_size changed to node_volume_size (it is node everywhere else) * all.yml, stack_params.yaml,openstack-stack: added customisation for lb, etcd, dns * README: updated * README: updated info about ephemeral volumes --- roles/openstack-stack/defaults/main.yml | 3 ++- roles/openstack-stack/templates/heat_stack.yaml.j2 | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'roles') diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml index c8529612e..fbca0bdf6 100644 --- a/roles/openstack-stack/defaults/main.yml +++ b/roles/openstack-stack/defaults/main.yml @@ -1,5 +1,4 @@ --- -dns_volume_size: 1 ssh_ingress_cidr: 0.0.0.0/0 node_ingress_cidr: 0.0.0.0/0 master_ingress_cidr: 0.0.0.0/0 @@ -12,5 +11,7 @@ num_dns: 1 num_infra: 1 nodes_to_remove: [] etcd_volume_size: 2 +dns_volume_size: 1 +lb_volume_size: 5 use_bastion: False ui_ssh_tunnel: False diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 
b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 3623035c6..c0da4c184 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -550,7 +550,7 @@ resources: template: openshift-ansible-cluster_id-net params: cluster_id: {{ stack_name }} - volume_size: 5 + volume_size: {{ lb_volume_size }} depends_on: - interface {% endif %} @@ -658,7 +658,7 @@ resources: template: openshift-ansible-cluster_id-net params: cluster_id: {{ stack_name }} - volume_size: {{ app_volume_size }} + volume_size: {{ node_volume_size }} depends_on: - interface -- cgit v1.2.1 From f4b584fcef4fad12be931631e0c95ac677799ee7 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Wed, 16 Aug 2017 11:04:27 +0200 Subject: Add docs and defaults for multi-master setup Additionally, add the lb group to contain lb nodes to the static inventory template. Include the lb group into the OSEv3 group, in order to apply the cluster group vars to it. Signed-off-by: Bogdan Dobrelya --- roles/static_inventory/templates/inventory.j2 | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'roles') diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 index 24dc9d4a8..987c98ec6 100644 --- a/roles/static_inventory/templates/inventory.j2 +++ b/roles/static_inventory/templates/inventory.j2 @@ -39,6 +39,7 @@ dns [OSEv3:children] nodes etcd +lb # Set variables common for all OSEv3 hosts #[OSEv3:vars] @@ -68,6 +69,9 @@ nodes.{{ stack_name }} [dns:children] dns.{{ stack_name }} +[lb:children] +lb.{{ stack_name }} + # Empty placeholders for all groups of the cluster nodes [masters.{{ stack_name }}] [etcd.{{ stack_name }}] @@ -75,6 +79,7 @@ dns.{{ stack_name }} [nodes.{{ stack_name }}] [app.{{ stack_name }}] [dns.{{ stack_name }}] +[lb.{{ stack_name }}] # BEGIN Autogenerated groups {% for group in groups %} -- cgit v1.2.1 From ce3be1e039fd3bddf245bdaed83466f12b59937b Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Fri, 25 Aug 2017 09:25:20 +0200 Subject: Cast num_* as int for jinja templates (#685) Signed-off-by: Bogdan Dobrelya --- roles/openstack-stack/templates/heat_stack.yaml.j2 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'roles') diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index c0da4c184..1ecf84aa6 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -439,7 +439,7 @@ resources: port_range_min: 53 port_range_max: 53 remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" -{% if num_masters > 1 or ui_ssh_tunnel|bool %} +{% if num_masters|int > 1 or ui_ssh_tunnel|bool %} lb-secgrp: type: OS::Neutron::SecurityGroup properties: @@ -513,7 +513,7 @@ resources: depends_on: - interface -{% if num_masters > 1 %} +{% if num_masters|int > 1 %} loadbalancer: type: OS::Heat::ResourceGroup properties: @@ -592,7 +592,7 @@ resources: {% else %} - { get_resource: master-secgrp } - { get_resource: node-secgrp } -{% if num_etcd == 0 %} +{% if num_etcd|int == 0 %} - { get_resource: etcd-secgrp } {% endif %} {% endif %} @@ -701,7 +701,7 @@ resources: {% else %} - { get_resource: node-secgrp } {% endif %} -{% if ui_ssh_tunnel|bool and num_masters < 2 %} +{% if ui_ssh_tunnel|bool and num_masters|int < 2 %} - { get_resource: lb-secgrp } {% endif %} - { get_resource: infra-secgrp } -- cgit v1.2.1 From 2ea1ccfb37461a70d329655f7eeaaab090f1ca0d Mon Sep 17 00:00:00 2001 From: 
Bogdan Dobrelya Date: Fri, 25 Aug 2017 16:15:40 +0200 Subject: Support external/pre-provisioned authoritative cluster DNS (#690) * Document how to use fully external DNS servers w/o provisioning dns servers group with Heat. * Document how to use a mixed servers setup for dynamic records updates mathing public or private views. * Allow custom nsupdate key names for OSP10 dns service compatibility. The osp-dns configures the named service with the fixed key_name 'update-key'. Add optional key_name for the external_nsupdate_keys public section to allow custom key names. --- roles/dns-records/tasks/main.yml | 6 ++++-- roles/openstack-stack/templates/heat_stack.yaml.j2 | 7 +++++++ 2 files changed, 11 insertions(+), 2 deletions(-) (limited to 'roles') diff --git a/roles/dns-records/tasks/main.yml b/roles/dns-records/tasks/main.yml index 3672a8ea6..e9bce9718 100644 --- a/roles/dns-records/tasks/main.yml +++ b/roles/dns-records/tasks/main.yml @@ -14,6 +14,7 @@ nsupdate_server_private: "{{ external_nsupdate_keys['private']['server'] }}" nsupdate_key_secret_private: "{{ external_nsupdate_keys['private']['key_secret'] }}" nsupdate_key_algorithm_private: "{{ external_nsupdate_keys['private']['key_algorithm'] }}" + nsupdate_private_key_name: "{{ external_nsupdate_keys['private']['key_name']|default('private-' + full_dns_domain) }}" when: - external_nsupdate_keys is defined - external_nsupdate_keys['private'] is defined @@ -32,7 +33,7 @@ - view: "private" zone: "{{ full_dns_domain }}" server: "{{ nsupdate_server_private }}" - key_name: "{{ ( 'private-' + full_dns_domain ) }}" + key_name: "{{ nsupdate_private_key_name|default('private-' + full_dns_domain) }}" key_secret: "{{ nsupdate_key_secret_private }}" key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}" entries: "{{ private_records }}" @@ -54,6 +55,7 @@ nsupdate_server_public: "{{ external_nsupdate_keys['public']['server'] }}" nsupdate_key_secret_public: "{{ external_nsupdate_keys['public']['key_secret'] }}" nsupdate_key_algorithm_public: "{{ external_nsupdate_keys['public']['key_algorithm'] }}" + nsupdate_public_key_name: "{{ external_nsupdate_keys['public']['key_name']|default('public-' + full_dns_domain) }}" when: - external_nsupdate_keys is defined - external_nsupdate_keys['public'] is defined @@ -72,7 +74,7 @@ - view: "public" zone: "{{ full_dns_domain }}" server: "{{ nsupdate_server_public }}" - key_name: "{{ ( 'public-' + full_dns_domain ) }}" + key_name: "{{ nsupdate_public_key_name|default('public-' + full_dns_domain) }}" key_secret: "{{ nsupdate_key_secret_public }}" key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" entries: "{{ public_records }}" diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 1ecf84aa6..ea2742a2c 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -54,6 +54,7 @@ outputs: description: Floating IPs of the nodes value: { get_attr: [ infra_nodes, floating_ip ] } +{% if num_dns|int > 0 %} dns_name: description: Name of the DNS value: @@ -68,6 +69,7 @@ outputs: dns_private_ips: description: Private IPs of the DNS value: { get_attr: [ dns, private_ip ] } +{% endif %} resources: @@ -405,6 +407,7 @@ resources: port_range_min: 443 port_range_max: 443 +{% if num_dns|int > 0 %} dns-secgrp: type: OS::Neutron::SecurityGroup properties: @@ -439,6 +442,8 @@ resources: port_range_min: 53 port_range_max: 53 remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" +{% endif %} 
+ {% if num_masters|int > 1 or ui_ssh_tunnel|bool %} lb-secgrp: type: OS::Neutron::SecurityGroup @@ -716,6 +721,7 @@ resources: depends_on: - interface +{% if num_dns|int > 0 %} dns: type: OS::Heat::ResourceGroup properties: @@ -755,3 +761,4 @@ resources: volume_size: {{ dns_volume_size }} depends_on: - interface +{% endif %} -- cgit v1.2.1 From 06abd17792fafc3adec3916f56c69800690b1431 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Tue, 5 Sep 2017 15:56:43 +0200 Subject: Document global DNS security options (#694) * Document global DNS security options Related changes: * Do not create a view if externally managed. * Allow to specify the recursion settings for public/private views defined by the dns-view role. Signed-off-by: Bogdan Dobrelya * Document public_dns_nameservers better Also use it as the private view forwarder Signed-off-by: Bogdan Dobrelya --- roles/dns-views/defaults/main.yml | 4 ++++ roles/dns-views/tasks/main.yml | 7 ++++++- 2 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 roles/dns-views/defaults/main.yml (limited to 'roles') diff --git a/roles/dns-views/defaults/main.yml b/roles/dns-views/defaults/main.yml new file mode 100644 index 000000000..c9f8248af --- /dev/null +++ b/roles/dns-views/defaults/main.yml @@ -0,0 +1,4 @@ +--- +external_nsupdate_keys: {} +named_private_recursion: 'yes' +named_public_recursion: 'no' diff --git a/roles/dns-views/tasks/main.yml b/roles/dns-views/tasks/main.yml index 7165b4269..ffbad2e3f 100644 --- a/roles/dns-views/tasks/main.yml +++ b/roles/dns-views/tasks/main.yml @@ -8,18 +8,23 @@ set_fact: private_named_view: - name: "private" + recursion: "{{ named_private_recursion }}" acl_entry: "{{ acl_list }}" zone: - dns_domain: "{{ full_dns_domain }}" + forwarder: "{{ public_dns_nameservers }}" + when: external_nsupdate_keys['private'] is undefined - name: "Generate the public view" set_fact: public_named_view: - name: "public" + recursion: "{{ named_public_recursion }}" zone: - dns_domain: "{{ full_dns_domain }}" forwarder: "{{ public_dns_nameservers }}" + when: external_nsupdate_keys['public'] is undefined - name: "Generate the final named_config_views" set_fact: - named_config_views: "{{ private_named_view + public_named_view }}" + named_config_views: "{{ private_named_view|default([]) + public_named_view|default([]) }}" -- cgit v1.2.1 From daa0b91119d2c16860a19b4ead2d0d128f8bc5ce Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 6 Sep 2017 10:24:16 +0200 Subject: Allow using a provider network (#701) * Allow using a provider network This adds a new option `openstack_provider_network_name` which will take a name of an existing network and put the servers there. It will also prevent creating floating IP addresses as the provider network's IPs should already be accessible without any additional routing required. Fixes #622 * Requested changes Don't fail on external/private networks and use role defaults for the provider network. 
* Add missing endif --- roles/openstack-stack/defaults/main.yml | 1 + .../tasks/subnet_update_dns_servers.yaml | 1 + roles/openstack-stack/templates/heat_stack.yaml.j2 | 113 +++++++++++++++------ .../templates/heat_stack_server.yaml.j2 | 12 +++ roles/static_inventory/tasks/openstack.yml | 25 ++++- 5 files changed, 119 insertions(+), 33 deletions(-) (limited to 'roles') diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml index fbca0bdf6..c16b5dc00 100644 --- a/roles/openstack-stack/defaults/main.yml +++ b/roles/openstack-stack/defaults/main.yml @@ -15,3 +15,4 @@ dns_volume_size: 1 lb_volume_size: 5 use_bastion: False ui_ssh_tunnel: False +provider_network: None diff --git a/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml b/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml index be4f07b97..af28fc98f 100644 --- a/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml +++ b/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml @@ -6,3 +6,4 @@ state: present use_default_subnetpool: yes dns_nameservers: "{{ [private_dns_server|default(public_dns_nameservers[0])]|union(public_dns_nameservers)|unique }}" + when: not provider_network diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index ea2742a2c..b6b5e3613 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -73,6 +73,7 @@ outputs: resources: +{% if not provider_network %} net: type: OS::Neutron::Net properties: @@ -129,6 +130,8 @@ resources: router_id: { get_resource: router } subnet_id: { get_resource: subnet } +{% endif %} + # keypair: # type: OS::Nova::KeyPair # properties: @@ -501,22 +504,29 @@ resources: image: {{ openstack_etcd_image }} flavor: {{ etcd_flavor }} key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ provider_network }} +{% else %} net: { get_resource: net } subnet: { get_resource: subnet } - secgrp: - - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } - - { get_resource: common-secgrp } -{% if not use_bastion|bool %} - floating_network: {{ external_network }} -{% endif %} net_name: str_replace: template: openshift-ansible-cluster_id-net params: cluster_id: {{ stack_name }} +{% endif %} + secgrp: + - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } + - { get_resource: common-secgrp } +{% if not use_bastion|bool and not provider_network %} + floating_network: {{ external_network }} +{% endif %} volume_size: {{ etcd_volume_size }} +{% if not provider_network %} depends_on: - interface +{% endif %} {% if num_masters|int > 1 %} loadbalancer: @@ -544,20 +554,29 @@ resources: image: {{ openstack_lb_image }} flavor: {{ lb_flavor }} key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ provider_network }} +{% else %} net: { get_resource: net } subnet: { get_resource: subnet } - secgrp: - - { get_resource: lb-secgrp } - - { get_resource: common-secgrp } - floating_network: {{ external_network }} net_name: str_replace: template: openshift-ansible-cluster_id-net params: cluster_id: {{ stack_name }} +{% endif %} + secgrp: + - { get_resource: lb-secgrp } + - { get_resource: common-secgrp } + {% if not provider_network %} + floating_network: {{ external_network }} + {% endif %} volume_size: {{ lb_volume_size }} 
+ {% if not provider_network %} depends_on: - interface + {% endif %} {% endif %} masters: @@ -589,8 +608,18 @@ resources: image: {{ openstack_master_image }} flavor: {{ master_flavor }} key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ provider_network }} +{% else %} net: { get_resource: net } subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} +{% endif %} secgrp: {% if openstack_flat_secgrp|default(False)|bool %} - { get_resource: flat-secgrp } @@ -602,17 +631,14 @@ resources: {% endif %} {% endif %} - { get_resource: common-secgrp } -{% if not use_bastion|bool %} +{% if not use_bastion|bool and not provider_network %} floating_network: {{ external_network }} {% endif %} - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} volume_size: {{ master_volume_size }} +{% if not provider_network %} depends_on: - interface +{% endif %} compute_nodes: type: OS::Heat::ResourceGroup @@ -650,22 +676,29 @@ resources: image: {{ openstack_node_image }} flavor: {{ node_flavor }} key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ provider_network }} +{% else %} net: { get_resource: net } subnet: { get_resource: subnet } - secgrp: - - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } - - { get_resource: common-secgrp } -{% if not use_bastion|bool %} - floating_network: {{ external_network }} -{% endif %} net_name: str_replace: template: openshift-ansible-cluster_id-net params: cluster_id: {{ stack_name }} +{% endif %} + secgrp: + - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } + - { get_resource: common-secgrp } +{% if not use_bastion|bool and not provider_network %} + floating_network: {{ external_network }} +{% endif %} volume_size: {{ node_volume_size }} +{% if not provider_network %} depends_on: - interface +{% endif %} infra_nodes: type: OS::Heat::ResourceGroup @@ -697,8 +730,18 @@ resources: image: {{ openstack_infra_image }} flavor: {{ infra_flavor }} key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ provider_network }} +{% else %} net: { get_resource: net } subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} +{% endif %} secgrp: # TODO(bogdando) filter only required node rules into infra-secgrp {% if openstack_flat_secgrp|default(False)|bool %} @@ -711,15 +754,14 @@ resources: {% endif %} - { get_resource: infra-secgrp } - { get_resource: common-secgrp } +{% if not provider_network %} floating_network: {{ external_network }} - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} +{% endif %} volume_size: {{ infra_volume_size }} +{% if not provider_network %} depends_on: - interface +{% endif %} {% if num_dns|int > 0 %} dns: @@ -747,18 +789,27 @@ resources: image: {{ openstack_dns_image }} flavor: {{ dns_flavor }} key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ provider_network }} +{% else %} net: { get_resource: net } subnet: { get_resource: subnet } - secgrp: - - { get_resource: dns-secgrp } - - { get_resource: common-secgrp } - floating_network: {{ 
external_network }} net_name: str_replace: template: openshift-ansible-cluster_id-net params: cluster_id: {{ stack_name }} +{% endif %} + secgrp: + - { get_resource: dns-secgrp } + - { get_resource: common-secgrp } +{% if not provider_network %} + floating_network: {{ external_network }} +{% endif %} volume_size: {{ dns_volume_size }} +{% if not provider_network %} depends_on: - interface {% endif %} +{% endif %} diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 index 32fb166f6..a520a8fe2 100644 --- a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 @@ -61,20 +61,24 @@ parameters: label: Net name description: Net name +{% if not provider_network %} subnet: type: string label: Subnet ID description: Subnet resource +{% endif %} secgrp: type: comma_delimited_list label: Security groups description: Security group resources +{% if not provider_network %} floating_network: type: string label: Floating network description: Network to allocate floating IP from +{% endif %} availability_zone: type: string @@ -117,7 +121,11 @@ outputs: - server - addresses - { get_param: net_name } +{% if provider_network %} + - 0 +{% else %} - 1 +{% endif %} - addr resources: @@ -147,15 +155,19 @@ resources: type: OS::Neutron::Port properties: network: { get_param: net } +{% if not provider_network %} fixed_ips: - subnet: { get_param: subnet } +{% endif %} security_groups: { get_param: secgrp } +{% if not provider_network %} floating-ip: type: OS::Neutron::FloatingIP properties: floating_network: { get_param: floating_network } port_id: { get_resource: port } +{% endif %} {% if not ephemeral_volumes|default(false)|bool %} cinder_volume: diff --git a/roles/static_inventory/tasks/openstack.yml b/roles/static_inventory/tasks/openstack.yml index 75d0ee6d5..e36974d93 100644 --- a/roles/static_inventory/tasks/openstack.yml +++ b/roles/static_inventory/tasks/openstack.yml @@ -24,6 +24,15 @@ when: - refresh_inventory|bool + - name: set_fact for openstack inventory nodes with provider network + set_fact: + registered_nodes_floating: "{{ (registered_nodes_output.stdout | from_json) | json_query(q) }}" + vars: + q: "[] | [?metadata.clusterid=='{{stack_name}}'] | [?public_v4=='']" + when: + - refresh_inventory|bool + - openstack_provider_network_name|default(None) + - name: Add cluster nodes w/o floating IPs to inventory with_items: "{{ registered_nodes|difference(registered_nodes_floating) }}" add_host: @@ -49,7 +58,14 @@ add_host: name: '{{ item.name }}' groups: '{{ item.metadata.group }}' - ansible_host: "{% if use_bastion|bool %}{{ item.name }}{% else %}{{ item.public_v4 }}{% endif %}" + ansible_host: >- + {% if use_bastion|bool -%} + {{ item.name }} + {%- elif openstack_provider_network_name|default(None) -%} + {{ item.private_v4 }} + {%- else -%} + {{ item.public_v4 }} + {%- endif %} ansible_fqdn: '{{ item.name }}' ansible_user: '{{ ssh_user }}' ansible_private_key_file: '{{ private_ssh_key }}' @@ -57,7 +73,12 @@ private_v4: >- {% set node = registered_nodes | json_query("[?name=='" + item.name + "']") -%} {{ node[0].addresses[openstack_private_network|quote][0].addr }} - public_v4: '{{ item.public_v4 }}' + public_v4: >- + {% if openstack_provider_network_name|default(None) -%} + {{ item.private_v4 }} + {%- else -%} + {{ item.public_v4 }} + {%- endif %} - name: Add bastion node to inventory add_host: -- cgit v1.2.1 From 97c99ad8582370803e2841b07985260886614eb2 Mon Sep 
17 00:00:00 2001 From: tzumainn Date: Wed, 6 Sep 2017 09:36:09 -0400 Subject: Point openshift_master_cluster_public_hostname at master or lb if defined (#706) * Point openshift_master_cluster_public_hostname at master or load balancer if specified * cleanup * remove extraneous brackets * corrections * added doc section * add private records --- roles/dns-records/tasks/main.yml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) (limited to 'roles') diff --git a/roles/dns-records/tasks/main.yml b/roles/dns-records/tasks/main.yml index e9bce9718..305a55195 100644 --- a/roles/dns-records/tasks/main.yml +++ b/roles/dns-records/tasks/main.yml @@ -9,6 +9,20 @@ private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['private_v4'] } ] }}" with_items: "{{ groups['infra_hosts'] }}" +- name: "Add public master cluster hostname records to the private A records (single master)" + set_fact: + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].private_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openstack_num_masters == 1 + +- name: "Add public master cluster hostname records to the private A records (multi-master)" + set_fact: + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].private_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openstack_num_masters > 1 + - name: "Set the private DNS server to use the external value (if provided)" set_fact: nsupdate_server_private: "{{ external_nsupdate_keys['private']['server'] }}" @@ -50,6 +64,20 @@ with_items: "{{ groups['infra_hosts'] }}" when: hostvars[item]['public_v4'] is defined +- name: "Add public master cluster hostname records to the public A records (single master)" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].public_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openstack_num_masters == 1 + +- name: "Add public master cluster hostname records to the public A records (multi-master)" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].public_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openstack_num_masters > 1 + - name: "Set the public DNS server details to use the external value (if provided)" set_fact: nsupdate_server_public: "{{ external_nsupdate_keys['public']['server'] }}" -- cgit v1.2.1 From 1cf6275b983a108a02b6ef178fe35e610162b963 Mon Sep 17 00:00:00 2001 From: Antoni Segura Puimedon Date: Tue, 12 Sep 2017 10:57:38 +0200 Subject: openstack: make server ports be trunk ports (#713) This ensures that the ports that the servers were using before this commit will be parent ports of Neutron trunk ports. 
Thanks to this, there can be nested Neutron ports inside the OS::NOva::Server resources created either in the heat stack or dynamically inside the Instances. Signed-off-by: Antoni Segura Puimedon --- roles/openstack-stack/templates/heat_stack_server.yaml.j2 | 12 ++++++++++++ .../templates/heat_stack_server_nofloating.yaml.j2 | 12 ++++++++++++ 2 files changed, 24 insertions(+) (limited to 'roles') diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 index a520a8fe2..fc797941e 100644 --- a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 @@ -138,7 +138,11 @@ resources: image: { get_param: image } flavor: { get_param: flavor } networks: +{% if use_trunk_ports|default(false)|bool %} + - port: { get_attr: [trunk-port, port_id] } +{% else %} - port: { get_resource: port } +{% endif %} user_data: get_file: user-data user_data_format: RAW @@ -151,6 +155,14 @@ resources: sub-host-type: { get_param: subtype } node_labels: { get_param: node_labels } +{% if use_trunk_ports|default(false)|bool %} + trunk-port: + type: OS::Neutron::Trunk + properties: + name: { get_param: name } + port: { get_resource: port } +{% endif %} + port: type: OS::Neutron::Port properties: diff --git a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 index 638fc8b45..2c16ad778 100644 --- a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 @@ -115,7 +115,11 @@ resources: image: { get_param: image } flavor: { get_param: flavor } networks: +{% if use_trunk_ports|default(false)|bool %} + - port: { get_attr: [trunk-port, port_id] } +{% else %} - port: { get_resource: port } +{% endif %} user_data: get_file: user-data user_data_format: RAW @@ -128,6 +132,14 @@ resources: sub-host-type: { get_param: subtype } node_labels: { get_param: node_labels } +{% if use_trunk_ports|default(false)|bool %} + trunk-port: + type: OS::Neutron::Trunk + properties: + name: { get_param: name } + port: { get_resource: port } +{% endif %} + port: type: OS::Neutron::Port properties: -- cgit v1.2.1 From 074b3e526123da7a62c3d939859389c5f2a357b2 Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Tue, 12 Sep 2017 11:54:35 -0400 Subject: Add ability to support custom api and console ports (#712) * Add ability to support custom api and console ports * Missed an ingress rule --- roles/openstack-stack/templates/heat_stack.yaml.j2 | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) (limited to 'roles') diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index b6b5e3613..1abc67207 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -193,8 +193,12 @@ resources: port_range_max: 4001 - direction: ingress protocol: tcp - port_range_min: 8443 - port_range_max: 8444 + port_range_min: {{ openshift_master_api_port|default(8443) }} + port_range_max: {{ openshift_master_api_port|default(8443) }} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_console_port|default(8443) }} + port_range_max: {{ openshift_master_console_port|default(8443) }} - direction: ingress protocol: tcp port_range_min: 8053 @@ -284,8 +288,12 @@ resources: port_range_max: 4001 - direction: ingress 
protocol: tcp - port_range_min: 8443 - port_range_max: 8444 + port_range_min: {{ openshift_master_api_port|default(8443) }} + port_range_max: {{ openshift_master_api_port|default(8443) }} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_console_port|default(8443) }} + port_range_max: {{ openshift_master_console_port|default(8443) }} - direction: ingress protocol: tcp port_range_min: 8053 -- cgit v1.2.1 From b6dd8f112cd5506923b4b3ce51a1774b0bfc037c Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Mon, 11 Sep 2017 14:57:09 +0200 Subject: Pre-create a Cinder registry volume --- roles/openstack-create-cinder-registry/tasks/main.yaml | 5 +++++ roles/static_inventory/templates/inventory.j2 | 8 +++++++- 2 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 roles/openstack-create-cinder-registry/tasks/main.yaml (limited to 'roles') diff --git a/roles/openstack-create-cinder-registry/tasks/main.yaml b/roles/openstack-create-cinder-registry/tasks/main.yaml new file mode 100644 index 000000000..6e9d1c2e7 --- /dev/null +++ b/roles/openstack-create-cinder-registry/tasks/main.yaml @@ -0,0 +1,5 @@ +--- +- os_volume: + display_name: "{{ cinder_hosted_registry_name }}" + size: "{{ cinder_hosted_registry_size_gb }}" + register: cinder_registry_volume diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 index 987c98ec6..640a46ba2 100644 --- a/roles/static_inventory/templates/inventory.j2 +++ b/roles/static_inventory/templates/inventory.j2 @@ -42,10 +42,16 @@ etcd lb # Set variables common for all OSEv3 hosts -#[OSEv3:vars] +[OSEv3:vars] # For OSEv3 normal group vars, see ./group_vars/OSEv3.yml +{% if cinder_registry_volume is defined %} +openshift_hosted_registry_storage_openstack_volumeID="{{ cinder_registry_volume.id }}" +openshift_hosted_registry_storage_volume_size="{{ cinder_registry_volume.volume.size }}Gi" +{% endif %} + + # Host Groups [masters:children] -- cgit v1.2.1 From d2be3821ae085ec7faa2091df7abaf7279a983e3 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 13 Sep 2017 14:03:27 +0200 Subject: Fix the cinder_registry_volume conditional Deployments without the cinder registry would fail, because the `cinder_registry_volume` variable is still set even when we don't actually create the volume. --- roles/static_inventory/templates/inventory.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'roles') diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 index 640a46ba2..2245963c0 100644 --- a/roles/static_inventory/templates/inventory.j2 +++ b/roles/static_inventory/templates/inventory.j2 @@ -46,7 +46,7 @@ lb # For OSEv3 normal group vars, see ./group_vars/OSEv3.yml -{% if cinder_registry_volume is defined %} +{% if cinder_registry_volume is defined and 'volume' in cinder_registry_volume %} openshift_hosted_registry_storage_openstack_volumeID="{{ cinder_registry_volume.id }}" openshift_hosted_registry_storage_volume_size="{{ cinder_registry_volume.volume.size }}Gi" {% endif %} -- cgit v1.2.1 From 8a204aaec709135ebfa716459f2ba3bcf1db4f04 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 13 Sep 2017 17:44:45 +0200 Subject: Clear the previous inventory during provisioning If there was a left-over inventory from a previous run that had nodes which were subsequently removed, these would still show up in the Ansible's in-memory inventory and Ansible would fail trying to connect to them. 
This is because Ansible automatically loads the `inventory/hosts` file if it exists and even if we overwrite it later, every node and group still remains in memory. By removing the inventory file and calling the `refresh_inventory` meta task, we make sure that any left-over values are removed. --- roles/static_inventory/tasks/main.yml | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'roles') diff --git a/roles/static_inventory/tasks/main.yml b/roles/static_inventory/tasks/main.yml index 24e11beb6..3dab62df2 100644 --- a/roles/static_inventory/tasks/main.yml +++ b/roles/static_inventory/tasks/main.yml @@ -1,4 +1,12 @@ --- +- name: Remove any existing inventory + file: + path: "{{ inventory_path }}/hosts" + state: absent + +- name: Refresh the inventory + meta: refresh_inventory + - name: Generate in-memory inventory include: openstack.yml -- cgit v1.2.1 From 288fef2dd2d74baab729d7c8b628a32d337da9bc Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Tue, 19 Sep 2017 16:36:57 +0200 Subject: Empty ssh (#729) * Make `openstack_private_ssh_key` optional Before this, the deployer could not reasonably rely on their own SSH configuration or e.g. using the `--private-key` option to ansible-playbook because we always wrote the `ansible_private_key_file` value in the static inventory. This change makes the `openstack_private_ssh_key` variable truly optional: if it's not set, the static inventory will not configure the SSH key and will just rely on the existing configuration. * Update the openstack e2e CI It no longer sets the SSH keys explicitly -- which should just work with the previous commit. * Put back the `openstack_ssh_public_key` in CI This is the option we actually need to keep. This should fix the CI failures. --- roles/static_inventory/templates/inventory.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'roles') diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 index 2245963c0..8863fb7c4 100644 --- a/roles/static_inventory/templates/inventory.j2 +++ b/roles/static_inventory/templates/inventory.j2 @@ -12,7 +12,7 @@ %} public_v4={{ hostvars[host]['public_v4'] }}{% endif %} {% if 'ansible_user' in hostvars[host] %} ansible_user={{ hostvars[host]['ansible_user'] }}{% endif %} -{% if 'ansible_private_key_file' in hostvars[host] +{% if 'ansible_private_key_file' in hostvars[host] and hostvars[host]['ansible_private_key_file'] %} ansible_private_key_file={{ hostvars[host]['ansible_private_key_file'] }}{% endif %} {% if use_bastion|bool and 'ansible_ssh_extra_args' in hostvars[host] %} ansible_ssh_extra_args={{ hostvars[host]['ansible_ssh_extra_args']|quote }}{% endif %} openshift_hostname={{ host }} -- cgit v1.2.1 From 0cded5e5474a11ef5ae9c19f7e9141b6bacf14e8 Mon Sep 17 00:00:00 2001 From: tzumainn Date: Thu, 21 Sep 2017 11:30:10 -0400 Subject: load balancer formatting fix (#745) --- roles/openstack-stack/templates/heat_stack.yaml.j2 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'roles') diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 1abc67207..7acef5a6b 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -577,14 +577,14 @@ resources: secgrp: - { get_resource: lb-secgrp } - { get_resource: common-secgrp } - {% if not provider_network %} +{% if not provider_network %} floating_network: {{ external_network }} +{% endif %} 
volume_size: {{ lb_volume_size }} - {% if not provider_network %} +{% if not provider_network %} depends_on: - interface - {% endif %} +{% endif %} {% endif %} masters: -- cgit v1.2.1 From d361dc4b307781ec2bb5978f30516f266a34188c Mon Sep 17 00:00:00 2001 From: Tlacenka Date: Tue, 26 Sep 2017 13:39:55 +0200 Subject: Upscaling OpenShift application nodes (#571) * scale-up: playbook for upscaling app nodes * scale-up: removed debug * scale-up: made suggested changes * scale-up: indentation fix * upscaling: process split into two playbooks that are executed by a bash script - upscaling_run.sh: bash script, usage displayed using -h parameter - upscaling_pre-tasks: check that new value is higher, change inventory variable - upscaling_scale-up: rerun provisioning and installation, verify change * upscaling_run: fixed openshift-ansible-contrib directory name * upscaling_run: inventory can be entered as relative path * upscaling_scale-up: fixed formatting * upscaling: minor changes * upscaling: moved to .../provisioning/openstack directory, README updated, minor changes made * README: minor changes * README: formatting * uspcaling: minor fix * upscaling: fix * upscaling: added customisations, fixes - openshift-ansible-contrib and openshift-ansible paths are customisable - fixed implicit incrementation by 1 * upscaling: fixes * upscaling: fixes * upscaling: another fix * upscaling: another fix * upscaling: fix * upscaling: back to a single playbook, README updated * minor fix * pre_tasks: added labels for autoscaling * scale-up: fixes * scale-up: fixed host variables, post-verification is only based on labels * scale-up: added openshift-ansible path customisation - path has to be absolute, cannot contain '/' at the end * scale-up: fix * scale-up: debug removed * README: added docs on openshift_ansible_dir, note about bastion * static_inventory: newly added nodes are added to new_nodes group - note: re-running provisioning fails when trying to install docker * removing new line * scale-up: running byo/config.yml or scaleup.yml based on the situation - (whether there is an existing deployment or not) * openstack.yml: indentation fix * added refresh inventory * upscaling: new_nodes only contains new does, it is not used during the first deployment * static_inventory: make sure that new nodes end up only in their new_nodes group * bug fixes * another fix * fixed condition * scale-up, static_inventory role: all app node data gathered before provisioning * upscaling: bug fixes * upscaling: another fixes * fixes * upscaling: fix * upscaling: fix * upscaling: another logic fix * bug fix for non-scaling deployments --- .../tasks/filter_out_new_app_nodes.yaml | 15 +++++++++++++ roles/static_inventory/tasks/openstack.yml | 26 ++++++++++++++++++++-- roles/static_inventory/templates/inventory.j2 | 4 ++++ 3 files changed, 43 insertions(+), 2 deletions(-) create mode 100644 roles/static_inventory/tasks/filter_out_new_app_nodes.yaml (limited to 'roles') diff --git a/roles/static_inventory/tasks/filter_out_new_app_nodes.yaml b/roles/static_inventory/tasks/filter_out_new_app_nodes.yaml new file mode 100644 index 000000000..826efe78d --- /dev/null +++ b/roles/static_inventory/tasks/filter_out_new_app_nodes.yaml @@ -0,0 +1,15 @@ +--- +- name: Add all new app nodes to new_app_nodes + when: + - 'oc_old_app_nodes is defined' + - 'oc_old_app_nodes | list' + - 'node.name not in oc_old_app_nodes' + - 'node["metadata"]["sub-host-type"] == "app"' + register: result + set_fact: + new_app_nodes: '{{ new_app_nodes }} + [ {{ node 
}} ]' + +- name: If the node was added to new_nodes, remove it from registered nodes + set_fact: + registered_nodes: '{{ registered_nodes | difference([ node ]) }}' + when: 'not result | skipped' diff --git a/roles/static_inventory/tasks/openstack.yml b/roles/static_inventory/tasks/openstack.yml index e36974d93..adf78c966 100644 --- a/roles/static_inventory/tasks/openstack.yml +++ b/roles/static_inventory/tasks/openstack.yml @@ -37,7 +37,6 @@ with_items: "{{ registered_nodes|difference(registered_nodes_floating) }}" add_host: name: '{{ item.name }}' - groups: '{{ item.metadata.group }}' ansible_host: >- {% if use_bastion|bool -%} {{ item.name }} @@ -57,7 +56,6 @@ with_items: "{{ registered_nodes_floating }}" add_host: name: '{{ item.name }}' - groups: '{{ item.metadata.group }}' ansible_host: >- {% if use_bastion|bool -%} {{ item.name }} @@ -80,6 +78,30 @@ {{ item.public_v4 }} {%- endif %} + # Split registered_nodes into old nodes and new app nodes + # Add new app nodes to new_nodes host group for upscaling + - name: Create new_app_nodes variable + set_fact: + new_app_nodes: [] + + - name: Filter new app nodes out of registered_nodes + include: filter_out_new_app_nodes.yaml + with_items: "{{ registered_nodes }}" + loop_control: + loop_var: node + + - name: Add new app nodes to the new_nodes section (if a deployment already exists) + with_items: "{{ new_app_nodes }}" + add_host: + name: "{{ item.name }}" + groups: new_nodes, app + + - name: Add the rest of cluster nodes to their corresponding groups + with_items: "{{ registered_nodes }}" + add_host: + name: '{{ item.name }}' + groups: '{{ item.metadata.group }}' + - name: Add bastion node to inventory add_host: name: bastion diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 index 8863fb7c4..9dfbe3a5b 100644 --- a/roles/static_inventory/templates/inventory.j2 +++ b/roles/static_inventory/templates/inventory.j2 @@ -40,6 +40,7 @@ dns nodes etcd lb +new_nodes # Set variables common for all OSEv3 hosts [OSEv3:vars] @@ -78,6 +79,8 @@ dns.{{ stack_name }} [lb:children] lb.{{ stack_name }} +[new_nodes:children] + # Empty placeholders for all groups of the cluster nodes [masters.{{ stack_name }}] [etcd.{{ stack_name }}] @@ -86,6 +89,7 @@ lb.{{ stack_name }} [app.{{ stack_name }}] [dns.{{ stack_name }}] [lb.{{ stack_name }}] +[new_nodes.{{ stack_name }}] # BEGIN Autogenerated groups {% for group in groups %} -- cgit v1.2.1 From 4669bf33d611555613dec904b1b33a1908f0a35b Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Tue, 26 Sep 2017 14:36:12 +0200 Subject: Fix public master cluster DNS record when using bastion (#752) When using a bastion and a single master, add the bastion node's public IP the public master's IP for the DNS record. 
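As an illustration (a sketch, not taken from the patch itself): with a deployment like the one below, the public A record for the cluster hostname now resolves to the bastion's floating IP rather than the master's:

```
# Hypothetical single-master deployment behind a bastion
use_bastion: True
openstack_num_masters: 1
openshift_master_cluster_public_hostname: console.openshift.example.com
```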
Signed-off-by: Bogdan Dobrelya --- roles/dns-records/defaults/main.yml | 2 ++ roles/dns-records/tasks/main.yml | 9 +++++++++ 2 files changed, 11 insertions(+) create mode 100644 roles/dns-records/defaults/main.yml (limited to 'roles') diff --git a/roles/dns-records/defaults/main.yml b/roles/dns-records/defaults/main.yml new file mode 100644 index 000000000..3f7fa783f --- /dev/null +++ b/roles/dns-records/defaults/main.yml @@ -0,0 +1,2 @@ +--- +use_bastion: False diff --git a/roles/dns-records/tasks/main.yml b/roles/dns-records/tasks/main.yml index 305a55195..7148b016a 100644 --- a/roles/dns-records/tasks/main.yml +++ b/roles/dns-records/tasks/main.yml @@ -70,6 +70,15 @@ when: - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - openstack_num_masters == 1 + - not use_bastion|bool + +- name: "Add public master cluster hostname records to the public A records (single master behind a bastion)" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openstack_num_masters == 1 + - use_bastion|bool - name: "Add public master cluster hostname records to the public A records (multi-master)" set_fact: -- cgit v1.2.1 From 15be1ebcf1705bc5e9347463594f50cc9d0f27b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Mon, 2 Oct 2017 21:08:45 +0000 Subject: Adding the option to use 'stack_state' to allow for easy de-provisioning (#754) * Adding 'openstack-stack-delete' role to allow for easy de-provisioning * Updated per etsauer's comments --- roles/openstack-stack/defaults/main.yml | 3 ++ roles/openstack-stack/tasks/cleanup.yml | 6 +++ roles/openstack-stack/tasks/generate-templates.yml | 32 +++++++++++++ roles/openstack-stack/tasks/main.yml | 54 ++++++---------------- 4 files changed, 56 insertions(+), 39 deletions(-) create mode 100644 roles/openstack-stack/tasks/cleanup.yml create mode 100644 roles/openstack-stack/tasks/generate-templates.yml (limited to 'roles') diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml index c16b5dc00..6f1949286 100644 --- a/roles/openstack-stack/defaults/main.yml +++ b/roles/openstack-stack/defaults/main.yml @@ -1,4 +1,7 @@ --- + +stack_state: 'present' + ssh_ingress_cidr: 0.0.0.0/0 node_ingress_cidr: 0.0.0.0/0 master_ingress_cidr: 0.0.0.0/0 diff --git a/roles/openstack-stack/tasks/cleanup.yml b/roles/openstack-stack/tasks/cleanup.yml new file mode 100644 index 000000000..258334a6b --- /dev/null +++ b/roles/openstack-stack/tasks/cleanup.yml @@ -0,0 +1,6 @@ +--- + +- name: cleanup temp files + file: + path: "{{ stack_template_pre.path }}" + state: absent diff --git a/roles/openstack-stack/tasks/generate-templates.yml b/roles/openstack-stack/tasks/generate-templates.yml new file mode 100644 index 000000000..0ce9a3eec --- /dev/null +++ b/roles/openstack-stack/tasks/generate-templates.yml @@ -0,0 +1,32 @@ +--- +- name: create HOT stack template prefix + register: stack_template_pre + tempfile: + state: directory + prefix: casl-ansible + +- name: set template paths + set_fact: + stack_template_path: "{{ stack_template_pre.path }}/stack.yaml" + user_data_template_path: "{{ stack_template_pre.path }}/user-data" + +- name: generate HOT stack template from jinja2 template + template: + src: 
heat_stack.yaml.j2 + dest: "{{ stack_template_path }}" + +- name: generate HOT server template from jinja2 template + template: + src: heat_stack_server.yaml.j2 + dest: "{{ stack_template_pre.path }}/server.yaml" + +- name: generate HOT server w/o floating IPs template from jinja2 template + template: + src: heat_stack_server_nofloating.yaml.j2 + dest: "{{ stack_template_pre.path }}/server_nofloating.yaml" + when: use_bastion|bool + +- name: generate user_data from jinja2 template + template: + src: user_data.j2 + dest: "{{ user_data_template_path }}" diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml index 9b4855294..983567026 100644 --- a/roles/openstack-stack/tasks/main.yml +++ b/roles/openstack-stack/tasks/main.yml @@ -1,51 +1,27 @@ --- -- name: create HOT stack template prefix - register: stack_template_pre - tempfile: - state: directory - prefix: casl-ansible -- name: set template paths - set_fact: - stack_template_path: "{{ stack_template_pre.path }}/stack.yaml" - user_data_template_path: "{{ stack_template_pre.path }}/user-data" +- name: Generate the templates + include: generate-templates.yml + when: + - stack_state == 'present' -- name: generate HOT stack template from jinja2 template - template: - src: heat_stack.yaml.j2 - dest: "{{ stack_template_path }}" - -- name: generate HOT server template from jinja2 template - template: - src: heat_stack_server.yaml.j2 - dest: "{{ stack_template_pre.path }}/server.yaml" - -- name: generate HOT server w/o floating IPs template from jinja2 template - template: - src: heat_stack_server_nofloating.yaml.j2 - dest: "{{ stack_template_pre.path }}/server_nofloating.yaml" - when: use_bastion|bool - -- name: generate user_data from jinja2 template - template: - src: user_data.j2 - dest: "{{ user_data_template_path }}" - -- name: create stack +- name: Handle the Stack (create/delete) ignore_errors: False register: stack_create os_stack: name: "{{ stack_name }}" - state: present - template: "{{ stack_template_path }}" + state: "{{ stack_state }}" + template: "{{ stack_template_path | default(omit) }}" wait: yes # NOTE(bogdando) OS::Neutron::Subnet doesn't support live updates for # dns_nameservers, so we can't do that for the "create stack" task. 
- include: subnet_update_dns_servers.yaml - when: private_dns_server is defined - -- name: cleanup temp files - file: - path: "{{ stack_template_pre.path }}" - state: absent + when: + - private_dns_server is defined + - stack_state == 'present' + +- name: CleanUp + include: cleanup.yml + when: + - stack_state == 'present' -- cgit v1.2.1 From c969394a52c311f1ff5cc2fc669276bc8e2b4e4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eduardo=20M=C3=ADnguez?= Date: Wed, 4 Oct 2017 03:31:56 +0200 Subject: Required variables to create dedicated lv (#766) * Required variables to create dedicated lv https://bugzilla.redhat.com/show_bug.cgi?id=1490910#c11 * Fixed lint and added distribution to checks --- roles/docker-storage-setup/defaults/main.yaml | 2 ++ roles/docker-storage-setup/tasks/main.yaml | 35 +++++++++++++++++----- .../templates/docker-storage-setup-dm.j2 | 4 +++ .../templates/docker-storage-setup-overlayfs.j2 | 6 ++++ .../templates/docker-storage-setup.j2 | 4 --- 5 files changed, 40 insertions(+), 11 deletions(-) create mode 100644 roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 create mode 100644 roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 delete mode 100644 roles/docker-storage-setup/templates/docker-storage-setup.j2 (limited to 'roles') diff --git a/roles/docker-storage-setup/defaults/main.yaml b/roles/docker-storage-setup/defaults/main.yaml index e36f1b85a..062f543ad 100644 --- a/roles/docker-storage-setup/defaults/main.yaml +++ b/roles/docker-storage-setup/defaults/main.yaml @@ -3,3 +3,5 @@ docker_dev: "/dev/sdb" docker_vg: "docker-vol" docker_data_size: "95%VG" docker_dm_basesize: "3G" +container_root_lv_name: "dockerlv" +container_root_lv_mount_path: "/var/lib/docker" diff --git a/roles/docker-storage-setup/tasks/main.yaml b/roles/docker-storage-setup/tasks/main.yaml index 7202bc46b..209062ca7 100644 --- a/roles/docker-storage-setup/tasks/main.yaml +++ b/roles/docker-storage-setup/tasks/main.yaml @@ -1,8 +1,29 @@ --- -- name: create the docker-storage-setup config file - template: - src: "{{ role_path }}/templates/docker-storage-setup.j2" - dest: /etc/sysconfig/docker-storage-setup - owner: root - group: root - mode: 0644 +- block: + - name: create the docker-storage config file + template: + src: "{{ role_path }}/templates/docker-storage-setup-overlayfs.j2" + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0644 + + when: + - ansible_distribution_version | version_compare('7.4', '>=') + - ansible_distribution == "RedHat" + +- block: + - name: create the docker-storage-setup config file + template: + src: "{{ role_path }}/templates/docker-storage-setup-dm.j2" + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0644 + + when: + - ansible_distribution_version | version_compare('7.4', '<') + - ansible_distribution == "RedHat" + +- name: start docker + service: name=docker state=started enabled=true diff --git a/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 b/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 new file mode 100644 index 000000000..b5869feff --- /dev/null +++ b/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 @@ -0,0 +1,4 @@ +DEVS="{{ docker_dev }}" +VG="{{ docker_vg }}" +DATA_SIZE="{{ docker_data_size }}" +EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}" diff --git a/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 
b/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 new file mode 100644 index 000000000..4bef865c8 --- /dev/null +++ b/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 @@ -0,0 +1,6 @@ +DEVS="{{ docker_dev }}" +VG="{{ docker_vg }}" +DATA_SIZE="{{ docker_data_size }}" +STORAGE_DRIVER=overlay2 +CONTAINER_ROOT_LV_NAME="{{ container_root_lv_name }}" +CONTAINER_ROOT_LV_MOUNT_PATH="{{ container_root_lv_mount_path }}" diff --git a/roles/docker-storage-setup/templates/docker-storage-setup.j2 b/roles/docker-storage-setup/templates/docker-storage-setup.j2 deleted file mode 100644 index b5869feff..000000000 --- a/roles/docker-storage-setup/templates/docker-storage-setup.j2 +++ /dev/null @@ -1,4 +0,0 @@ -DEVS="{{ docker_dev }}" -VG="{{ docker_vg }}" -DATA_SIZE="{{ docker_data_size }}" -EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}" -- cgit v1.2.1 From 2abe439cd35321e6388c25d5c8e4e6f1fa77e796 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Wed, 4 Oct 2017 15:03:17 +0000 Subject: Fixing various contrib changes causing CASL breakage (#771) --- roles/openstack-stack/defaults/main.yml | 2 +- roles/openstack-stack/templates/heat_stack.yaml.j2 | 24 +++++++++++----------- 2 files changed, 13 insertions(+), 13 deletions(-) (limited to 'roles') diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml index 6f1949286..a24e684cc 100644 --- a/roles/openstack-stack/defaults/main.yml +++ b/roles/openstack-stack/defaults/main.yml @@ -18,4 +18,4 @@ dns_volume_size: 1 lb_volume_size: 5 use_bastion: False ui_ssh_tunnel: False -provider_network: None +provider_network: False diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 7acef5a6b..ef46211a4 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -499,7 +499,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: {{ etcd_hostname }} + k8s_type: {{ etcd_hostname | default('etcd') }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -509,7 +509,7 @@ resources: k8s_type: etcds cluster_id: {{ stack_name }} type: etcd - image: {{ openstack_etcd_image }} + image: {{ openstack_etcd_image | default(openstack_image) }} flavor: {{ etcd_flavor }} key_name: {{ ssh_public_key }} {% if provider_network %} @@ -549,7 +549,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: {{ lb_hostname }} + k8s_type: {{ lb_hostname | default('lb') }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -559,7 +559,7 @@ resources: k8s_type: lb cluster_id: {{ stack_name }} type: lb - image: {{ openstack_lb_image }} + image: {{ openstack_lb_image | default(openstack_image) }} flavor: {{ lb_flavor }} key_name: {{ ssh_public_key }} {% if provider_network %} @@ -603,7 +603,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: {{ master_hostname }} + k8s_type: {{ master_hostname | default('master')}} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -613,7 +613,7 @@ resources: k8s_type: masters cluster_id: {{ stack_name }} type: master - image: {{ openstack_master_image }} + image: {{ openstack_master_image | default(openstack_image) }} flavor: {{ master_flavor }} key_name: {{ ssh_public_key }} {% if 
provider_network %} @@ -666,7 +666,7 @@ resources: template: sub_type_k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - sub_type_k8s_type: {{ node_hostname }} + sub_type_k8s_type: {{ node_hostname | default('app-node') }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -681,7 +681,7 @@ resources: {% for k, v in openshift_cluster_node_labels.app.iteritems() %} {{ k|e }}: {{ v|e }} {% endfor %} - image: {{ openstack_node_image }} + image: {{ openstack_node_image | default(openstack_image) }} flavor: {{ node_flavor }} key_name: {{ ssh_public_key }} {% if provider_network %} @@ -720,7 +720,7 @@ resources: template: sub_type_k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - sub_type_k8s_type: {{ infra_hostname }} + sub_type_k8s_type: {{ infra_hostname | default('infranode') }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -735,7 +735,7 @@ resources: {% for k, v in openshift_cluster_node_labels.infra.iteritems() %} {{ k|e }}: {{ v|e }} {% endfor %} - image: {{ openstack_infra_image }} + image: {{ openstack_infra_image | default(openstack_image) }} flavor: {{ infra_flavor }} key_name: {{ ssh_public_key }} {% if provider_network %} @@ -784,7 +784,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: {{ dns_hostname }} + k8s_type: {{ dns_hostname | default('dns') }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -794,7 +794,7 @@ resources: k8s_type: dns cluster_id: {{ stack_name }} type: dns - image: {{ openstack_dns_image }} + image: {{ openstack_dns_image | default(openstack_image) }} flavor: {{ dns_flavor }} key_name: {{ ssh_public_key }} {% if provider_network %} -- cgit v1.2.1 From 1c73318927fe1730fa4c52fc684a94d37d12a5fd Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 6 Oct 2017 09:20:53 +0200 Subject: Replace the CASL references (#778) Following up on the initial port of the OpenStack roles from casl-ansible to openshift-ansible-contrib. One of the points that was brought up in the review was to drop the references to CASL in the code since the code has now wider reach. --- roles/openstack-stack/README.md | 2 +- roles/openstack-stack/tasks/generate-templates.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'roles') diff --git a/roles/openstack-stack/README.md b/roles/openstack-stack/README.md index 509c9de6c..32a2b49f1 100644 --- a/roles/openstack-stack/README.md +++ b/roles/openstack-stack/README.md @@ -5,5 +5,5 @@ Role for spinning up instances using OpenStack Heat. ## To Test ``` -ansible-playbook casl-ansible/roles/openstack-stack/test/stack-create-test.yml +ansible-playbook openshift-ansible-contrib/roles/openstack-stack/test/stack-create-test.yml ``` diff --git a/roles/openstack-stack/tasks/generate-templates.yml b/roles/openstack-stack/tasks/generate-templates.yml index 0ce9a3eec..110da8444 100644 --- a/roles/openstack-stack/tasks/generate-templates.yml +++ b/roles/openstack-stack/tasks/generate-templates.yml @@ -3,7 +3,7 @@ register: stack_template_pre tempfile: state: directory - prefix: casl-ansible + prefix: openshift-ansible - name: set template paths set_fact: -- cgit v1.2.1 From 5a6c1927828d942ebe4c71861521c0dae6545011 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Mon, 9 Oct 2017 09:57:08 +0000 Subject: Add CentOS support to the docker-storage-setup role This let's us use the role on CentOS systems, as well as RHEL. 
In addition, it installs docker and makes sure it's restarted (as opposed to just "started" which has no effect when docker is already running). --- roles/docker-storage-setup/tasks/main.yaml | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) (limited to 'roles') diff --git a/roles/docker-storage-setup/tasks/main.yaml b/roles/docker-storage-setup/tasks/main.yaml index 209062ca7..8dabb1cc7 100644 --- a/roles/docker-storage-setup/tasks/main.yaml +++ b/roles/docker-storage-setup/tasks/main.yaml @@ -25,5 +25,21 @@ - ansible_distribution_version | version_compare('7.4', '<') - ansible_distribution == "RedHat" +- block: + - name: create the docker-storage-setup config file for CentOS + template: + src: "{{ role_path }}/templates/docker-storage-setup-dm.j2" + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0644 + + # TODO(shadower): Find out which CentOS version supports overlayfs2 + when: + - ansible_distribution == "CentOS" + +- name: Install Docker + package: name=docker state=present + - name: start docker - service: name=docker state=started enabled=true + service: name=docker state=restarted enabled=true -- cgit v1.2.1 From af9f352d64fba76fcaed7de4e2b35b44ddf10e2b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eduardo=20M=C3=ADnguez?= Date: Tue, 10 Oct 2017 20:02:50 +0200 Subject: Fix for this issue https://bugzilla.redhat.com/show_bug.cgi?id=1495372 (#793) --- roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 | 1 + 1 file changed, 1 insertion(+) (limited to 'roles') diff --git a/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 b/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 index 4bef865c8..d8b4a0276 100644 --- a/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 +++ b/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 @@ -4,3 +4,4 @@ DATA_SIZE="{{ docker_data_size }}" STORAGE_DRIVER=overlay2 CONTAINER_ROOT_LV_NAME="{{ container_root_lv_name }}" CONTAINER_ROOT_LV_MOUNT_PATH="{{ container_root_lv_mount_path }}" +CONTAINER_ROOT_LV_SIZE=100%FREE -- cgit v1.2.1 From b450ff75888f7801094ca88957a237f33f5e85f1 Mon Sep 17 00:00:00 2001 From: tzumainn Date: Fri, 13 Oct 2017 05:21:26 -0400 Subject: Allow the specification of server group policies when provisioning openstack (#747) * Allow for the specifying of server policies during OpenStack provisioning * documentation for openstack server group policies * add doc link detailing allowed policies * changed default to anti-affinity --- roles/openstack-stack/templates/heat_stack.yaml.j2 | 22 ++++++++++++++++++++++ .../templates/heat_stack_server.yaml.j2 | 6 ++++++ .../templates/heat_stack_server_nofloating.yaml.j2 | 6 ++++++ 3 files changed, 34 insertions(+) (limited to 'roles') diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index ef46211a4..a6b088efb 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -536,6 +536,20 @@ resources: - interface {% endif %} +{% if master_server_group_policies|length > 0 %} + master_server_group: + type: OS::Nova::ServerGroup + properties: + name: master_server_group + policies: {{ master_server_group_policies }} +{% endif %} +{% if infra_server_group_policies|length > 0 %} + infra_server_group: + type: OS::Nova::ServerGroup + properties: + name: infra_server_group + policies: {{ infra_server_group_policies }} +{% endif %} {% if 
num_masters|int > 1 %} loadbalancer: type: OS::Heat::ResourceGroup @@ -643,6 +657,10 @@ resources: floating_network: {{ external_network }} {% endif %} volume_size: {{ master_volume_size }} +{% if master_server_group_policies|length > 0 %} + scheduler_hints: + group: { get_resource: master_server_group } +{% endif %} {% if not provider_network %} depends_on: - interface @@ -766,6 +784,10 @@ resources: floating_network: {{ external_network }} {% endif %} volume_size: {{ infra_volume_size }} +{% if infra_server_group_policies|length > 0 %} + scheduler_hints: + group: { get_resource: infra_server_group } +{% endif %} {% if not provider_network %} depends_on: - interface diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 index fc797941e..66c2491a9 100644 --- a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 @@ -98,6 +98,11 @@ parameters: description: OpenShift Node Labels default: {"region": "default" } + scheduler_hints: + type: json + description: Server scheduler hints. + default: {} + outputs: name: @@ -154,6 +159,7 @@ resources: host-type: { get_param: type } sub-host-type: { get_param: subtype } node_labels: { get_param: node_labels } + scheduler_hints: { get_param: scheduler_hints } {% if use_trunk_ports|default(false)|bool %} trunk-port: diff --git a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 index 2c16ad778..4b79d5ab6 100644 --- a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 @@ -89,6 +89,11 @@ parameters: description: OpenShift Node Labels default: {"region": "default" } + scheduler_hints: + type: json + description: Server scheduler hints. + default: {} + outputs: name: @@ -131,6 +136,7 @@ resources: host-type: { get_param: type } sub-host-type: { get_param: subtype } node_labels: { get_param: node_labels } + scheduler_hints: { get_param: scheduler_hints } {% if use_trunk_ports|default(false)|bool %} trunk-port: -- cgit v1.2.1 From ca88364175fe5177cecbb479a157d7329db05d8a Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Mon, 16 Oct 2017 15:42:42 +0200 Subject: Support separate data network for Flannel SDN (#757) * Support separate data network for Flannel SDN Document the use case for a separate flannel data network. Allow Nova servers for openshift cluster to be provisioned with that isolated data network created and connected to masters, computes and infra nodes. Do not configure dns nameservers and router for that network. Signed-off-by: Bogdan Dobrelya * Fix flannel use cases with provider network Provider network cannot be used with flannel SDN as the latter requires a separate isolated network, while the provider network is an externally managed single network. 
Signed-off-by: Bogdan Dobrelya * Drop unused data_net_name Signed-off-by: Bogdan Dobrelya --- roles/openstack-stack/templates/heat_stack.yaml.j2 | 31 ++++++++++++ .../templates/heat_stack_server.yaml.j2 | 57 ++++++++++++++++++++++ .../templates/heat_stack_server_nofloating.yaml.j2 | 55 +++++++++++++++++++++ 3 files changed, 143 insertions(+) (limited to 'roles') diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index a6b088efb..1f1e33cf2 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -113,6 +113,22 @@ resources: - {{ nameserver }} {% endfor %} +{% if openshift_use_flannel|default(False)|bool %} + data_net: + type: OS::Neutron::Net + properties: + name: openshift-ansible-{{ stack_name }}-data-net + port_security_enabled: false + + data_subnet: + type: OS::Neutron::Subnet + properties: + name: openshift-ansible-{{ stack_name }}-data-subnet + network: { get_resource: data_net } + cidr: {{ osm_cluster_network_cidr|default('10.128.0.0/14') }} + gateway_ip: null +{% endif %} + router: type: OS::Neutron::Router properties: @@ -641,6 +657,11 @@ resources: template: openshift-ansible-cluster_id-net params: cluster_id: {{ stack_name }} +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: true + data_net: { get_resource: data_net } + data_subnet: { get_resource: data_subnet } +{% endif %} {% endif %} secgrp: {% if openstack_flat_secgrp|default(False)|bool %} @@ -713,6 +734,11 @@ resources: template: openshift-ansible-cluster_id-net params: cluster_id: {{ stack_name }} +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: true + data_net: { get_resource: data_net } + data_subnet: { get_resource: data_subnet } +{% endif %} {% endif %} secgrp: - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } @@ -767,6 +793,11 @@ resources: template: openshift-ansible-cluster_id-net params: cluster_id: {{ stack_name }} +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: true + data_net: { get_resource: data_net } + data_subnet: { get_resource: data_subnet } +{% endif %} {% endif %} secgrp: # TODO(bogdando) filter only required node rules into infra-secgrp diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 index 66c2491a9..6552e0a0d 100644 --- a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 @@ -68,6 +68,28 @@ parameters: description: Subnet resource {% endif %} +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: + type: boolean + default: false + label: Attach-data-net + description: A switch for data port connection + + data_net: + type: string + default: '' + label: Net ID + description: Net resource + +{% if not provider_network %} + data_subnet: + type: string + default: '' + label: Subnet ID + description: Subnet resource +{% endif %} +{% endif %} + secgrp: type: comma_delimited_list label: Security groups @@ -133,6 +155,11 @@ outputs: {% endif %} - addr +{% if openshift_use_flannel|default(False)|bool %} +conditions: + no_data_subnet: {not: { get_param: attach_data_net} } +{% endif %} + resources: server: @@ -143,10 +170,27 @@ resources: image: { get_param: image } flavor: { get_param: flavor } networks: +{% if openshift_use_flannel|default(False)|bool %} + if: + - no_data_subnet +{% if 
use_trunk_ports|default(false)|bool %} + - - port: { get_attr: [trunk-port, port_id] } +{% else %} + - - port: { get_resource: port } +{% endif %} +{% if use_trunk_ports|default(false)|bool %} + - - port: { get_attr: [trunk-port, port_id] } +{% else %} + - - port: { get_resource: port } + - port: { get_resource: data_port } +{% endif %} + +{% else %} {% if use_trunk_ports|default(false)|bool %} - port: { get_attr: [trunk-port, port_id] } {% else %} - port: { get_resource: port } +{% endif %} {% endif %} user_data: get_file: user-data @@ -179,6 +223,19 @@ resources: {% endif %} security_groups: { get_param: secgrp } +{% if openshift_use_flannel|default(False)|bool %} + data_port: + type: OS::Neutron::Port + condition: { not: no_data_subnet } + properties: + network: { get_param: data_net } + port_security_enabled: false +{% if not provider_network %} + fixed_ips: + - subnet: { get_param: data_subnet } +{% endif %} +{% endif %} + {% if not provider_network %} floating-ip: type: OS::Neutron::FloatingIP diff --git a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 index 4b79d5ab6..742d53649 100644 --- a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 @@ -66,6 +66,26 @@ parameters: label: Subnet ID description: Subnet resource +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: + type: boolean + default: false + label: Attach-data-net + description: A switch for data port connection + + data_net: + type: string + default: '' + label: Net ID + description: Net resource + + data_subnet: + type: string + default: '' + label: Subnet ID + description: Subnet resource +{% endif %} + secgrp: type: comma_delimited_list label: Security groups @@ -110,6 +130,11 @@ outputs: - 0 - addr +{% if openshift_use_flannel|default(False)|bool %} +conditions: + no_data_subnet: {not: { get_param: attach_data_net} } +{% endif %} + resources: server_nofloating: @@ -120,10 +145,27 @@ resources: image: { get_param: image } flavor: { get_param: flavor } networks: +{% if openshift_use_flannel|default(False)|bool %} + if: + - no_data_subnet +{% if use_trunk_ports|default(false)|bool %} + - - port: { get_attr: [trunk-port, port_id] } +{% else %} + - - port: { get_resource: port } +{% endif %} +{% if use_trunk_ports|default(false)|bool %} + - - port: { get_attr: [trunk-port, port_id] } +{% else %} + - - port: { get_resource: port } + - port: { get_resource: data_port } +{% endif %} + +{% else %} {% if use_trunk_ports|default(false)|bool %} - port: { get_attr: [trunk-port, port_id] } {% else %} - port: { get_resource: port } +{% endif %} {% endif %} user_data: get_file: user-data @@ -154,6 +196,19 @@ resources: - subnet: { get_param: subnet } security_groups: { get_param: secgrp } +{% if openshift_use_flannel|default(False)|bool %} + data_port: + type: OS::Neutron::Port + condition: { not: no_data_subnet } + properties: + network: { get_param: data_net } + port_security_enabled: false +{% if not provider_network %} + fixed_ips: + - subnet: { get_param: data_subnet } +{% endif %} +{% endif %} + {% if not ephemeral_volumes|default(false)|bool %} cinder_volume: type: OS::Cinder::Volume -- cgit v1.2.1 From e89bd6b1cb32ad52f0109f80022e801943b51893 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Tue, 17 Oct 2017 10:12:59 +0200 Subject: [WIP] Merge server with nofloating server heat templates (#761) Merge server with 
nofloating server heat templates --- roles/openstack-stack/tasks/generate-templates.yml | 6 - roles/openstack-stack/templates/heat_stack.yaml.j2 | 42 ++-- .../templates/heat_stack_server.yaml.j2 | 12 +- .../templates/heat_stack_server_nofloating.yaml.j2 | 225 --------------------- 4 files changed, 35 insertions(+), 250 deletions(-) delete mode 100644 roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 (limited to 'roles') diff --git a/roles/openstack-stack/tasks/generate-templates.yml b/roles/openstack-stack/tasks/generate-templates.yml index 110da8444..0ff50a095 100644 --- a/roles/openstack-stack/tasks/generate-templates.yml +++ b/roles/openstack-stack/tasks/generate-templates.yml @@ -20,12 +20,6 @@ src: heat_stack_server.yaml.j2 dest: "{{ stack_template_pre.path }}/server.yaml" -- name: generate HOT server w/o floating IPs template from jinja2 template - template: - src: heat_stack_server_nofloating.yaml.j2 - dest: "{{ stack_template_pre.path }}/server_nofloating.yaml" - when: use_bastion|bool - - name: generate user_data from jinja2 template template: src: user_data.j2 diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 1f1e33cf2..a69b7fc00 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -71,6 +71,9 @@ outputs: value: { get_attr: [ dns, private_ip ] } {% endif %} +conditions: + no_floating: {% if provider_network or use_bastion|bool %}true{% else %}false{% endif %} + resources: {% if not provider_network %} @@ -504,11 +507,7 @@ resources: properties: count: {{ num_etcd }} resource_def: -{% if use_bastion|bool %} - type: server_nofloating.yaml -{% else %} type: server.yaml -{% endif %} properties: name: str_replace: @@ -543,8 +542,13 @@ resources: secgrp: - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } - { get_resource: common-secgrp } -{% if not use_bastion|bool and not provider_network %} - floating_network: {{ external_network }} + floating_network: + if: + - no_floating + - null + - {{ external_network }} +{% if use_bastion|bool or provider_network %} + attach_float_net: false {% endif %} volume_size: {{ etcd_volume_size }} {% if not provider_network %} @@ -622,11 +626,7 @@ resources: properties: count: {{ num_masters }} resource_def: -{% if use_bastion|bool %} - type: server_nofloating.yaml -{% else %} type: server.yaml -{% endif %} properties: name: str_replace: @@ -674,8 +674,13 @@ resources: {% endif %} {% endif %} - { get_resource: common-secgrp } -{% if not use_bastion|bool and not provider_network %} - floating_network: {{ external_network }} + floating_network: + if: + - no_floating + - null + - {{ external_network }} +{% if use_bastion|bool or provider_network %} + attach_float_net: false {% endif %} volume_size: {{ master_volume_size }} {% if master_server_group_policies|length > 0 %} @@ -694,11 +699,7 @@ resources: removal_policies: - resource_list: {{ nodes_to_remove }} resource_def: -{% if use_bastion|bool %} - type: server_nofloating.yaml -{% else %} type: server.yaml -{% endif %} properties: name: str_replace: @@ -743,8 +744,13 @@ resources: secgrp: - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } - { get_resource: common-secgrp } -{% if not use_bastion|bool and not provider_network %} - floating_network: {{ external_network }} + floating_network: + if: + - no_floating + - null + 
- {{ external_network }} +{% if use_bastion|bool or provider_network %} + attach_float_net: false {% endif %} volume_size: {{ node_volume_size }} {% if not provider_network %} diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 index 6552e0a0d..9ffe721a5 100644 --- a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 @@ -95,9 +95,17 @@ parameters: label: Security groups description: Security group resources + attach_float_net: + type: boolean + default: true + + label: Attach-float-net + description: A switch for floating network port connection + {% if not provider_network %} floating_network: type: string + default: '' label: Floating network description: Network to allocate floating IP from {% endif %} @@ -155,8 +163,9 @@ outputs: {% endif %} - addr -{% if openshift_use_flannel|default(False)|bool %} conditions: + no_floating: {not: { get_param: attach_float_net} } +{% if openshift_use_flannel|default(False)|bool %} no_data_subnet: {not: { get_param: attach_data_net} } {% endif %} @@ -238,6 +247,7 @@ resources: {% if not provider_network %} floating-ip: + condition: { not: no_floating } type: OS::Neutron::FloatingIP properties: floating_network: { get_param: floating_network } diff --git a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 deleted file mode 100644 index 742d53649..000000000 --- a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 +++ /dev/null @@ -1,225 +0,0 @@ -heat_template_version: 2016-10-14 - -description: OpenShift cluster server w/o floating IP - -parameters: - - name: - type: string - label: Name - description: Name - - group: - type: string - label: Host Group - description: The Primary Ansible Host Group - default: host - - cluster_env: - type: string - label: Cluster environment - description: Environment of the cluster - - cluster_id: - type: string - label: Cluster ID - description: Identifier of the cluster - - type: - type: string - label: Type - description: Type master or node - - subtype: - type: string - label: Sub-type - description: Sub-type compute or infra for nodes, default otherwise - default: default - - key_name: - type: string - label: Key name - description: Key name of keypair - - image: - type: string - label: Image - description: Name of the image - - flavor: - type: string - label: Flavor - description: Name of the flavor - - net: - type: string - label: Net ID - description: Net resource - - net_name: - type: string - label: Net name - description: Net name - - subnet: - type: string - label: Subnet ID - description: Subnet resource - -{% if openshift_use_flannel|default(False)|bool %} - attach_data_net: - type: boolean - default: false - label: Attach-data-net - description: A switch for data port connection - - data_net: - type: string - default: '' - label: Net ID - description: Net resource - - data_subnet: - type: string - default: '' - label: Subnet ID - description: Subnet resource -{% endif %} - - secgrp: - type: comma_delimited_list - label: Security groups - description: Security group resources - - availability_zone: - type: string - description: The Availability Zone to launch the instance. - default: nova - - volume_size: - type: number - description: Size of the volume to be created. 
- default: 1 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - - node_labels: - type: json - description: OpenShift Node Labels - default: {"region": "default" } - - scheduler_hints: - type: json - description: Server scheduler hints. - default: {} - -outputs: - - name: - description: Name of the server - value: { get_attr: [ server_nofloating, name ] } - - private_ip: - description: Private IP of the server - value: - get_attr: - - server_nofloating - - addresses - - { get_param: net_name } - - 0 - - addr - -{% if openshift_use_flannel|default(False)|bool %} -conditions: - no_data_subnet: {not: { get_param: attach_data_net} } -{% endif %} - -resources: - - server_nofloating: - type: OS::Nova::Server - properties: - name: { get_param: name } - key_name: { get_param: key_name } - image: { get_param: image } - flavor: { get_param: flavor } - networks: -{% if openshift_use_flannel|default(False)|bool %} - if: - - no_data_subnet -{% if use_trunk_ports|default(false)|bool %} - - - port: { get_attr: [trunk-port, port_id] } -{% else %} - - - port: { get_resource: port } -{% endif %} -{% if use_trunk_ports|default(false)|bool %} - - - port: { get_attr: [trunk-port, port_id] } -{% else %} - - - port: { get_resource: port } - - port: { get_resource: data_port } -{% endif %} - -{% else %} -{% if use_trunk_ports|default(false)|bool %} - - port: { get_attr: [trunk-port, port_id] } -{% else %} - - port: { get_resource: port } -{% endif %} -{% endif %} - user_data: - get_file: user-data - user_data_format: RAW - user_data_update_policy: IGNORE - metadata: - group: { get_param: group } - environment: { get_param: cluster_env } - clusterid: { get_param: cluster_id } - host-type: { get_param: type } - sub-host-type: { get_param: subtype } - node_labels: { get_param: node_labels } - scheduler_hints: { get_param: scheduler_hints } - -{% if use_trunk_ports|default(false)|bool %} - trunk-port: - type: OS::Neutron::Trunk - properties: - name: { get_param: name } - port: { get_resource: port } -{% endif %} - - port: - type: OS::Neutron::Port - properties: - network: { get_param: net } - fixed_ips: - - subnet: { get_param: subnet } - security_groups: { get_param: secgrp } - -{% if openshift_use_flannel|default(False)|bool %} - data_port: - type: OS::Neutron::Port - condition: { not: no_data_subnet } - properties: - network: { get_param: data_net } - port_security_enabled: false -{% if not provider_network %} - fixed_ips: - - subnet: { get_param: data_subnet } -{% endif %} -{% endif %} - -{% if not ephemeral_volumes|default(false)|bool %} - cinder_volume: - type: OS::Cinder::Volume - properties: - size: { get_param: volume_size } - availability_zone: { get_param: availability_zone } - - volume_attachment: - type: OS::Cinder::VolumeAttachment - properties: - volume_id: { get_resource: cinder_volume } - instance_uuid: { get_resource: server_nofloating } - mountpoint: /dev/sdb -{% endif %} -- cgit v1.2.1 From 8d14089a84119d4b824bfea991099941122a2c12 Mon Sep 17 00:00:00 2001 From: Chandler Wilkerson Date: Tue, 17 Oct 2017 07:53:18 -0500 Subject: Docker storage fix (#812) * Added task to stop docker before templating config * Rearranged storage roles in rhv install --- roles/docker-storage-setup/tasks/main.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'roles') diff --git a/roles/docker-storage-setup/tasks/main.yaml b/roles/docker-storage-setup/tasks/main.yaml index 209062ca7..dbff85662 100644 --- a/roles/docker-storage-setup/tasks/main.yaml +++ 
b/roles/docker-storage-setup/tasks/main.yaml @@ -1,4 +1,7 @@ --- +- name: stop docker + service: name=docker state=stopped + - block: - name: create the docker-storage config file template: @@ -7,7 +10,6 @@ owner: root group: root mode: 0644 - when: - ansible_distribution_version | version_compare('7.4', '>=') - ansible_distribution == "RedHat" @@ -20,7 +22,6 @@ owner: root group: root mode: 0644 - when: - ansible_distribution_version | version_compare('7.4', '<') - ansible_distribution == "RedHat" -- cgit v1.2.1 From d2ff422b284f04b8a19ad4c6aa388ba397d915e1 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Wed, 18 Oct 2017 12:53:31 +0200 Subject: Add Flannel support (#814) * Add flannel support * Document Flannel SDN use case for a separate data network. * Add post install step for flannel SDN * Configure iptables rules as described for OCP 3.4 refarch https://access.redhat.com/documentation/en-us/reference_architectures/2017/html/deploying_red_hat_openshift_container_platform_3.4_on_red_hat_openstack_platform_10/emphasis_manual_deployment_emphasis#run_ansible_installer * Configure flannel interface options Signed-off-by: Bogdan Dobrelya * Use os_firewall from galaxy for required flannel rules For flannel SDN: * Add openshift-ansible as a galaxy dependency module. * Use openshift-ansible/roles/os_firewall to apply DNS rules for flanel SDN. * Apply the remaining advanced rules with direct iptables commands as os_firewall do not support advanced rules. * Persist only iptables rules w/o dynamic KUBe rules. Those are added runtime and need restoration after reboot or iptables restart. * Configure and enable the masked iptables service on the app nodes. Enable it to allow the in-memory rules to be persisted. Disable firewalld, which is the expected default behavior of the os_firewall module. Signed-off-by: Bogdan Dobrelya * Allow access from nodes to masters' port 2379 when using flannel Flannel requires to gather information from etcd to configure and assign the subnets in the nodes, therefore, allow access from nodes to port 2379/tcp to the master security group. Signed-off-by: Bogdan Dobrelya --- roles/openstack-stack/templates/heat_stack.yaml.j2 | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'roles') diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index a69b7fc00..2359842a5 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -341,6 +341,12 @@ resources: protocol: tcp port_range_min: 9090 port_range_max: 9090 +{% if openshift_use_flannel|default(False)|bool %} + - direction: ingress + protocol: tcp + port_range_min: 2379 + port_range_max: 2379 +{% endif %} etcd-secgrp: type: OS::Neutron::SecurityGroup -- cgit v1.2.1 From 4ed9aef6f8ed0850e70b498e780d0d8e22bc277f Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Mon, 23 Oct 2017 12:57:29 +0200 Subject: Add openshift_openstack role and move tasks there All the tasks that were previously in playbooks are now under `roles/openshift_openstack`. The `openshift-cluster` directory now only contains playbooks that include tasks from that role. This makes the structure much closer to that of the AWS provider. 
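A minimal sketch of how a provisioning playbook might consume the consolidated role after this change — the role name and task file come from the file list below, while the play itself is illustrative, since the actual playbooks under `openshift-cluster` are not part of this diff:

```
---
- name: Provision the OpenShift OpenStack resources
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Delegate provisioning to the consolidated role
      include_role:
        name: openshift_openstack
        tasks_from: provision.yml
```
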
--- roles/common/defaults/main.yml | 6 - roles/dns-records/defaults/main.yml | 2 - roles/dns-records/tasks/main.yml | 121 --- roles/dns-server-detect/defaults/main.yml | 3 - roles/dns-server-detect/tasks/main.yml | 36 - roles/dns-views/defaults/main.yml | 4 - roles/dns-views/tasks/main.yml | 30 - roles/docker-storage-setup/defaults/main.yaml | 7 - roles/docker-storage-setup/tasks/main.yaml | 46 -- .../templates/docker-storage-setup-dm.j2 | 4 - .../templates/docker-storage-setup-overlayfs.j2 | 7 - roles/hostnames/tasks/main.yaml | 26 - roles/hostnames/test/inv | 12 - roles/hostnames/test/roles | 1 - roles/hostnames/test/test.yaml | 4 - roles/hostnames/vars/main.yaml | 2 - roles/hostnames/vars/records.yaml | 28 - roles/node-network-manager/tasks/main.yml | 22 - roles/openshift-prep/defaults/main.yml | 13 - roles/openshift-prep/tasks/main.yml | 4 - roles/openshift-prep/tasks/prerequisites.yml | 37 - roles/openshift_openstack/defaults/main.yml | 49 ++ .../tasks/check-prerequisites.yml | 109 +++ roles/openshift_openstack/tasks/cleanup.yml | 6 + .../tasks/container-storage-setup.yml | 37 + .../tasks/custom_flavor_check.yaml | 9 + .../tasks/custom_image_check.yaml | 10 + .../tasks/generate-templates.yml | 26 + roles/openshift_openstack/tasks/hostname.yml | 33 + .../openshift_openstack/tasks/net_vars_check.yaml | 14 + .../tasks/node-configuration.yml | 11 + roles/openshift_openstack/tasks/node-network.yml | 19 + roles/openshift_openstack/tasks/node-packages.yml | 15 + roles/openshift_openstack/tasks/populate-dns.yml | 5 + .../tasks/prepare-and-format-cinder-volume.yaml | 59 ++ roles/openshift_openstack/tasks/provision.yml | 30 + .../tasks/subnet_update_dns_servers.yaml | 9 + .../templates/docker-storage-setup-dm.j2 | 4 + .../templates/docker-storage-setup-overlayfs.j2 | 7 + .../templates/heat_stack.yaml.j2 | 888 +++++++++++++++++++++ .../templates/heat_stack_server.yaml.j2 | 270 +++++++ roles/openshift_openstack/templates/user_data.j2 | 13 + roles/openshift_openstack/vars/main.yml | 49 ++ roles/openstack-stack/tasks/main.yml | 1 - .../tasks/subnet_update_dns_servers.yaml | 9 - 45 files changed, 1672 insertions(+), 425 deletions(-) delete mode 100644 roles/common/defaults/main.yml delete mode 100644 roles/dns-records/defaults/main.yml delete mode 100644 roles/dns-records/tasks/main.yml delete mode 100644 roles/dns-server-detect/defaults/main.yml delete mode 100644 roles/dns-server-detect/tasks/main.yml delete mode 100644 roles/dns-views/defaults/main.yml delete mode 100644 roles/dns-views/tasks/main.yml delete mode 100644 roles/docker-storage-setup/defaults/main.yaml delete mode 100644 roles/docker-storage-setup/tasks/main.yaml delete mode 100644 roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 delete mode 100644 roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 delete mode 100644 roles/hostnames/tasks/main.yaml delete mode 100644 roles/hostnames/test/inv delete mode 120000 roles/hostnames/test/roles delete mode 100644 roles/hostnames/test/test.yaml delete mode 100644 roles/hostnames/vars/main.yaml delete mode 100644 roles/hostnames/vars/records.yaml delete mode 100644 roles/node-network-manager/tasks/main.yml delete mode 100644 roles/openshift-prep/defaults/main.yml delete mode 100644 roles/openshift-prep/tasks/main.yml delete mode 100644 roles/openshift-prep/tasks/prerequisites.yml create mode 100644 roles/openshift_openstack/defaults/main.yml create mode 100644 roles/openshift_openstack/tasks/check-prerequisites.yml create mode 100644 
roles/openshift_openstack/tasks/cleanup.yml create mode 100644 roles/openshift_openstack/tasks/container-storage-setup.yml create mode 100644 roles/openshift_openstack/tasks/custom_flavor_check.yaml create mode 100644 roles/openshift_openstack/tasks/custom_image_check.yaml create mode 100644 roles/openshift_openstack/tasks/generate-templates.yml create mode 100644 roles/openshift_openstack/tasks/hostname.yml create mode 100644 roles/openshift_openstack/tasks/net_vars_check.yaml create mode 100644 roles/openshift_openstack/tasks/node-configuration.yml create mode 100644 roles/openshift_openstack/tasks/node-network.yml create mode 100644 roles/openshift_openstack/tasks/node-packages.yml create mode 100644 roles/openshift_openstack/tasks/populate-dns.yml create mode 100644 roles/openshift_openstack/tasks/prepare-and-format-cinder-volume.yaml create mode 100644 roles/openshift_openstack/tasks/provision.yml create mode 100644 roles/openshift_openstack/tasks/subnet_update_dns_servers.yaml create mode 100644 roles/openshift_openstack/templates/docker-storage-setup-dm.j2 create mode 100644 roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 create mode 100644 roles/openshift_openstack/templates/heat_stack.yaml.j2 create mode 100644 roles/openshift_openstack/templates/heat_stack_server.yaml.j2 create mode 100644 roles/openshift_openstack/templates/user_data.j2 create mode 100644 roles/openshift_openstack/vars/main.yml delete mode 100644 roles/openstack-stack/tasks/subnet_update_dns_servers.yaml (limited to 'roles') diff --git a/roles/common/defaults/main.yml b/roles/common/defaults/main.yml deleted file mode 100644 index 8db591374..000000000 --- a/roles/common/defaults/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -openshift_cluster_node_labels: - app: - region: primary - infra: - region: infra diff --git a/roles/dns-records/defaults/main.yml b/roles/dns-records/defaults/main.yml deleted file mode 100644 index 3f7fa783f..000000000 --- a/roles/dns-records/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -use_bastion: False diff --git a/roles/dns-records/tasks/main.yml b/roles/dns-records/tasks/main.yml deleted file mode 100644 index 7148b016a..000000000 --- a/roles/dns-records/tasks/main.yml +++ /dev/null @@ -1,121 +0,0 @@ ---- -- name: "Generate list of private A records" - set_fact: - private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}" - with_items: "{{ groups['cluster_hosts'] }}" - -- name: "Add wildcard records to the private A records for infrahosts" - set_fact: - private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' 
+ openshift_app_domain, 'ip': hostvars[item]['private_v4'] } ] }}" - with_items: "{{ groups['infra_hosts'] }}" - -- name: "Add public master cluster hostname records to the private A records (single master)" - set_fact: - private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].private_v4 } ] }}" - when: - - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - - openstack_num_masters == 1 - -- name: "Add public master cluster hostname records to the private A records (multi-master)" - set_fact: - private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].private_v4 } ] }}" - when: - - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - - openstack_num_masters > 1 - -- name: "Set the private DNS server to use the external value (if provided)" - set_fact: - nsupdate_server_private: "{{ external_nsupdate_keys['private']['server'] }}" - nsupdate_key_secret_private: "{{ external_nsupdate_keys['private']['key_secret'] }}" - nsupdate_key_algorithm_private: "{{ external_nsupdate_keys['private']['key_algorithm'] }}" - nsupdate_private_key_name: "{{ external_nsupdate_keys['private']['key_name']|default('private-' + full_dns_domain) }}" - when: - - external_nsupdate_keys is defined - - external_nsupdate_keys['private'] is defined - -- name: "Set the private DNS server to use the provisioned value" - set_fact: - nsupdate_server_private: "{{ hostvars[groups['dns'][0]].public_v4 }}" - nsupdate_key_secret_private: "{{ hostvars[groups['dns'][0]].nsupdate_keys['private-' + full_dns_domain].key_secret }}" - nsupdate_key_algorithm_private: "{{ hostvars[groups['dns'][0]].nsupdate_keys['private-' + full_dns_domain].key_algorithm }}" - when: - - nsupdate_server_private is undefined - -- name: "Generate the private Add section for DNS" - set_fact: - private_named_records: - - view: "private" - zone: "{{ full_dns_domain }}" - server: "{{ nsupdate_server_private }}" - key_name: "{{ nsupdate_private_key_name|default('private-' + full_dns_domain) }}" - key_secret: "{{ nsupdate_key_secret_private }}" - key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}" - entries: "{{ private_records }}" - -- name: "Generate list of public A records" - set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['public_v4'] } ] }}" - with_items: "{{ groups['cluster_hosts'] }}" - when: hostvars[item]['public_v4'] is defined - -- name: "Add wildcard records to the public A records" - set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' 
+ openshift_app_domain, 'ip': hostvars[item]['public_v4'] } ] }}" - with_items: "{{ groups['infra_hosts'] }}" - when: hostvars[item]['public_v4'] is defined - -- name: "Add public master cluster hostname records to the public A records (single master)" - set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].public_v4 } ] }}" - when: - - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - - openstack_num_masters == 1 - - not use_bastion|bool - -- name: "Add public master cluster hostname records to the public A records (single master behind a bastion)" - set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}" - when: - - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - - openstack_num_masters == 1 - - use_bastion|bool - -- name: "Add public master cluster hostname records to the public A records (multi-master)" - set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].public_v4 } ] }}" - when: - - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - - openstack_num_masters > 1 - -- name: "Set the public DNS server details to use the external value (if provided)" - set_fact: - nsupdate_server_public: "{{ external_nsupdate_keys['public']['server'] }}" - nsupdate_key_secret_public: "{{ external_nsupdate_keys['public']['key_secret'] }}" - nsupdate_key_algorithm_public: "{{ external_nsupdate_keys['public']['key_algorithm'] }}" - nsupdate_public_key_name: "{{ external_nsupdate_keys['public']['key_name']|default('public-' + full_dns_domain) }}" - when: - - external_nsupdate_keys is defined - - external_nsupdate_keys['public'] is defined - -- name: "Set the public DNS server details to use the provisioned value" - set_fact: - nsupdate_server_public: "{{ hostvars[groups['dns'][0]].public_v4 }}" - nsupdate_key_secret_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_secret }}" - nsupdate_key_algorithm_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_algorithm }}" - when: - - nsupdate_server_public is undefined - -- name: "Generate the public Add section for DNS" - set_fact: - public_named_records: - - view: "public" - zone: "{{ full_dns_domain }}" - server: "{{ nsupdate_server_public }}" - key_name: "{{ nsupdate_public_key_name|default('public-' + full_dns_domain) }}" - key_secret: "{{ nsupdate_key_secret_public }}" - key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" - entries: "{{ public_records }}" - -- name: "Generate the final dns_records_add" - set_fact: - dns_records_add: "{{ private_named_records + public_named_records }}" diff --git a/roles/dns-server-detect/defaults/main.yml b/roles/dns-server-detect/defaults/main.yml deleted file mode 100644 index 58bd861cd..000000000 --- a/roles/dns-server-detect/defaults/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- - -external_nsupdate_keys: {} diff --git a/roles/dns-server-detect/tasks/main.yml b/roles/dns-server-detect/tasks/main.yml deleted 
file mode 100644 index cd775814f..000000000 --- a/roles/dns-server-detect/tasks/main.yml +++ /dev/null @@ -1,36 +0,0 @@ ---- -- fail: - msg: 'Missing required private DNS server(s)' - when: - - external_nsupdate_keys['private'] is undefined - - hostvars[groups['dns'][0]] is undefined - -- fail: - msg: 'Missing required public DNS server(s)' - when: - - external_nsupdate_keys['public'] is undefined - - hostvars[groups['dns'][0]] is undefined - -- name: "Set the private DNS server to use the external value (if provided)" - set_fact: - private_dns_server: "{{ external_nsupdate_keys['private']['server'] }}" - when: - - external_nsupdate_keys['private'] is defined - -- name: "Set the private DNS server to use the provisioned value" - set_fact: - private_dns_server: "{{ hostvars[groups['dns'][0]].private_v4 }}" - when: - - private_dns_server is undefined - -- name: "Set the public DNS server to use the external value (if provided)" - set_fact: - public_dns_server: "{{ external_nsupdate_keys['public']['server'] }}" - when: - - external_nsupdate_keys['public'] is defined - -- name: "Set the public DNS server to use the provisioned value" - set_fact: - public_dns_server: "{{ hostvars[groups['dns'][0]].public_v4 }}" - when: - - public_dns_server is undefined diff --git a/roles/dns-views/defaults/main.yml b/roles/dns-views/defaults/main.yml deleted file mode 100644 index c9f8248af..000000000 --- a/roles/dns-views/defaults/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -external_nsupdate_keys: {} -named_private_recursion: 'yes' -named_public_recursion: 'no' diff --git a/roles/dns-views/tasks/main.yml b/roles/dns-views/tasks/main.yml deleted file mode 100644 index ffbad2e3f..000000000 --- a/roles/dns-views/tasks/main.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -- name: "Generate ACL list for DNS server" - set_fact: - acl_list: "{{ acl_list | default([]) + [ (hostvars[item]['private_v4'] + '/32') ] }}" - with_items: "{{ groups['cluster_hosts'] }}" - -- name: "Generate the private view" - set_fact: - private_named_view: - - name: "private" - recursion: "{{ named_private_recursion }}" - acl_entry: "{{ acl_list }}" - zone: - - dns_domain: "{{ full_dns_domain }}" - forwarder: "{{ public_dns_nameservers }}" - when: external_nsupdate_keys['private'] is undefined - -- name: "Generate the public view" - set_fact: - public_named_view: - - name: "public" - recursion: "{{ named_public_recursion }}" - zone: - - dns_domain: "{{ full_dns_domain }}" - forwarder: "{{ public_dns_nameservers }}" - when: external_nsupdate_keys['public'] is undefined - -- name: "Generate the final named_config_views" - set_fact: - named_config_views: "{{ private_named_view|default([]) + public_named_view|default([]) }}" diff --git a/roles/docker-storage-setup/defaults/main.yaml b/roles/docker-storage-setup/defaults/main.yaml deleted file mode 100644 index 062f543ad..000000000 --- a/roles/docker-storage-setup/defaults/main.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -docker_dev: "/dev/sdb" -docker_vg: "docker-vol" -docker_data_size: "95%VG" -docker_dm_basesize: "3G" -container_root_lv_name: "dockerlv" -container_root_lv_mount_path: "/var/lib/docker" diff --git a/roles/docker-storage-setup/tasks/main.yaml b/roles/docker-storage-setup/tasks/main.yaml deleted file mode 100644 index 8606eeba4..000000000 --- a/roles/docker-storage-setup/tasks/main.yaml +++ /dev/null @@ -1,46 +0,0 @@ ---- -- name: stop docker - service: name=docker state=stopped - -- block: - - name: create the docker-storage config file - template: - src: "{{ role_path 
}}/templates/docker-storage-setup-overlayfs.j2" - dest: /etc/sysconfig/docker-storage-setup - owner: root - group: root - mode: 0644 - when: - - ansible_distribution_version | version_compare('7.4', '>=') - - ansible_distribution == "RedHat" - -- block: - - name: create the docker-storage-setup config file - template: - src: "{{ role_path }}/templates/docker-storage-setup-dm.j2" - dest: /etc/sysconfig/docker-storage-setup - owner: root - group: root - mode: 0644 - when: - - ansible_distribution_version | version_compare('7.4', '<') - - ansible_distribution == "RedHat" - -- block: - - name: create the docker-storage-setup config file for CentOS - template: - src: "{{ role_path }}/templates/docker-storage-setup-dm.j2" - dest: /etc/sysconfig/docker-storage-setup - owner: root - group: root - mode: 0644 - - # TODO(shadower): Find out which CentOS version supports overlayfs2 - when: - - ansible_distribution == "CentOS" - -- name: Install Docker - package: name=docker state=present - -- name: start docker - service: name=docker state=restarted enabled=true diff --git a/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 b/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 deleted file mode 100644 index b5869feff..000000000 --- a/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 +++ /dev/null @@ -1,4 +0,0 @@ -DEVS="{{ docker_dev }}" -VG="{{ docker_vg }}" -DATA_SIZE="{{ docker_data_size }}" -EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}" diff --git a/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 b/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 deleted file mode 100644 index d8b4a0276..000000000 --- a/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 +++ /dev/null @@ -1,7 +0,0 @@ -DEVS="{{ docker_dev }}" -VG="{{ docker_vg }}" -DATA_SIZE="{{ docker_data_size }}" -STORAGE_DRIVER=overlay2 -CONTAINER_ROOT_LV_NAME="{{ container_root_lv_name }}" -CONTAINER_ROOT_LV_MOUNT_PATH="{{ container_root_lv_mount_path }}" -CONTAINER_ROOT_LV_SIZE=100%FREE diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml deleted file mode 100644 index c49852210..000000000 --- a/roles/hostnames/tasks/main.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: Setting Hostname Fact - set_fact: - new_hostname: "{{ custom_hostname | default(inventory_hostname_short) }}" - -- name: Setting FQDN Fact - set_fact: - new_fqdn: "{{ new_hostname }}.{{ full_dns_domain }}" - -- name: Setting hostname and DNS domain - hostname: name="{{ new_fqdn }}" - -- name: Check for cloud.cfg - stat: path=/etc/cloud/cloud.cfg - register: cloud_cfg - -- name: Prevent cloud-init updates of hostname/fqdn (if applicable) - lineinfile: - dest: /etc/cloud/cloud.cfg - state: present - regexp: "{{ item.regexp }}" - line: "{{ item.line }}" - with_items: - - { regexp: '^ - set_hostname', line: '# - set_hostname' } - - { regexp: '^ - update_hostname', line: '# - update_hostname' } - when: cloud_cfg.stat.exists == True diff --git a/roles/hostnames/test/inv b/roles/hostnames/test/inv deleted file mode 100644 index ffbe6e03d..000000000 --- a/roles/hostnames/test/inv +++ /dev/null @@ -1,12 +0,0 @@ -[all:vars] -dns_domain=example.com - -[openshift_masters] -192.168.124.41 dns_private_ip=1.1.1.41 dns_public_ip=192.168.124.41 -192.168.124.117 dns_private_ip=1.1.1.117 dns_public_ip=192.168.124.117 - -[openshift_nodes] -192.168.124.40 dns_private_ip=1.1.1.40 dns_public_ip=192.168.124.40 - -#[dns] -#192.168.124.117 
dns_private_ip=1.1.1.117 diff --git a/roles/hostnames/test/roles b/roles/hostnames/test/roles deleted file mode 120000 index e2b799b9d..000000000 --- a/roles/hostnames/test/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles/ \ No newline at end of file diff --git a/roles/hostnames/test/test.yaml b/roles/hostnames/test/test.yaml deleted file mode 100644 index 0c56aea51..000000000 --- a/roles/hostnames/test/test.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- hosts: all - roles: - - role: hostnames diff --git a/roles/hostnames/vars/main.yaml b/roles/hostnames/vars/main.yaml deleted file mode 100644 index 3eecb8dc4..000000000 --- a/roles/hostnames/vars/main.yaml +++ /dev/null @@ -1,2 +0,0 @@ ---- -counter: 1 diff --git a/roles/hostnames/vars/records.yaml b/roles/hostnames/vars/records.yaml deleted file mode 100644 index 0cadc8181..000000000 --- a/roles/hostnames/vars/records.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- -- name: "Building Records" - set_fact: - dns_records_add: - - view: private - zone: example.com - entries: - - type: A - hostname: master1.example.com - ip: 172.16.15.94 - - type: A - hostname: node1.example.com - ip: 172.16.15.86 - - type: A - hostname: node2.example.com - ip: 172.16.15.87 - - view: public - zone: example.com - entries: - - type: A - hostname: master1.example.com - ip: 10.3.10.116 - - type: A - hostname: node1.example.com - ip: 10.3.11.46 - - type: A - hostname: node2.example.com - ip: 10.3.12.6 diff --git a/roles/node-network-manager/tasks/main.yml b/roles/node-network-manager/tasks/main.yml deleted file mode 100644 index 6a17855e7..000000000 --- a/roles/node-network-manager/tasks/main.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- name: install NetworkManager - package: - name: NetworkManager - state: present - -- name: configure NetworkManager - lineinfile: - dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}" - regexp: '^{{ item }}=' - line: '{{ item }}=yes' - state: present - create: yes - with_items: - - 'USE_PEERDNS' - - 'NM_CONTROLLED' - -- name: enable and start NetworkManager - service: - name: NetworkManager - state: restarted - enabled: yes diff --git a/roles/openshift-prep/defaults/main.yml b/roles/openshift-prep/defaults/main.yml deleted file mode 100644 index c8c9a00c0..000000000 --- a/roles/openshift-prep/defaults/main.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Defines either to install required packages and update all -manage_packages: true -install_debug_packages: false -required_packages: - - wget - - git - - net-tools - - bind-utils - - bridge-utils -debug_packages: - - bash-completion - - vim-enhanced diff --git a/roles/openshift-prep/tasks/main.yml b/roles/openshift-prep/tasks/main.yml deleted file mode 100644 index 5e484e75f..000000000 --- a/roles/openshift-prep/tasks/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -# Starting Point for OpenShift Installation and Configuration -- include: prerequisites.yml - tags: [prerequisites] diff --git a/roles/openshift-prep/tasks/prerequisites.yml b/roles/openshift-prep/tasks/prerequisites.yml deleted file mode 100644 index b7601aa48..000000000 --- a/roles/openshift-prep/tasks/prerequisites.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -- name: "Cleaning yum repositories" - command: "yum clean all" - -- name: "Install required packages" - yum: - name: "{{ item }}" - state: latest - with_items: "{{ required_packages }}" - when: manage_packages|bool - -- name: "Install debug packages (optional)" - yum: - name: "{{ item }}" - state: latest - with_items: "{{ debug_packages }}" - when: 
install_debug_packages|bool - -- name: "Update all packages (this can take a very long time)" - yum: - name: '*' - state: latest - when: manage_packages|bool - -- name: "Verify hostname" - shell: hostnamectl status | awk "/Static hostname/"'{ print $3 }' - register: hostname_fqdn - -- name: "Set hostname if required" - hostname: - name: "{{ ansible_fqdn }}" - when: hostname_fqdn.stdout != ansible_fqdn - -- name: "Verify SELinux is enforcing" - fail: - msg: "SELinux is required for OpenShift and has been detected as '{{ ansible_selinux.config_mode }}'" - when: ansible_selinux.config_mode != "enforcing" diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml new file mode 100644 index 000000000..05f1c0911 --- /dev/null +++ b/roles/openshift_openstack/defaults/main.yml @@ -0,0 +1,49 @@ +--- + +stack_state: 'present' + +ssh_ingress_cidr: 0.0.0.0/0 +node_ingress_cidr: 0.0.0.0/0 +master_ingress_cidr: 0.0.0.0/0 +lb_ingress_cidr: 0.0.0.0/0 +bastion_ingress_cidr: 0.0.0.0/0 +num_etcd: 0 +num_masters: 1 +num_nodes: 1 +num_dns: 1 +num_infra: 1 +nodes_to_remove: [] +etcd_volume_size: 2 +dns_volume_size: 1 +lb_volume_size: 5 +use_bastion: False +ui_ssh_tunnel: False +provider_network: False + + +openshift_cluster_node_labels: + app: + region: primary + infra: + region: infra + +install_debug_packages: false +required_packages: + - docker + - NetworkManager + - wget + - git + - net-tools + - bind-utils + - bridge-utils +debug_packages: + - bash-completion + - vim-enhanced + +# container-storage-setup +docker_dev: "/dev/sdb" +docker_vg: "docker-vol" +docker_data_size: "95%VG" +docker_dm_basesize: "3G" +container_root_lv_name: "dockerlv" +container_root_lv_mount_path: "/var/lib/docker" diff --git a/roles/openshift_openstack/tasks/check-prerequisites.yml b/roles/openshift_openstack/tasks/check-prerequisites.yml new file mode 100644 index 000000000..4d7cfbf11 --- /dev/null +++ b/roles/openshift_openstack/tasks/check-prerequisites.yml @@ -0,0 +1,109 @@ +--- +# Check ansible +- name: Check Ansible version + assert: + that: > + (ansible_version.major == 2 and ansible_version.minor >= 3) or + (ansible_version.major > 2) + msg: "Ansible version must be at least 2.3" + +# Check shade +- name: Try to import python module shade + command: python -c "import shade" + ignore_errors: yes + register: shade_result +- name: Check if shade is installed + assert: + that: 'shade_result.rc == 0' + msg: "Python module shade is not installed" + +# Check jmespath +- name: Try to import python module shade + command: python -c "import jmespath" + ignore_errors: yes + register: jmespath_result +- name: Check if jmespath is installed + assert: + that: 'jmespath_result.rc == 0' + msg: "Python module jmespath is not installed" + +# Check python-dns +- name: Try to import python DNS module + command: python -c "import dns" + ignore_errors: yes + register: pythondns_result +- name: Check if python-dns is installed + assert: + that: 'pythondns_result.rc == 0' + msg: "Python module python-dns is not installed" + +# Check jinja2 +- name: Try to import jinja2 module + command: python -c "import jinja2" + ignore_errors: yes + register: jinja_result +- name: Check if jinja2 is installed + assert: + that: 'jinja_result.rc == 0' + msg: "Python module jinja2 is not installed" + +# Check Glance image +- name: Try to get image facts + os_image_facts: + image: "{{ openstack_default_image_name }}" + register: image_result +- name: Check that image is available + assert: + that: 
"image_result.ansible_facts.openstack_image" + msg: "Image {{ openstack_default_image_name }} is not available" + +# Check network name +- name: Try to get network facts + os_networks_facts: + name: "{{ openstack_external_network_name }}" + register: network_result + when: not openstack_provider_network_name|default(None) +- name: Check that network is available + assert: + that: "network_result.ansible_facts.openstack_networks" + msg: "Network {{ openstack_external_network_name }} is not available" + when: not openstack_provider_network_name|default(None) + +# Check keypair +# TODO kpilatov: there is no Ansible module for getting OS keypairs +# (os_keypair is not suitable for this) +# this method does not force python-openstackclient dependency +- name: Try to show keypair + command: > + python -c 'import shade; cloud = shade.openstack_cloud(); + exit(cloud.get_keypair("{{ openstack_ssh_public_key }}") is None)' + ignore_errors: yes + register: key_result +- name: Check that keypair is available + assert: + that: 'key_result.rc == 0' + msg: "Keypair {{ openstack_ssh_public_key }} is not available" + +# Check that custom images are available +- include: custom_image_check.yaml + with_items: + - "{{ openstack_master_image }}" + - "{{ openstack_infra_image }}" + - "{{ openstack_node_image }}" + - "{{ openstack_lb_image }}" + - "{{ openstack_etcd_image }}" + - "{{ openstack_dns_image }}" + loop_control: + loop_var: image + +# Check that custom flavors are available +- include: custom_flavor_check.yaml + with_items: + - "{{ master_flavor }}" + - "{{ infra_flavor }}" + - "{{ node_flavor }}" + - "{{ lb_flavor }}" + - "{{ etcd_flavor }}" + - "{{ dns_flavor }}" + loop_control: + loop_var: flavor diff --git a/roles/openshift_openstack/tasks/cleanup.yml b/roles/openshift_openstack/tasks/cleanup.yml new file mode 100644 index 000000000..258334a6b --- /dev/null +++ b/roles/openshift_openstack/tasks/cleanup.yml @@ -0,0 +1,6 @@ +--- + +- name: cleanup temp files + file: + path: "{{ stack_template_pre.path }}" + state: absent diff --git a/roles/openshift_openstack/tasks/container-storage-setup.yml b/roles/openshift_openstack/tasks/container-storage-setup.yml new file mode 100644 index 000000000..5cd48ca2c --- /dev/null +++ b/roles/openshift_openstack/tasks/container-storage-setup.yml @@ -0,0 +1,37 @@ +--- +- block: + - name: create the docker-storage config file + template: + src: "{{ role_path }}/templates/docker-storage-setup-overlayfs.j2" + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0644 + when: + - ansible_distribution_version | version_compare('7.4', '>=') + - ansible_distribution == "RedHat" + +- block: + - name: create the docker-storage-setup config file + template: + src: "{{ role_path }}/templates/docker-storage-setup-dm.j2" + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0644 + when: + - ansible_distribution_version | version_compare('7.4', '<') + - ansible_distribution == "RedHat" + +- block: + - name: create the docker-storage-setup config file for CentOS + template: + src: "{{ role_path }}/templates/docker-storage-setup-dm.j2" + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0644 + + # TODO(shadower): Find out which CentOS version supports overlayfs2 + when: + - ansible_distribution == "CentOS" diff --git a/roles/openshift_openstack/tasks/custom_flavor_check.yaml b/roles/openshift_openstack/tasks/custom_flavor_check.yaml new file mode 100644 index 000000000..e11874c28 --- /dev/null +++ 
b/roles/openshift_openstack/tasks/custom_flavor_check.yaml @@ -0,0 +1,9 @@ +--- +- name: Try to get flavor facts + os_flavor_facts: + name: "{{ flavor }}" + register: flavor_result +- name: Check that custom flavor is available + assert: + that: "flavor_result.ansible_facts.openstack_flavors" + msg: "Flavor {{ flavor }} is not available." diff --git a/roles/openshift_openstack/tasks/custom_image_check.yaml b/roles/openshift_openstack/tasks/custom_image_check.yaml new file mode 100644 index 000000000..4fbd6a687 --- /dev/null +++ b/roles/openshift_openstack/tasks/custom_image_check.yaml @@ -0,0 +1,10 @@ +--- +- name: Try to get image facts + os_image_facts: + image: "{{ image }}" + register: image_result + +- name: Check that custom image is available + assert: + that: "image_result.ansible_facts.openstack_image" + msg: "Image {{ image }} is not available." diff --git a/roles/openshift_openstack/tasks/generate-templates.yml b/roles/openshift_openstack/tasks/generate-templates.yml new file mode 100644 index 000000000..0ff50a095 --- /dev/null +++ b/roles/openshift_openstack/tasks/generate-templates.yml @@ -0,0 +1,26 @@ +--- +- name: create HOT stack template prefix + register: stack_template_pre + tempfile: + state: directory + prefix: openshift-ansible + +- name: set template paths + set_fact: + stack_template_path: "{{ stack_template_pre.path }}/stack.yaml" + user_data_template_path: "{{ stack_template_pre.path }}/user-data" + +- name: generate HOT stack template from jinja2 template + template: + src: heat_stack.yaml.j2 + dest: "{{ stack_template_path }}" + +- name: generate HOT server template from jinja2 template + template: + src: heat_stack_server.yaml.j2 + dest: "{{ stack_template_pre.path }}/server.yaml" + +- name: generate user_data from jinja2 template + template: + src: user_data.j2 + dest: "{{ user_data_template_path }}" diff --git a/roles/openshift_openstack/tasks/hostname.yml b/roles/openshift_openstack/tasks/hostname.yml new file mode 100644 index 000000000..0fc8fbc4c --- /dev/null +++ b/roles/openshift_openstack/tasks/hostname.yml @@ -0,0 +1,33 @@ +--- +- name: "Verify hostname" + command: hostnamectl status --static + register: hostname_fqdn + +- name: "Set hostname if required" + when: hostname_fqdn.stdout != ansible_fqdn + block: + - name: Setting Hostname Fact + set_fact: + new_hostname: "{{ custom_hostname | default(inventory_hostname_short) }}" + + - name: Setting FQDN Fact + set_fact: + new_fqdn: "{{ new_hostname }}.{{ full_dns_domain }}" + + - name: Setting hostname and DNS domain + hostname: name="{{ new_fqdn }}" + + - name: Check for cloud.cfg + stat: path=/etc/cloud/cloud.cfg + register: cloud_cfg + + - name: Prevent cloud-init updates of hostname/fqdn (if applicable) + lineinfile: + dest: /etc/cloud/cloud.cfg + state: present + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: '^ - set_hostname', line: '# - set_hostname' } + - { regexp: '^ - update_hostname', line: '# - update_hostname' } + when: cloud_cfg.stat.exists == True diff --git a/roles/openshift_openstack/tasks/net_vars_check.yaml b/roles/openshift_openstack/tasks/net_vars_check.yaml new file mode 100644 index 000000000..68afde415 --- /dev/null +++ b/roles/openshift_openstack/tasks/net_vars_check.yaml @@ -0,0 +1,14 @@ +--- +- name: Check the provider network configuration + fail: + msg: "Flannel SDN requires a dedicated containers data network and can not work over a provider network" + when: + - openstack_provider_network_name is defined + - 
openstack_private_data_network_name is defined + +- name: Check the flannel network configuration + fail: + msg: "A dedicated containers data network is only supported with Flannel SDN" + when: + - openstack_private_data_network_name is defined + - not openshift_use_flannel|default(False)|bool diff --git a/roles/openshift_openstack/tasks/node-configuration.yml b/roles/openshift_openstack/tasks/node-configuration.yml new file mode 100644 index 000000000..8a6a8022f --- /dev/null +++ b/roles/openshift_openstack/tasks/node-configuration.yml @@ -0,0 +1,11 @@ +--- +- include: hostname.yml + +- include: container-storage-setup.yml + +- include: node-network.yml + +- name: "Verify SELinux is enforcing" + fail: + msg: "SELinux is required for OpenShift and has been detected as '{{ ansible_selinux.config_mode }}'" + when: ansible_selinux.config_mode != "enforcing" diff --git a/roles/openshift_openstack/tasks/node-network.yml b/roles/openshift_openstack/tasks/node-network.yml new file mode 100644 index 000000000..f494e5158 --- /dev/null +++ b/roles/openshift_openstack/tasks/node-network.yml @@ -0,0 +1,19 @@ +--- +- name: configure NetworkManager + lineinfile: + dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}" + regexp: '^{{ item }}=' + line: '{{ item }}=yes' + state: present + create: yes + with_items: + - 'USE_PEERDNS' + - 'NM_CONTROLLED' + +- name: enable and start NetworkManager + service: + name: NetworkManager + state: restarted + enabled: yes + +# TODO(shadower): add the flannel interface tasks from post-provision-openstack.yml diff --git a/roles/openshift_openstack/tasks/node-packages.yml b/roles/openshift_openstack/tasks/node-packages.yml new file mode 100644 index 000000000..c65eaec3b --- /dev/null +++ b/roles/openshift_openstack/tasks/node-packages.yml @@ -0,0 +1,15 @@ +--- +# TODO: subscribe to RHEL and install docker and other packages here + +- name: Install required packages + yum: + name: "{{ item }}" + state: latest + with_items: "{{ required_packages }}" + +- name: Install debug packages (optional) + yum: + name: "{{ item }}" + state: latest + with_items: "{{ debug_packages }}" + when: install_debug_packages|bool diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml new file mode 100644 index 000000000..f1a868a19 --- /dev/null +++ b/roles/openshift_openstack/tasks/populate-dns.yml @@ -0,0 +1,5 @@ +# TODO: use nsupdate to populate the DNS servers using the keys +# specified in the inventory. + +# this is an optional step -- the deployers may do whatever else they +# wish here. 
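As a rough illustration of the nsupdate population described in the TODO above, the task below is a minimal sketch only, not part of this patch. It assumes `dns_records_add` entries shaped like the ones generated by the earlier DNS record tasks (view/zone/server/key_* plus an `entries` list of type/hostname/ip), and that python-dns is available on the control host, which the prerequisite checks already require.

```
# Sketch only: push the generated records with Ansible's nsupdate module.
# Assumes dns_records_add follows the structure built by the DNS record tasks.
- name: "Populate DNS entries via nsupdate (illustrative sketch)"
  nsupdate:
    server: "{{ item.0.server }}"
    zone: "{{ item.0.zone }}"
    key_name: "{{ item.0.key_name }}"
    key_secret: "{{ item.0.key_secret }}"
    key_algorithm: "{{ item.0.key_algorithm | default('hmac-md5') }}"
    record: "{{ item.1.hostname }}"
    type: "{{ item.1.type }}"
    value: "{{ item.1.ip }}"
    state: present
  # run from the control host, which check-prerequisites.yml already verifies has python-dns
  delegate_to: localhost
  with_subelements:
    - "{{ dns_records_add | default([]) }}"
    - entries
```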
diff --git a/roles/openshift_openstack/tasks/prepare-and-format-cinder-volume.yaml b/roles/openshift_openstack/tasks/prepare-and-format-cinder-volume.yaml new file mode 100644 index 000000000..fc51f6dc2 --- /dev/null +++ b/roles/openshift_openstack/tasks/prepare-and-format-cinder-volume.yaml @@ -0,0 +1,59 @@ +--- +- name: Attach the volume to the VM + os_server_volume: + state: present + server: "{{ groups['masters'][0] }}" + volume: "{{ cinder_volume }}" + register: volume_attachment + +- set_fact: + attached_device: >- + {{ volume_attachment['attachments']|json_query("[?volume_id=='" + cinder_volume + "'].device | [0]") }} + +- delegate_to: "{{ groups['masters'][0] }}" + block: + - name: Wait for the device to appear + wait_for: path={{ attached_device }} + + - name: Create a temp directory for mounting the volume + tempfile: + prefix: cinder-volume + state: directory + register: cinder_mount_dir + + - name: Format the device + filesystem: + fstype: "{{ cinder_fs }}" + dev: "{{ attached_device }}" + + - name: Mount the device + mount: + name: "{{ cinder_mount_dir.path }}" + src: "{{ attached_device }}" + state: mounted + fstype: "{{ cinder_fs }}" + + - name: Change mode on the filesystem + file: + path: "{{ cinder_mount_dir.path }}" + state: directory + recurse: true + mode: 0777 + + - name: Unmount the device + mount: + name: "{{ cinder_mount_dir.path }}" + src: "{{ attached_device }}" + state: absent + fstype: "{{ cinder_fs }}" + + - name: Delete the temp directory + file: + name: "{{ cinder_mount_dir.path }}" + state: absent + +- name: Detach the volume from the VM + os_server_volume: + state: absent + server: "{{ groups['masters'][0] }}" + volume: "{{ cinder_volume }}" diff --git a/roles/openshift_openstack/tasks/provision.yml b/roles/openshift_openstack/tasks/provision.yml new file mode 100644 index 000000000..8ebda8100 --- /dev/null +++ b/roles/openshift_openstack/tasks/provision.yml @@ -0,0 +1,30 @@ +--- +- name: Generate the templates + include: generate-templates.yml + when: + - stack_state == 'present' + +- name: Handle the Stack (create/delete) + ignore_errors: False + register: stack_create + os_stack: + name: "{{ stack_name }}" + state: "{{ stack_state }}" + template: "{{ stack_template_path | default(omit) }}" + wait: yes + +- name: Add the new nodes to the inventory + meta: refresh_inventory + +- name: Populate DNS entries + include: populate-dns.yml + when: + - stack_state == 'present' + +- name: CleanUp + include: cleanup.yml + when: + - stack_state == 'present' + +# TODO(shadower): create the registry and PV Cinder volumes if specified +# and include the `prepare-and-format-cinder-volume` tasks to set it up diff --git a/roles/openshift_openstack/tasks/subnet_update_dns_servers.yaml b/roles/openshift_openstack/tasks/subnet_update_dns_servers.yaml new file mode 100644 index 000000000..af28fc98f --- /dev/null +++ b/roles/openshift_openstack/tasks/subnet_update_dns_servers.yaml @@ -0,0 +1,9 @@ +--- +- name: Live update the subnet's DNS servers + os_subnet: + name: openshift-ansible-{{ stack_name }}-subnet + network_name: openshift-ansible-{{ stack_name }}-net + state: present + use_default_subnetpool: yes + dns_nameservers: "{{ [private_dns_server|default(public_dns_nameservers[0])]|union(public_dns_nameservers)|unique }}" + when: not provider_network diff --git a/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 new file mode 100644 index 000000000..b5869feff --- /dev/null +++ 
b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 @@ -0,0 +1,4 @@ +DEVS="{{ docker_dev }}" +VG="{{ docker_vg }}" +DATA_SIZE="{{ docker_data_size }}" +EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}" diff --git a/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 new file mode 100644 index 000000000..d8b4a0276 --- /dev/null +++ b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 @@ -0,0 +1,7 @@ +DEVS="{{ docker_dev }}" +VG="{{ docker_vg }}" +DATA_SIZE="{{ docker_data_size }}" +STORAGE_DRIVER=overlay2 +CONTAINER_ROOT_LV_NAME="{{ container_root_lv_name }}" +CONTAINER_ROOT_LV_MOUNT_PATH="{{ container_root_lv_mount_path }}" +CONTAINER_ROOT_LV_SIZE=100%FREE diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2 new file mode 100644 index 000000000..2359842a5 --- /dev/null +++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2 @@ -0,0 +1,888 @@ +heat_template_version: 2016-10-14 + +description: OpenShift cluster + +parameters: + +outputs: + + etcd_names: + description: Name of the etcds + value: { get_attr: [ etcd, name ] } + + etcd_ips: + description: IPs of the etcds + value: { get_attr: [ etcd, private_ip ] } + + etcd_floating_ips: + description: Floating IPs of the etcds + value: { get_attr: [ etcd, floating_ip ] } + + master_names: + description: Name of the masters + value: { get_attr: [ masters, name ] } + + master_ips: + description: IPs of the masters + value: { get_attr: [ masters, private_ip ] } + + master_floating_ips: + description: Floating IPs of the masters + value: { get_attr: [ masters, floating_ip ] } + + node_names: + description: Name of the nodes + value: { get_attr: [ compute_nodes, name ] } + + node_ips: + description: IPs of the nodes + value: { get_attr: [ compute_nodes, private_ip ] } + + node_floating_ips: + description: Floating IPs of the nodes + value: { get_attr: [ compute_nodes, floating_ip ] } + + infra_names: + description: Name of the nodes + value: { get_attr: [ infra_nodes, name ] } + + infra_ips: + description: IPs of the nodes + value: { get_attr: [ infra_nodes, private_ip ] } + + infra_floating_ips: + description: Floating IPs of the nodes + value: { get_attr: [ infra_nodes, floating_ip ] } + +{% if num_dns|int > 0 %} + dns_name: + description: Name of the DNS + value: + get_attr: + - dns + - name + + dns_floating_ips: + description: Floating IPs of the DNS + value: { get_attr: [ dns, floating_ip ] } + + dns_private_ips: + description: Private IPs of the DNS + value: { get_attr: [ dns, private_ip ] } +{% endif %} + +conditions: + no_floating: {% if provider_network or use_bastion|bool %}true{% else %}false{% endif %} + +resources: + +{% if not provider_network %} + net: + type: OS::Neutron::Net + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + + subnet: + type: OS::Neutron::Subnet + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-subnet + params: + cluster_id: {{ stack_name }} + network: { get_resource: net } + cidr: + str_replace: + template: subnet_24_prefix.0/24 + params: + subnet_24_prefix: {{ subnet_prefix }} + allocation_pools: + - start: + str_replace: + template: subnet_24_prefix.3 + params: + subnet_24_prefix: {{ subnet_prefix }} + end: + str_replace: + template: subnet_24_prefix.254 + params: + subnet_24_prefix: {{ 
subnet_prefix }} + dns_nameservers: +{% for nameserver in dns_nameservers %} + - {{ nameserver }} +{% endfor %} + +{% if openshift_use_flannel|default(False)|bool %} + data_net: + type: OS::Neutron::Net + properties: + name: openshift-ansible-{{ stack_name }}-data-net + port_security_enabled: false + + data_subnet: + type: OS::Neutron::Subnet + properties: + name: openshift-ansible-{{ stack_name }}-data-subnet + network: { get_resource: data_net } + cidr: {{ osm_cluster_network_cidr|default('10.128.0.0/14') }} + gateway_ip: null +{% endif %} + + router: + type: OS::Neutron::Router + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-router + params: + cluster_id: {{ stack_name }} + external_gateway_info: + network: {{ external_network }} + + interface: + type: OS::Neutron::RouterInterface + properties: + router_id: { get_resource: router } + subnet_id: { get_resource: subnet } + +{% endif %} + +# keypair: +# type: OS::Nova::KeyPair +# properties: +# name: +# str_replace: +# template: openshift-ansible-cluster_id-keypair +# params: +# cluster_id: {{ stack_name }} +# public_key: {{ ssh_public_key }} + + common-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-common-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Basic ssh/icmp security group for cluster_id OpenShift cluster + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} +{% if use_bastion|bool %} + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ bastion_ingress_cidr }} +{% endif %} + - direction: ingress + protocol: icmp + remote_ip_prefix: {{ ssh_ingress_cidr }} + +{% if openstack_flat_secgrp|default(False)|bool %} + flat-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-flat-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 4001 + port_range_max: 4001 + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_api_port|default(8443) }} + port_range_max: {{ openshift_master_api_port|default(8443) }} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_console_port|default(8443) }} + port_range_max: {{ openshift_master_console_port|default(8443) }} + - direction: ingress + protocol: tcp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: udp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: tcp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: udp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: tcp + port_range_min: 2224 + port_range_max: 2224 + - direction: ingress + protocol: udp + port_range_min: 5404 + port_range_max: 5405 + - direction: ingress + protocol: tcp + port_range_min: 9090 + port_range_max: 9090 + - direction: ingress + protocol: tcp + port_range_min: 2379 + port_range_max: 2380 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10250 + port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10250 
+ port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 4789 + port_range_max: 4789 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" +{% else %} + master-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-master-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster master + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 4001 + port_range_max: 4001 + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_api_port|default(8443) }} + port_range_max: {{ openshift_master_api_port|default(8443) }} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_console_port|default(8443) }} + port_range_max: {{ openshift_master_console_port|default(8443) }} + - direction: ingress + protocol: tcp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: udp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: tcp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: udp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: tcp + port_range_min: 2224 + port_range_max: 2224 + - direction: ingress + protocol: udp + port_range_min: 5404 + port_range_max: 5405 + - direction: ingress + protocol: tcp + port_range_min: 9090 + port_range_max: 9090 +{% if openshift_use_flannel|default(False)|bool %} + - direction: ingress + protocol: tcp + port_range_min: 2379 + port_range_max: 2379 +{% endif %} + + etcd-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-etcd-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id etcd cluster + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 2379 + port_range_max: 2379 + remote_mode: remote_group_id + remote_group_id: { get_resource: master-secgrp } + - direction: ingress + protocol: tcp + port_range_min: 2380 + port_range_max: 2380 + remote_mode: remote_group_id + + node-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-node-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster nodes + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 10250 + port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 4789 + 
port_range_max: 4789 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" +{% endif %} + + infra-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-infra-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift infrastructure cluster nodes + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 80 + port_range_max: 80 + - direction: ingress + protocol: tcp + port_range_min: 443 + port_range_max: 443 + +{% if num_dns|int > 0 %} + dns-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-dns-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id cluster DNS + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" +{% endif %} + +{% if num_masters|int > 1 or ui_ssh_tunnel|bool %} + lb-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: openshift-ansible-{{ stack_name }}-lb-secgrp + description: Security group for {{ stack_name }} cluster Load Balancer + rules: + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_api_port | default(8443) }} + port_range_max: {{ openshift_master_api_port | default(8443) }} + remote_ip_prefix: {{ lb_ingress_cidr | default(bastion_ingress_cidr) }} +{% if ui_ssh_tunnel|bool %} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_api_port | default(8443) }} + port_range_max: {{ openshift_master_api_port | default(8443) }} + remote_ip_prefix: {{ ssh_ingress_cidr }} +{% endif %} +{% if openshift_master_console_port is defined and openshift_master_console_port != openshift_master_api_port %} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_console_port | default(8443) }} + port_range_max: {{ openshift_master_console_port | default(8443) }} + remote_ip_prefix: {{ lb_ingress_cidr | default(bastion_ingress_cidr) }} +{% endif %} +{% endif %} + + etcd: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_etcd }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: {{ etcd_hostname | default('etcd') }} + cluster_env: {{ public_dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: etcds + cluster_id: {{ stack_name }} + type: etcd + image: {{ openstack_etcd_image | default(openstack_image) }} + flavor: {{ etcd_flavor }} + key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ 
provider_network }} +{% else %} + net: { get_resource: net } + subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} +{% endif %} + secgrp: + - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } + - { get_resource: common-secgrp } + floating_network: + if: + - no_floating + - null + - {{ external_network }} +{% if use_bastion|bool or provider_network %} + attach_float_net: false +{% endif %} + volume_size: {{ etcd_volume_size }} +{% if not provider_network %} + depends_on: + - interface +{% endif %} + +{% if master_server_group_policies|length > 0 %} + master_server_group: + type: OS::Nova::ServerGroup + properties: + name: master_server_group + policies: {{ master_server_group_policies }} +{% endif %} +{% if infra_server_group_policies|length > 0 %} + infra_server_group: + type: OS::Nova::ServerGroup + properties: + name: infra_server_group + policies: {{ infra_server_group_policies }} +{% endif %} +{% if num_masters|int > 1 %} + loadbalancer: + type: OS::Heat::ResourceGroup + properties: + count: 1 + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: {{ lb_hostname | default('lb') }} + cluster_env: {{ public_dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: lb + cluster_id: {{ stack_name }} + type: lb + image: {{ openstack_lb_image | default(openstack_image) }} + flavor: {{ lb_flavor }} + key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ provider_network }} +{% else %} + net: { get_resource: net } + subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} +{% endif %} + secgrp: + - { get_resource: lb-secgrp } + - { get_resource: common-secgrp } +{% if not provider_network %} + floating_network: {{ external_network }} +{% endif %} + volume_size: {{ lb_volume_size }} +{% if not provider_network %} + depends_on: + - interface +{% endif %} +{% endif %} + + masters: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_masters }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: {{ master_hostname | default('master')}} + cluster_env: {{ public_dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: masters + cluster_id: {{ stack_name }} + type: master + image: {{ openstack_master_image | default(openstack_image) }} + flavor: {{ master_flavor }} + key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ provider_network }} +{% else %} + net: { get_resource: net } + subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: true + data_net: { get_resource: data_net } + data_subnet: { get_resource: data_subnet } +{% endif %} +{% endif %} + secgrp: +{% if openstack_flat_secgrp|default(False)|bool %} + - { get_resource: flat-secgrp } +{% else %} + - { get_resource: master-secgrp } + - { get_resource: node-secgrp } +{% if num_etcd|int == 0 
%} + - { get_resource: etcd-secgrp } +{% endif %} +{% endif %} + - { get_resource: common-secgrp } + floating_network: + if: + - no_floating + - null + - {{ external_network }} +{% if use_bastion|bool or provider_network %} + attach_float_net: false +{% endif %} + volume_size: {{ master_volume_size }} +{% if master_server_group_policies|length > 0 %} + scheduler_hints: + group: { get_resource: master_server_group } +{% endif %} +{% if not provider_network %} + depends_on: + - interface +{% endif %} + + compute_nodes: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_nodes }} + removal_policies: + - resource_list: {{ nodes_to_remove }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: sub_type_k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + sub_type_k8s_type: {{ node_hostname | default('app-node') }} + cluster_env: {{ public_dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: nodes + cluster_id: {{ stack_name }} + type: node + subtype: app + node_labels: +{% for k, v in openshift_cluster_node_labels.app.iteritems() %} + {{ k|e }}: {{ v|e }} +{% endfor %} + image: {{ openstack_node_image | default(openstack_image) }} + flavor: {{ node_flavor }} + key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ provider_network }} +{% else %} + net: { get_resource: net } + subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: true + data_net: { get_resource: data_net } + data_subnet: { get_resource: data_subnet } +{% endif %} +{% endif %} + secgrp: + - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } + - { get_resource: common-secgrp } + floating_network: + if: + - no_floating + - null + - {{ external_network }} +{% if use_bastion|bool or provider_network %} + attach_float_net: false +{% endif %} + volume_size: {{ node_volume_size }} +{% if not provider_network %} + depends_on: + - interface +{% endif %} + + infra_nodes: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_infra }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: sub_type_k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + sub_type_k8s_type: {{ infra_hostname | default('infranode') }} + cluster_env: {{ public_dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: infra + cluster_id: {{ stack_name }} + type: node + subtype: infra + node_labels: +{% for k, v in openshift_cluster_node_labels.infra.iteritems() %} + {{ k|e }}: {{ v|e }} +{% endfor %} + image: {{ openstack_infra_image | default(openstack_image) }} + flavor: {{ infra_flavor }} + key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ provider_network }} +{% else %} + net: { get_resource: net } + subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: true + data_net: { get_resource: data_net } + data_subnet: { get_resource: data_subnet } +{% endif %} +{% endif %} + secgrp: +# TODO(bogdando) filter only required node rules into 
infra-secgrp +{% if openstack_flat_secgrp|default(False)|bool %} + - { get_resource: flat-secgrp } +{% else %} + - { get_resource: node-secgrp } +{% endif %} +{% if ui_ssh_tunnel|bool and num_masters|int < 2 %} + - { get_resource: lb-secgrp } +{% endif %} + - { get_resource: infra-secgrp } + - { get_resource: common-secgrp } +{% if not provider_network %} + floating_network: {{ external_network }} +{% endif %} + volume_size: {{ infra_volume_size }} +{% if infra_server_group_policies|length > 0 %} + scheduler_hints: + group: { get_resource: infra_server_group } +{% endif %} +{% if not provider_network %} + depends_on: + - interface +{% endif %} + +{% if num_dns|int > 0 %} + dns: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_dns }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: {{ dns_hostname | default('dns') }} + cluster_env: {{ public_dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: dns + cluster_id: {{ stack_name }} + type: dns + image: {{ openstack_dns_image | default(openstack_image) }} + flavor: {{ dns_flavor }} + key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ provider_network }} +{% else %} + net: { get_resource: net } + subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} +{% endif %} + secgrp: + - { get_resource: dns-secgrp } + - { get_resource: common-secgrp } +{% if not provider_network %} + floating_network: {{ external_network }} +{% endif %} + volume_size: {{ dns_volume_size }} +{% if not provider_network %} + depends_on: + - interface +{% endif %} +{% endif %} diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 new file mode 100644 index 000000000..9ffe721a5 --- /dev/null +++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 @@ -0,0 +1,270 @@ +heat_template_version: 2016-10-14 + +description: OpenShift cluster server + +parameters: + + name: + type: string + label: Name + description: Name + + group: + type: string + label: Host Group + description: The Primary Ansible Host Group + default: host + + cluster_env: + type: string + label: Cluster environment + description: Environment of the cluster + + cluster_id: + type: string + label: Cluster ID + description: Identifier of the cluster + + type: + type: string + label: Type + description: Type master or node + + subtype: + type: string + label: Sub-type + description: Sub-type compute or infra for nodes, default otherwise + default: default + + key_name: + type: string + label: Key name + description: Key name of keypair + + image: + type: string + label: Image + description: Name of the image + + flavor: + type: string + label: Flavor + description: Name of the flavor + + net: + type: string + label: Net ID + description: Net resource + + net_name: + type: string + label: Net name + description: Net name + +{% if not provider_network %} + subnet: + type: string + label: Subnet ID + description: Subnet resource +{% endif %} + +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: + type: boolean + default: false + label: Attach-data-net + description: A switch for data port connection + + data_net: + type: string + default: '' + label: Net ID + description: 
Net resource + +{% if not provider_network %} + data_subnet: + type: string + default: '' + label: Subnet ID + description: Subnet resource +{% endif %} +{% endif %} + + secgrp: + type: comma_delimited_list + label: Security groups + description: Security group resources + + attach_float_net: + type: boolean + default: true + + label: Attach-float-net + description: A switch for floating network port connection + +{% if not provider_network %} + floating_network: + type: string + default: '' + label: Floating network + description: Network to allocate floating IP from +{% endif %} + + availability_zone: + type: string + description: The Availability Zone to launch the instance. + default: nova + + volume_size: + type: number + description: Size of the volume to be created. + default: 1 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + + node_labels: + type: json + description: OpenShift Node Labels + default: {"region": "default" } + + scheduler_hints: + type: json + description: Server scheduler hints. + default: {} + +outputs: + + name: + description: Name of the server + value: { get_attr: [ server, name ] } + + private_ip: + description: Private IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } + - 0 + - addr + + floating_ip: + description: Floating IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } +{% if provider_network %} + - 0 +{% else %} + - 1 +{% endif %} + - addr + +conditions: + no_floating: {not: { get_param: attach_float_net} } +{% if openshift_use_flannel|default(False)|bool %} + no_data_subnet: {not: { get_param: attach_data_net} } +{% endif %} + +resources: + + server: + type: OS::Nova::Server + properties: + name: { get_param: name } + key_name: { get_param: key_name } + image: { get_param: image } + flavor: { get_param: flavor } + networks: +{% if openshift_use_flannel|default(False)|bool %} + if: + - no_data_subnet +{% if use_trunk_ports|default(false)|bool %} + - - port: { get_attr: [trunk-port, port_id] } +{% else %} + - - port: { get_resource: port } +{% endif %} +{% if use_trunk_ports|default(false)|bool %} + - - port: { get_attr: [trunk-port, port_id] } +{% else %} + - - port: { get_resource: port } + - port: { get_resource: data_port } +{% endif %} + +{% else %} +{% if use_trunk_ports|default(false)|bool %} + - port: { get_attr: [trunk-port, port_id] } +{% else %} + - port: { get_resource: port } +{% endif %} +{% endif %} + user_data: + get_file: user-data + user_data_format: RAW + user_data_update_policy: IGNORE + metadata: + group: { get_param: group } + environment: { get_param: cluster_env } + clusterid: { get_param: cluster_id } + host-type: { get_param: type } + sub-host-type: { get_param: subtype } + node_labels: { get_param: node_labels } + scheduler_hints: { get_param: scheduler_hints } + +{% if use_trunk_ports|default(false)|bool %} + trunk-port: + type: OS::Neutron::Trunk + properties: + name: { get_param: name } + port: { get_resource: port } +{% endif %} + + port: + type: OS::Neutron::Port + properties: + network: { get_param: net } +{% if not provider_network %} + fixed_ips: + - subnet: { get_param: subnet } +{% endif %} + security_groups: { get_param: secgrp } + +{% if openshift_use_flannel|default(False)|bool %} + data_port: + type: OS::Neutron::Port + condition: { not: no_data_subnet } + properties: + network: { get_param: data_net } + port_security_enabled: false +{% if not provider_network %} + fixed_ips: + - subnet: { 
get_param: data_subnet } +{% endif %} +{% endif %} + +{% if not provider_network %} + floating-ip: + condition: { not: no_floating } + type: OS::Neutron::FloatingIP + properties: + floating_network: { get_param: floating_network } + port_id: { get_resource: port } +{% endif %} + +{% if not ephemeral_volumes|default(false)|bool %} + cinder_volume: + type: OS::Cinder::Volume + properties: + size: { get_param: volume_size } + availability_zone: { get_param: availability_zone } + + volume_attachment: + type: OS::Cinder::VolumeAttachment + properties: + volume_id: { get_resource: cinder_volume } + instance_uuid: { get_resource: server } + mountpoint: /dev/sdb +{% endif %} diff --git a/roles/openshift_openstack/templates/user_data.j2 b/roles/openshift_openstack/templates/user_data.j2 new file mode 100644 index 000000000..eb65f7cec --- /dev/null +++ b/roles/openshift_openstack/templates/user_data.j2 @@ -0,0 +1,13 @@ +#cloud-config +disable_root: true + +system_info: + default_user: + name: openshift + sudo: ["ALL=(ALL) NOPASSWD: ALL"] + +write_files: + - path: /etc/sudoers.d/00-openshift-no-requiretty + permissions: 440 + content: | + Defaults:openshift !requiretty diff --git a/roles/openshift_openstack/vars/main.yml b/roles/openshift_openstack/vars/main.yml new file mode 100644 index 000000000..a4da31bfe --- /dev/null +++ b/roles/openshift_openstack/vars/main.yml @@ -0,0 +1,49 @@ +--- +stack_name: "{{ env_id }}.{{ public_dns_domain }}" +dns_domain: "{{ public_dns_domain }}" +dns_nameservers: "{{ public_dns_nameservers }}" +subnet_prefix: "{{ openstack_subnet_prefix }}" +master_hostname: "{{ openstack_master_hostname | default('master') }}" +infra_hostname: "{{ openstack_infra_hostname | default('infra-node') }}" +node_hostname: "{{ openstack_node_hostname | default('app-node') }}" +lb_hostname: "{{ openstack_lb_hostname | default('lb') }}" +etcd_hostname: "{{ openstack_etcd_hostname | default('etcd') }}" +dns_hostname: "{{ openstack_dns_hostname | default('dns') }}" +ssh_public_key: "{{ openstack_ssh_public_key }}" +openstack_image: "{{ openstack_default_image_name }}" +lb_flavor: "{{ openstack_lb_flavor | default(openstack_default_flavor) }}" +etcd_flavor: "{{ openstack_etcd_flavor | default(openstack_default_flavor) }}" +master_flavor: "{{ openstack_master_flavor | default(openstack_default_flavor) }}" +node_flavor: "{{ openstack_node_flavor | default(openstack_default_flavor) }}" +infra_flavor: "{{ openstack_infra_flavor | default(openstack_default_flavor) }}" +dns_flavor: "{{ openstack_dns_flavor | default(openstack_default_flavor) }}" +openstack_master_image: "{{ openstack_master_image_name | default(openstack_default_image_name) }}" +openstack_infra_image: "{{ openstack_infra_image_name | default(openstack_default_image_name) }}" +openstack_node_image: "{{ openstack_node_image_name | default(openstack_default_image_name) }}" +openstack_lb_image: "{{ openstack_lb_image_name | default(openstack_default_image_name) }}" +openstack_etcd_image: "{{ openstack_etcd_image_name | default(openstack_default_image_name) }}" +openstack_dns_image: "{{ openstack_dns_image_name | default(openstack_default_image_name) }}" +openstack_private_network: >- + {% if openstack_provider_network_name | default(None) -%} + {{ openstack_provider_network_name }} + {%- else -%} + {{ openstack_private_network_name | default ('openshift-ansible-' + stack_name + '-net') }} + {%- endif -%} +provider_network: "{{ openstack_provider_network_name | default(None) }}" +external_network: "{{ openstack_external_network_name | 
default(None) }}" +num_etcd: "{{ openstack_num_etcd | default(0) }}" +num_masters: "{{ openstack_num_masters }}" +num_nodes: "{{ openstack_num_nodes }}" +num_infra: "{{ openstack_num_infra }}" +num_dns: "{{ openstack_num_dns | default(1) }}" +master_server_group_policies: "{{ openstack_master_server_group_policies | default([]) | to_yaml }}" +infra_server_group_policies: "{{ openstack_infra_server_group_policies | default([]) | to_yaml }}" +master_volume_size: "{{ docker_master_volume_size | default(docker_volume_size) }}" +infra_volume_size: "{{ docker_infra_volume_size | default(docker_volume_size) }}" +node_volume_size: "{{ docker_node_volume_size | default(docker_volume_size) }}" +etcd_volume_size: "{{ docker_etcd_volume_size | default('2') }}" +dns_volume_size: "{{ docker_dns_volume_size | default('1') }}" +lb_volume_size: "{{ docker_lb_volume_size | default('5') }}" +nodes_to_remove: "{{ openstack_nodes_to_remove | default([]) | to_yaml }}" +use_bastion: "{{ openstack_use_bastion|default(False) }}" +ui_ssh_tunnel: "{{ openshift_ui_ssh_tunnel|default(False) }}" diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml index 983567026..0348f53ce 100644 --- a/roles/openstack-stack/tasks/main.yml +++ b/roles/openstack-stack/tasks/main.yml @@ -1,5 +1,4 @@ --- - - name: Generate the templates include: generate-templates.yml when: diff --git a/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml b/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml deleted file mode 100644 index af28fc98f..000000000 --- a/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: Live update the subnet's DNS servers - os_subnet: - name: openshift-ansible-{{ stack_name }}-subnet - network_name: openshift-ansible-{{ stack_name }}-net - state: present - use_default_subnetpool: yes - dns_nameservers: "{{ [private_dns_server|default(public_dns_nameservers[0])]|union(public_dns_nameservers)|unique }}" - when: not provider_network -- cgit v1.2.1 From 63fb0c74fcb0adf4cd3b0b2b5d30e34e29a58796 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 27 Oct 2017 17:27:51 +0200 Subject: Remove the extra roles The `openstack-stack` role is now under `openshift_openstack` and the `openstack-create-cinder-registry` one will be added there, later. 
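With the stack logic folded into `openshift_openstack`, a provisioning play would now pull the role in along these lines. This is a sketch under the assumption that `tasks/provision.yml` remains the entry point; the play targeting is illustrative only.

```
# Hypothetical invocation of the consolidated role after this cleanup.
- hosts: localhost
  gather_facts: yes
  tasks:
    - name: Provision the OpenShift Heat stack
      include_role:
        name: openshift_openstack
        tasks_from: provision.yml
```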
--- .../tasks/main.yaml | 5 - roles/openstack-stack/README.md | 9 - roles/openstack-stack/defaults/main.yml | 21 - roles/openstack-stack/meta/main.yml | 3 - roles/openstack-stack/tasks/cleanup.yml | 6 - roles/openstack-stack/tasks/generate-templates.yml | 26 - roles/openstack-stack/tasks/main.yml | 26 - roles/openstack-stack/templates/heat_stack.yaml.j2 | 888 --------------------- .../templates/heat_stack_server.yaml.j2 | 270 ------- roles/openstack-stack/templates/user_data.j2 | 13 - roles/openstack-stack/test/roles | 1 - roles/openstack-stack/test/stack-create-test.yml | 18 - roles/static_inventory/defaults/main.yml | 29 - roles/static_inventory/meta/main.yml | 3 - roles/static_inventory/tasks/checkpoint.yml | 17 - .../tasks/filter_out_new_app_nodes.yaml | 15 - roles/static_inventory/tasks/main.yml | 25 - roles/static_inventory/tasks/openstack.yml | 120 --- roles/static_inventory/tasks/sshconfig.yml | 13 - roles/static_inventory/tasks/sshtun.yml | 15 - roles/static_inventory/templates/inventory.j2 | 104 --- .../templates/openstack_ssh_config.j2 | 21 - .../templates/ssh-tunnel.service.j2 | 20 - 23 files changed, 1668 deletions(-) delete mode 100644 roles/openstack-create-cinder-registry/tasks/main.yaml delete mode 100644 roles/openstack-stack/README.md delete mode 100644 roles/openstack-stack/defaults/main.yml delete mode 100644 roles/openstack-stack/meta/main.yml delete mode 100644 roles/openstack-stack/tasks/cleanup.yml delete mode 100644 roles/openstack-stack/tasks/generate-templates.yml delete mode 100644 roles/openstack-stack/tasks/main.yml delete mode 100644 roles/openstack-stack/templates/heat_stack.yaml.j2 delete mode 100644 roles/openstack-stack/templates/heat_stack_server.yaml.j2 delete mode 100644 roles/openstack-stack/templates/user_data.j2 delete mode 120000 roles/openstack-stack/test/roles delete mode 100644 roles/openstack-stack/test/stack-create-test.yml delete mode 100644 roles/static_inventory/defaults/main.yml delete mode 100644 roles/static_inventory/meta/main.yml delete mode 100644 roles/static_inventory/tasks/checkpoint.yml delete mode 100644 roles/static_inventory/tasks/filter_out_new_app_nodes.yaml delete mode 100644 roles/static_inventory/tasks/main.yml delete mode 100644 roles/static_inventory/tasks/openstack.yml delete mode 100644 roles/static_inventory/tasks/sshconfig.yml delete mode 100644 roles/static_inventory/tasks/sshtun.yml delete mode 100644 roles/static_inventory/templates/inventory.j2 delete mode 100644 roles/static_inventory/templates/openstack_ssh_config.j2 delete mode 100644 roles/static_inventory/templates/ssh-tunnel.service.j2 (limited to 'roles') diff --git a/roles/openstack-create-cinder-registry/tasks/main.yaml b/roles/openstack-create-cinder-registry/tasks/main.yaml deleted file mode 100644 index 6e9d1c2e7..000000000 --- a/roles/openstack-create-cinder-registry/tasks/main.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -- os_volume: - display_name: "{{ cinder_hosted_registry_name }}" - size: "{{ cinder_hosted_registry_size_gb }}" - register: cinder_registry_volume diff --git a/roles/openstack-stack/README.md b/roles/openstack-stack/README.md deleted file mode 100644 index 32a2b49f1..000000000 --- a/roles/openstack-stack/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# Role openstack-stack - -Role for spinning up instances using OpenStack Heat. 
- -## To Test - -``` -ansible-playbook openshift-ansible-contrib/roles/openstack-stack/test/stack-create-test.yml -``` diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml deleted file mode 100644 index a24e684cc..000000000 --- a/roles/openstack-stack/defaults/main.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- - -stack_state: 'present' - -ssh_ingress_cidr: 0.0.0.0/0 -node_ingress_cidr: 0.0.0.0/0 -master_ingress_cidr: 0.0.0.0/0 -lb_ingress_cidr: 0.0.0.0/0 -bastion_ingress_cidr: 0.0.0.0/0 -num_etcd: 0 -num_masters: 1 -num_nodes: 1 -num_dns: 1 -num_infra: 1 -nodes_to_remove: [] -etcd_volume_size: 2 -dns_volume_size: 1 -lb_volume_size: 5 -use_bastion: False -ui_ssh_tunnel: False -provider_network: False diff --git a/roles/openstack-stack/meta/main.yml b/roles/openstack-stack/meta/main.yml deleted file mode 100644 index fdda41bb3..000000000 --- a/roles/openstack-stack/meta/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -dependencies: - - role: common diff --git a/roles/openstack-stack/tasks/cleanup.yml b/roles/openstack-stack/tasks/cleanup.yml deleted file mode 100644 index 258334a6b..000000000 --- a/roles/openstack-stack/tasks/cleanup.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- - -- name: cleanup temp files - file: - path: "{{ stack_template_pre.path }}" - state: absent diff --git a/roles/openstack-stack/tasks/generate-templates.yml b/roles/openstack-stack/tasks/generate-templates.yml deleted file mode 100644 index 0ff50a095..000000000 --- a/roles/openstack-stack/tasks/generate-templates.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: create HOT stack template prefix - register: stack_template_pre - tempfile: - state: directory - prefix: openshift-ansible - -- name: set template paths - set_fact: - stack_template_path: "{{ stack_template_pre.path }}/stack.yaml" - user_data_template_path: "{{ stack_template_pre.path }}/user-data" - -- name: generate HOT stack template from jinja2 template - template: - src: heat_stack.yaml.j2 - dest: "{{ stack_template_path }}" - -- name: generate HOT server template from jinja2 template - template: - src: heat_stack_server.yaml.j2 - dest: "{{ stack_template_pre.path }}/server.yaml" - -- name: generate user_data from jinja2 template - template: - src: user_data.j2 - dest: "{{ user_data_template_path }}" diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml deleted file mode 100644 index 0348f53ce..000000000 --- a/roles/openstack-stack/tasks/main.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: Generate the templates - include: generate-templates.yml - when: - - stack_state == 'present' - -- name: Handle the Stack (create/delete) - ignore_errors: False - register: stack_create - os_stack: - name: "{{ stack_name }}" - state: "{{ stack_state }}" - template: "{{ stack_template_path | default(omit) }}" - wait: yes - -# NOTE(bogdando) OS::Neutron::Subnet doesn't support live updates for -# dns_nameservers, so we can't do that for the "create stack" task. 
-- include: subnet_update_dns_servers.yaml - when: - - private_dns_server is defined - - stack_state == 'present' - -- name: CleanUp - include: cleanup.yml - when: - - stack_state == 'present' diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 deleted file mode 100644 index 2359842a5..000000000 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ /dev/null @@ -1,888 +0,0 @@ -heat_template_version: 2016-10-14 - -description: OpenShift cluster - -parameters: - -outputs: - - etcd_names: - description: Name of the etcds - value: { get_attr: [ etcd, name ] } - - etcd_ips: - description: IPs of the etcds - value: { get_attr: [ etcd, private_ip ] } - - etcd_floating_ips: - description: Floating IPs of the etcds - value: { get_attr: [ etcd, floating_ip ] } - - master_names: - description: Name of the masters - value: { get_attr: [ masters, name ] } - - master_ips: - description: IPs of the masters - value: { get_attr: [ masters, private_ip ] } - - master_floating_ips: - description: Floating IPs of the masters - value: { get_attr: [ masters, floating_ip ] } - - node_names: - description: Name of the nodes - value: { get_attr: [ compute_nodes, name ] } - - node_ips: - description: IPs of the nodes - value: { get_attr: [ compute_nodes, private_ip ] } - - node_floating_ips: - description: Floating IPs of the nodes - value: { get_attr: [ compute_nodes, floating_ip ] } - - infra_names: - description: Name of the nodes - value: { get_attr: [ infra_nodes, name ] } - - infra_ips: - description: IPs of the nodes - value: { get_attr: [ infra_nodes, private_ip ] } - - infra_floating_ips: - description: Floating IPs of the nodes - value: { get_attr: [ infra_nodes, floating_ip ] } - -{% if num_dns|int > 0 %} - dns_name: - description: Name of the DNS - value: - get_attr: - - dns - - name - - dns_floating_ips: - description: Floating IPs of the DNS - value: { get_attr: [ dns, floating_ip ] } - - dns_private_ips: - description: Private IPs of the DNS - value: { get_attr: [ dns, private_ip ] } -{% endif %} - -conditions: - no_floating: {% if provider_network or use_bastion|bool %}true{% else %}false{% endif %} - -resources: - -{% if not provider_network %} - net: - type: OS::Neutron::Net - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} - - subnet: - type: OS::Neutron::Subnet - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-subnet - params: - cluster_id: {{ stack_name }} - network: { get_resource: net } - cidr: - str_replace: - template: subnet_24_prefix.0/24 - params: - subnet_24_prefix: {{ subnet_prefix }} - allocation_pools: - - start: - str_replace: - template: subnet_24_prefix.3 - params: - subnet_24_prefix: {{ subnet_prefix }} - end: - str_replace: - template: subnet_24_prefix.254 - params: - subnet_24_prefix: {{ subnet_prefix }} - dns_nameservers: -{% for nameserver in dns_nameservers %} - - {{ nameserver }} -{% endfor %} - -{% if openshift_use_flannel|default(False)|bool %} - data_net: - type: OS::Neutron::Net - properties: - name: openshift-ansible-{{ stack_name }}-data-net - port_security_enabled: false - - data_subnet: - type: OS::Neutron::Subnet - properties: - name: openshift-ansible-{{ stack_name }}-data-subnet - network: { get_resource: data_net } - cidr: {{ osm_cluster_network_cidr|default('10.128.0.0/14') }} - gateway_ip: null -{% endif %} - - router: - type: OS::Neutron::Router - properties: - name: - str_replace: - 
template: openshift-ansible-cluster_id-router - params: - cluster_id: {{ stack_name }} - external_gateway_info: - network: {{ external_network }} - - interface: - type: OS::Neutron::RouterInterface - properties: - router_id: { get_resource: router } - subnet_id: { get_resource: subnet } - -{% endif %} - -# keypair: -# type: OS::Nova::KeyPair -# properties: -# name: -# str_replace: -# template: openshift-ansible-cluster_id-keypair -# params: -# cluster_id: {{ stack_name }} -# public_key: {{ ssh_public_key }} - - common-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-common-secgrp - params: - cluster_id: {{ stack_name }} - description: - str_replace: - template: Basic ssh/icmp security group for cluster_id OpenShift cluster - params: - cluster_id: {{ stack_name }} - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: {{ ssh_ingress_cidr }} -{% if use_bastion|bool %} - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: {{ bastion_ingress_cidr }} -{% endif %} - - direction: ingress - protocol: icmp - remote_ip_prefix: {{ ssh_ingress_cidr }} - -{% if openstack_flat_secgrp|default(False)|bool %} - flat-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-flat-secgrp - params: - cluster_id: {{ stack_name }} - description: - str_replace: - template: Security group for cluster_id OpenShift cluster - params: - cluster_id: {{ stack_name }} - rules: - - direction: ingress - protocol: tcp - port_range_min: 4001 - port_range_max: 4001 - - direction: ingress - protocol: tcp - port_range_min: {{ openshift_master_api_port|default(8443) }} - port_range_max: {{ openshift_master_api_port|default(8443) }} - - direction: ingress - protocol: tcp - port_range_min: {{ openshift_master_console_port|default(8443) }} - port_range_max: {{ openshift_master_console_port|default(8443) }} - - direction: ingress - protocol: tcp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: udp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: tcp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: udp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: tcp - port_range_min: 2224 - port_range_max: 2224 - - direction: ingress - protocol: udp - port_range_min: 5404 - port_range_max: 5405 - - direction: ingress - protocol: tcp - port_range_min: 9090 - port_range_max: 9090 - - direction: ingress - protocol: tcp - port_range_min: 2379 - port_range_max: 2380 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 10250 - port_range_max: 10250 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 10250 - port_range_max: 10250 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 4789 - port_range_max: 4789 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 30000 - port_range_max: 32767 - remote_ip_prefix: {{ node_ingress_cidr }} - - direction: ingress - protocol: tcp - port_range_min: 30000 - port_range_max: 32767 - 
remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" -{% else %} - master-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-master-secgrp - params: - cluster_id: {{ stack_name }} - description: - str_replace: - template: Security group for cluster_id OpenShift cluster master - params: - cluster_id: {{ stack_name }} - rules: - - direction: ingress - protocol: tcp - port_range_min: 4001 - port_range_max: 4001 - - direction: ingress - protocol: tcp - port_range_min: {{ openshift_master_api_port|default(8443) }} - port_range_max: {{ openshift_master_api_port|default(8443) }} - - direction: ingress - protocol: tcp - port_range_min: {{ openshift_master_console_port|default(8443) }} - port_range_max: {{ openshift_master_console_port|default(8443) }} - - direction: ingress - protocol: tcp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: udp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: tcp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: udp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: tcp - port_range_min: 2224 - port_range_max: 2224 - - direction: ingress - protocol: udp - port_range_min: 5404 - port_range_max: 5405 - - direction: ingress - protocol: tcp - port_range_min: 9090 - port_range_max: 9090 -{% if openshift_use_flannel|default(False)|bool %} - - direction: ingress - protocol: tcp - port_range_min: 2379 - port_range_max: 2379 -{% endif %} - - etcd-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-etcd-secgrp - params: - cluster_id: {{ stack_name }} - description: - str_replace: - template: Security group for cluster_id etcd cluster - params: - cluster_id: {{ stack_name }} - rules: - - direction: ingress - protocol: tcp - port_range_min: 2379 - port_range_max: 2379 - remote_mode: remote_group_id - remote_group_id: { get_resource: master-secgrp } - - direction: ingress - protocol: tcp - port_range_min: 2380 - port_range_max: 2380 - remote_mode: remote_group_id - - node-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-node-secgrp - params: - cluster_id: {{ stack_name }} - description: - str_replace: - template: Security group for cluster_id OpenShift cluster nodes - params: - cluster_id: {{ stack_name }} - rules: - - direction: ingress - protocol: tcp - port_range_min: 10250 - port_range_max: 10250 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 4789 - port_range_max: 4789 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 30000 - port_range_max: 32767 - remote_ip_prefix: {{ node_ingress_cidr }} - - direction: ingress - protocol: tcp - port_range_min: 30000 - port_range_max: 32767 - remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" -{% endif %} - - infra-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-infra-secgrp - params: - cluster_id: {{ stack_name }} - description: - str_replace: - template: Security group for cluster_id OpenShift infrastructure cluster 
nodes - params: - cluster_id: {{ stack_name }} - rules: - - direction: ingress - protocol: tcp - port_range_min: 80 - port_range_max: 80 - - direction: ingress - protocol: tcp - port_range_min: 443 - port_range_max: 443 - -{% if num_dns|int > 0 %} - dns-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-dns-secgrp - params: - cluster_id: {{ stack_name }} - description: - str_replace: - template: Security group for cluster_id cluster DNS - params: - cluster_id: {{ stack_name }} - rules: - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: {{ node_ingress_cidr }} - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" - - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: {{ node_ingress_cidr }} - - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" -{% endif %} - -{% if num_masters|int > 1 or ui_ssh_tunnel|bool %} - lb-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: openshift-ansible-{{ stack_name }}-lb-secgrp - description: Security group for {{ stack_name }} cluster Load Balancer - rules: - - direction: ingress - protocol: tcp - port_range_min: {{ openshift_master_api_port | default(8443) }} - port_range_max: {{ openshift_master_api_port | default(8443) }} - remote_ip_prefix: {{ lb_ingress_cidr | default(bastion_ingress_cidr) }} -{% if ui_ssh_tunnel|bool %} - - direction: ingress - protocol: tcp - port_range_min: {{ openshift_master_api_port | default(8443) }} - port_range_max: {{ openshift_master_api_port | default(8443) }} - remote_ip_prefix: {{ ssh_ingress_cidr }} -{% endif %} -{% if openshift_master_console_port is defined and openshift_master_console_port != openshift_master_api_port %} - - direction: ingress - protocol: tcp - port_range_min: {{ openshift_master_console_port | default(8443) }} - port_range_max: {{ openshift_master_console_port | default(8443) }} - remote_ip_prefix: {{ lb_ingress_cidr | default(bastion_ingress_cidr) }} -{% endif %} -{% endif %} - - etcd: - type: OS::Heat::ResourceGroup - properties: - count: {{ num_etcd }} - resource_def: - type: server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id - params: - cluster_id: {{ stack_name }} - k8s_type: {{ etcd_hostname | default('etcd') }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} - group: - str_replace: - template: k8s_type.cluster_id - params: - k8s_type: etcds - cluster_id: {{ stack_name }} - type: etcd - image: {{ openstack_etcd_image | default(openstack_image) }} - flavor: {{ etcd_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} -{% else %} - net: { get_resource: net } - subnet: { get_resource: subnet } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} -{% endif %} - secgrp: - - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } - - { get_resource: common-secgrp } - floating_network: - if: - - no_floating - - null - - {{ external_network }} -{% if use_bastion|bool or provider_network %} - attach_float_net: false -{% endif %} - volume_size: {{ etcd_volume_size }} -{% if not provider_network %} - depends_on: 
- - interface -{% endif %} - -{% if master_server_group_policies|length > 0 %} - master_server_group: - type: OS::Nova::ServerGroup - properties: - name: master_server_group - policies: {{ master_server_group_policies }} -{% endif %} -{% if infra_server_group_policies|length > 0 %} - infra_server_group: - type: OS::Nova::ServerGroup - properties: - name: infra_server_group - policies: {{ infra_server_group_policies }} -{% endif %} -{% if num_masters|int > 1 %} - loadbalancer: - type: OS::Heat::ResourceGroup - properties: - count: 1 - resource_def: - type: server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id - params: - cluster_id: {{ stack_name }} - k8s_type: {{ lb_hostname | default('lb') }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} - group: - str_replace: - template: k8s_type.cluster_id - params: - k8s_type: lb - cluster_id: {{ stack_name }} - type: lb - image: {{ openstack_lb_image | default(openstack_image) }} - flavor: {{ lb_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} -{% else %} - net: { get_resource: net } - subnet: { get_resource: subnet } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} -{% endif %} - secgrp: - - { get_resource: lb-secgrp } - - { get_resource: common-secgrp } -{% if not provider_network %} - floating_network: {{ external_network }} -{% endif %} - volume_size: {{ lb_volume_size }} -{% if not provider_network %} - depends_on: - - interface -{% endif %} -{% endif %} - - masters: - type: OS::Heat::ResourceGroup - properties: - count: {{ num_masters }} - resource_def: - type: server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id - params: - cluster_id: {{ stack_name }} - k8s_type: {{ master_hostname | default('master')}} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} - group: - str_replace: - template: k8s_type.cluster_id - params: - k8s_type: masters - cluster_id: {{ stack_name }} - type: master - image: {{ openstack_master_image | default(openstack_image) }} - flavor: {{ master_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} -{% else %} - net: { get_resource: net } - subnet: { get_resource: subnet } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} -{% if openshift_use_flannel|default(False)|bool %} - attach_data_net: true - data_net: { get_resource: data_net } - data_subnet: { get_resource: data_subnet } -{% endif %} -{% endif %} - secgrp: -{% if openstack_flat_secgrp|default(False)|bool %} - - { get_resource: flat-secgrp } -{% else %} - - { get_resource: master-secgrp } - - { get_resource: node-secgrp } -{% if num_etcd|int == 0 %} - - { get_resource: etcd-secgrp } -{% endif %} -{% endif %} - - { get_resource: common-secgrp } - floating_network: - if: - - no_floating - - null - - {{ external_network }} -{% if use_bastion|bool or provider_network %} - attach_float_net: false -{% endif %} - volume_size: {{ master_volume_size }} -{% if master_server_group_policies|length > 0 %} - scheduler_hints: - group: { get_resource: master_server_group } -{% endif %} -{% if not provider_network %} - depends_on: - - interface -{% endif %} - - compute_nodes: - type: OS::Heat::ResourceGroup - properties: - count: {{ num_nodes }} - removal_policies: - - 
resource_list: {{ nodes_to_remove }} - resource_def: - type: server.yaml - properties: - name: - str_replace: - template: sub_type_k8s_type-%index%.cluster_id - params: - cluster_id: {{ stack_name }} - sub_type_k8s_type: {{ node_hostname | default('app-node') }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} - group: - str_replace: - template: k8s_type.cluster_id - params: - k8s_type: nodes - cluster_id: {{ stack_name }} - type: node - subtype: app - node_labels: -{% for k, v in openshift_cluster_node_labels.app.iteritems() %} - {{ k|e }}: {{ v|e }} -{% endfor %} - image: {{ openstack_node_image | default(openstack_image) }} - flavor: {{ node_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} -{% else %} - net: { get_resource: net } - subnet: { get_resource: subnet } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} -{% if openshift_use_flannel|default(False)|bool %} - attach_data_net: true - data_net: { get_resource: data_net } - data_subnet: { get_resource: data_subnet } -{% endif %} -{% endif %} - secgrp: - - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } - - { get_resource: common-secgrp } - floating_network: - if: - - no_floating - - null - - {{ external_network }} -{% if use_bastion|bool or provider_network %} - attach_float_net: false -{% endif %} - volume_size: {{ node_volume_size }} -{% if not provider_network %} - depends_on: - - interface -{% endif %} - - infra_nodes: - type: OS::Heat::ResourceGroup - properties: - count: {{ num_infra }} - resource_def: - type: server.yaml - properties: - name: - str_replace: - template: sub_type_k8s_type-%index%.cluster_id - params: - cluster_id: {{ stack_name }} - sub_type_k8s_type: {{ infra_hostname | default('infranode') }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} - group: - str_replace: - template: k8s_type.cluster_id - params: - k8s_type: infra - cluster_id: {{ stack_name }} - type: node - subtype: infra - node_labels: -{% for k, v in openshift_cluster_node_labels.infra.iteritems() %} - {{ k|e }}: {{ v|e }} -{% endfor %} - image: {{ openstack_infra_image | default(openstack_image) }} - flavor: {{ infra_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} -{% else %} - net: { get_resource: net } - subnet: { get_resource: subnet } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} -{% if openshift_use_flannel|default(False)|bool %} - attach_data_net: true - data_net: { get_resource: data_net } - data_subnet: { get_resource: data_subnet } -{% endif %} -{% endif %} - secgrp: -# TODO(bogdando) filter only required node rules into infra-secgrp -{% if openstack_flat_secgrp|default(False)|bool %} - - { get_resource: flat-secgrp } -{% else %} - - { get_resource: node-secgrp } -{% endif %} -{% if ui_ssh_tunnel|bool and num_masters|int < 2 %} - - { get_resource: lb-secgrp } -{% endif %} - - { get_resource: infra-secgrp } - - { get_resource: common-secgrp } -{% if not provider_network %} - floating_network: {{ external_network }} -{% endif %} - volume_size: {{ infra_volume_size }} -{% if infra_server_group_policies|length > 0 %} - scheduler_hints: - group: { get_resource: infra_server_group } -{% endif %} -{% if not provider_network %} - depends_on: - 
- interface -{% endif %} - -{% if num_dns|int > 0 %} - dns: - type: OS::Heat::ResourceGroup - properties: - count: {{ num_dns }} - resource_def: - type: server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id - params: - cluster_id: {{ stack_name }} - k8s_type: {{ dns_hostname | default('dns') }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} - group: - str_replace: - template: k8s_type.cluster_id - params: - k8s_type: dns - cluster_id: {{ stack_name }} - type: dns - image: {{ openstack_dns_image | default(openstack_image) }} - flavor: {{ dns_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} -{% else %} - net: { get_resource: net } - subnet: { get_resource: subnet } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} -{% endif %} - secgrp: - - { get_resource: dns-secgrp } - - { get_resource: common-secgrp } -{% if not provider_network %} - floating_network: {{ external_network }} -{% endif %} - volume_size: {{ dns_volume_size }} -{% if not provider_network %} - depends_on: - - interface -{% endif %} -{% endif %} diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 deleted file mode 100644 index 9ffe721a5..000000000 --- a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 +++ /dev/null @@ -1,270 +0,0 @@ -heat_template_version: 2016-10-14 - -description: OpenShift cluster server - -parameters: - - name: - type: string - label: Name - description: Name - - group: - type: string - label: Host Group - description: The Primary Ansible Host Group - default: host - - cluster_env: - type: string - label: Cluster environment - description: Environment of the cluster - - cluster_id: - type: string - label: Cluster ID - description: Identifier of the cluster - - type: - type: string - label: Type - description: Type master or node - - subtype: - type: string - label: Sub-type - description: Sub-type compute or infra for nodes, default otherwise - default: default - - key_name: - type: string - label: Key name - description: Key name of keypair - - image: - type: string - label: Image - description: Name of the image - - flavor: - type: string - label: Flavor - description: Name of the flavor - - net: - type: string - label: Net ID - description: Net resource - - net_name: - type: string - label: Net name - description: Net name - -{% if not provider_network %} - subnet: - type: string - label: Subnet ID - description: Subnet resource -{% endif %} - -{% if openshift_use_flannel|default(False)|bool %} - attach_data_net: - type: boolean - default: false - label: Attach-data-net - description: A switch for data port connection - - data_net: - type: string - default: '' - label: Net ID - description: Net resource - -{% if not provider_network %} - data_subnet: - type: string - default: '' - label: Subnet ID - description: Subnet resource -{% endif %} -{% endif %} - - secgrp: - type: comma_delimited_list - label: Security groups - description: Security group resources - - attach_float_net: - type: boolean - default: true - - label: Attach-float-net - description: A switch for floating network port connection - -{% if not provider_network %} - floating_network: - type: string - default: '' - label: Floating network - description: Network to allocate floating IP from -{% endif %} - - availability_zone: - type: string - 
description: The Availability Zone to launch the instance. - default: nova - - volume_size: - type: number - description: Size of the volume to be created. - default: 1 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - - node_labels: - type: json - description: OpenShift Node Labels - default: {"region": "default" } - - scheduler_hints: - type: json - description: Server scheduler hints. - default: {} - -outputs: - - name: - description: Name of the server - value: { get_attr: [ server, name ] } - - private_ip: - description: Private IP of the server - value: - get_attr: - - server - - addresses - - { get_param: net_name } - - 0 - - addr - - floating_ip: - description: Floating IP of the server - value: - get_attr: - - server - - addresses - - { get_param: net_name } -{% if provider_network %} - - 0 -{% else %} - - 1 -{% endif %} - - addr - -conditions: - no_floating: {not: { get_param: attach_float_net} } -{% if openshift_use_flannel|default(False)|bool %} - no_data_subnet: {not: { get_param: attach_data_net} } -{% endif %} - -resources: - - server: - type: OS::Nova::Server - properties: - name: { get_param: name } - key_name: { get_param: key_name } - image: { get_param: image } - flavor: { get_param: flavor } - networks: -{% if openshift_use_flannel|default(False)|bool %} - if: - - no_data_subnet -{% if use_trunk_ports|default(false)|bool %} - - - port: { get_attr: [trunk-port, port_id] } -{% else %} - - - port: { get_resource: port } -{% endif %} -{% if use_trunk_ports|default(false)|bool %} - - - port: { get_attr: [trunk-port, port_id] } -{% else %} - - - port: { get_resource: port } - - port: { get_resource: data_port } -{% endif %} - -{% else %} -{% if use_trunk_ports|default(false)|bool %} - - port: { get_attr: [trunk-port, port_id] } -{% else %} - - port: { get_resource: port } -{% endif %} -{% endif %} - user_data: - get_file: user-data - user_data_format: RAW - user_data_update_policy: IGNORE - metadata: - group: { get_param: group } - environment: { get_param: cluster_env } - clusterid: { get_param: cluster_id } - host-type: { get_param: type } - sub-host-type: { get_param: subtype } - node_labels: { get_param: node_labels } - scheduler_hints: { get_param: scheduler_hints } - -{% if use_trunk_ports|default(false)|bool %} - trunk-port: - type: OS::Neutron::Trunk - properties: - name: { get_param: name } - port: { get_resource: port } -{% endif %} - - port: - type: OS::Neutron::Port - properties: - network: { get_param: net } -{% if not provider_network %} - fixed_ips: - - subnet: { get_param: subnet } -{% endif %} - security_groups: { get_param: secgrp } - -{% if openshift_use_flannel|default(False)|bool %} - data_port: - type: OS::Neutron::Port - condition: { not: no_data_subnet } - properties: - network: { get_param: data_net } - port_security_enabled: false -{% if not provider_network %} - fixed_ips: - - subnet: { get_param: data_subnet } -{% endif %} -{% endif %} - -{% if not provider_network %} - floating-ip: - condition: { not: no_floating } - type: OS::Neutron::FloatingIP - properties: - floating_network: { get_param: floating_network } - port_id: { get_resource: port } -{% endif %} - -{% if not ephemeral_volumes|default(false)|bool %} - cinder_volume: - type: OS::Cinder::Volume - properties: - size: { get_param: volume_size } - availability_zone: { get_param: availability_zone } - - volume_attachment: - type: OS::Cinder::VolumeAttachment - properties: - volume_id: { get_resource: cinder_volume } - instance_uuid: { 
get_resource: server } - mountpoint: /dev/sdb -{% endif %} diff --git a/roles/openstack-stack/templates/user_data.j2 b/roles/openstack-stack/templates/user_data.j2 deleted file mode 100644 index eb65f7cec..000000000 --- a/roles/openstack-stack/templates/user_data.j2 +++ /dev/null @@ -1,13 +0,0 @@ -#cloud-config -disable_root: true - -system_info: - default_user: - name: openshift - sudo: ["ALL=(ALL) NOPASSWD: ALL"] - -write_files: - - path: /etc/sudoers.d/00-openshift-no-requiretty - permissions: 440 - content: | - Defaults:openshift !requiretty diff --git a/roles/openstack-stack/test/roles b/roles/openstack-stack/test/roles deleted file mode 120000 index e2b799b9d..000000000 --- a/roles/openstack-stack/test/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles/ \ No newline at end of file diff --git a/roles/openstack-stack/test/stack-create-test.yml b/roles/openstack-stack/test/stack-create-test.yml deleted file mode 100644 index d80472193..000000000 --- a/roles/openstack-stack/test/stack-create-test.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- hosts: localhost - gather_facts: True - become: False - roles: - - role: openstack-stack - stack_name: test-stack - dns_domain: "{{ public_dns_domain }}" - dns_nameservers: "{{ public_dns_nameservers }}" - subnet_prefix: "{{ openstack_subnet_prefix }}" - ssh_public_key: "{{ openstack_ssh_public_key }}" - openstack_image: "{{ openstack_default_image_name }}" - etcd_flavor: "{{ openstack_default_flavor }}" - master_flavor: "{{ openstack_default_flavor }}" - node_flavor: "{{ openstack_default_flavor }}" - infra_flavor: "{{ openstack_default_flavor }}" - dns_flavor: "{{ openstack_default_flavor }}" - external_network: "{{ openstack_external_network_name }}" diff --git a/roles/static_inventory/defaults/main.yml b/roles/static_inventory/defaults/main.yml deleted file mode 100644 index 871700f8c..000000000 --- a/roles/static_inventory/defaults/main.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -# Either to checkpoint the dynamic inventory into a static one -refresh_inventory: True -inventory: static -inventory_path: ~/openstack-inventory - -# Either to configure bastion -use_bastion: true - -# SSH user/key/options to access hosts via bastion -ssh_user: openshift -ssh_options: >- - -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no - -o ConnectTimeout=90 -o ControlMaster=auto -o ControlPersist=270s - -o ServerAliveInterval=30 -o GSSAPIAuthentication=no - -# SSH key to access nodes -private_ssh_key: ~/.ssh/openshift - -# The patch to store the generated config to access bastion/hosts -ssh_config_path: /tmp/ssh.config.ansible - -# The IP:port to make an SSH tunnel to access UI on the 1st master -# via bastion node (requires sudo on the ansible control node) -ui_ssh_tunnel: False -ui_port: "{{ openshift_master_api_port | default(8443) }}" -target_ip: "{{ hostvars[groups['masters.' 
+ stack_name|quote][0]].private_v4 }}" - -openstack_private_network: private diff --git a/roles/static_inventory/meta/main.yml b/roles/static_inventory/meta/main.yml deleted file mode 100644 index fdda41bb3..000000000 --- a/roles/static_inventory/meta/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -dependencies: - - role: common diff --git a/roles/static_inventory/tasks/checkpoint.yml b/roles/static_inventory/tasks/checkpoint.yml deleted file mode 100644 index c0365bd3d..000000000 --- a/roles/static_inventory/tasks/checkpoint.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -- name: check for static inventory dir - stat: - path: "{{ inventory_path }}" - register: stat_inventory_path - -- name: create static inventory dir - file: - path: "{{ inventory_path }}" - state: directory - mode: 0750 - when: not stat_inventory_path.stat.exists - -- name: create inventory from template - template: - src: inventory.j2 - dest: "{{ inventory_path }}/hosts" diff --git a/roles/static_inventory/tasks/filter_out_new_app_nodes.yaml b/roles/static_inventory/tasks/filter_out_new_app_nodes.yaml deleted file mode 100644 index 826efe78d..000000000 --- a/roles/static_inventory/tasks/filter_out_new_app_nodes.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- name: Add all new app nodes to new_app_nodes - when: - - 'oc_old_app_nodes is defined' - - 'oc_old_app_nodes | list' - - 'node.name not in oc_old_app_nodes' - - 'node["metadata"]["sub-host-type"] == "app"' - register: result - set_fact: - new_app_nodes: '{{ new_app_nodes }} + [ {{ node }} ]' - -- name: If the node was added to new_nodes, remove it from registered nodes - set_fact: - registered_nodes: '{{ registered_nodes | difference([ node ]) }}' - when: 'not result | skipped' diff --git a/roles/static_inventory/tasks/main.yml b/roles/static_inventory/tasks/main.yml deleted file mode 100644 index 3dab62df2..000000000 --- a/roles/static_inventory/tasks/main.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Remove any existing inventory - file: - path: "{{ inventory_path }}/hosts" - state: absent - -- name: Refresh the inventory - meta: refresh_inventory - -- name: Generate in-memory inventory - include: openstack.yml - -- name: Checkpoint in-memory data into a static inventory - include: checkpoint.yml - -- name: Generate SSH config for accessing hosts via bastion - include: sshconfig.yml - when: use_bastion|bool - -- name: Configure SSH tunneling to access UI - include: sshtun.yml - become: true - when: - - use_bastion|bool - - ui_ssh_tunnel|bool diff --git a/roles/static_inventory/tasks/openstack.yml b/roles/static_inventory/tasks/openstack.yml deleted file mode 100644 index adf78c966..000000000 --- a/roles/static_inventory/tasks/openstack.yml +++ /dev/null @@ -1,120 +0,0 @@ ---- -- no_log: true - block: - - name: fetch all nodes from openstack shade dynamic inventory - command: shade-inventory --list - register: registered_nodes_output - when: refresh_inventory|bool - - - name: set fact for openstack inventory cluster nodes - set_fact: - registered_nodes: "{{ (registered_nodes_output.stdout | from_json) | json_query(q) }}" - vars: - q: "[] | [?metadata.clusterid=='{{stack_name}}']" - when: - - refresh_inventory|bool - - - name: set_fact for openstack inventory nodes - set_fact: - registered_bastion_nodes: "{{ (registered_nodes_output.stdout | from_json) | json_query(q) }}" - registered_nodes_floating: "{{ (registered_nodes_output.stdout | from_json) | json_query(q2) }}" - vars: - q: "[] | [?metadata.group=='infra.{{stack_name}}']" - q2: "[] | 
[?metadata.clusterid=='{{stack_name}}'] | [?public_v4!='']" - when: - - refresh_inventory|bool - - - name: set_fact for openstack inventory nodes with provider network - set_fact: - registered_nodes_floating: "{{ (registered_nodes_output.stdout | from_json) | json_query(q) }}" - vars: - q: "[] | [?metadata.clusterid=='{{stack_name}}'] | [?public_v4=='']" - when: - - refresh_inventory|bool - - openstack_provider_network_name|default(None) - - - name: Add cluster nodes w/o floating IPs to inventory - with_items: "{{ registered_nodes|difference(registered_nodes_floating) }}" - add_host: - name: '{{ item.name }}' - ansible_host: >- - {% if use_bastion|bool -%} - {{ item.name }} - {%- else -%} - {%- set node = registered_nodes | json_query("[?name=='" + item.name + "']") -%} - {{ node[0].addresses[openstack_private_network|quote][0].addr }} - {%- endif %} - ansible_fqdn: '{{ item.name }}' - ansible_user: '{{ ssh_user }}' - ansible_private_key_file: '{{ private_ssh_key }}' - ansible_ssh_extra_args: '-F {{ ssh_config_path }}' - private_v4: >- - {% set node = registered_nodes | json_query("[?name=='" + item.name + "']") -%} - {{ node[0].addresses[openstack_private_network|quote][0].addr }} - - - name: Add cluster nodes with floating IPs to inventory - with_items: "{{ registered_nodes_floating }}" - add_host: - name: '{{ item.name }}' - ansible_host: >- - {% if use_bastion|bool -%} - {{ item.name }} - {%- elif openstack_provider_network_name|default(None) -%} - {{ item.private_v4 }} - {%- else -%} - {{ item.public_v4 }} - {%- endif %} - ansible_fqdn: '{{ item.name }}' - ansible_user: '{{ ssh_user }}' - ansible_private_key_file: '{{ private_ssh_key }}' - ansible_ssh_extra_args: '-F {{ ssh_config_path }}' - private_v4: >- - {% set node = registered_nodes | json_query("[?name=='" + item.name + "']") -%} - {{ node[0].addresses[openstack_private_network|quote][0].addr }} - public_v4: >- - {% if openstack_provider_network_name|default(None) -%} - {{ item.private_v4 }} - {%- else -%} - {{ item.public_v4 }} - {%- endif %} - - # Split registered_nodes into old nodes and new app nodes - # Add new app nodes to new_nodes host group for upscaling - - name: Create new_app_nodes variable - set_fact: - new_app_nodes: [] - - - name: Filter new app nodes out of registered_nodes - include: filter_out_new_app_nodes.yaml - with_items: "{{ registered_nodes }}" - loop_control: - loop_var: node - - - name: Add new app nodes to the new_nodes section (if a deployment already exists) - with_items: "{{ new_app_nodes }}" - add_host: - name: "{{ item.name }}" - groups: new_nodes, app - - - name: Add the rest of cluster nodes to their corresponding groups - with_items: "{{ registered_nodes }}" - add_host: - name: '{{ item.name }}' - groups: '{{ item.metadata.group }}' - - - name: Add bastion node to inventory - add_host: - name: bastion - groups: bastions - ansible_host: '{{ registered_bastion_nodes[0].public_v4 }}' - ansible_fqdn: '{{ registered_bastion_nodes[0].name }}' - ansible_user: '{{ ssh_user }}' - ansible_private_key_file: '{{ private_ssh_key }}' - ansible_ssh_extra_args: '-F {{ ssh_config_path }}' - private_v4: >- - {% set node = registered_nodes | json_query("[?name=='" + registered_bastion_nodes[0].name + "']") -%} - {{ node[0].addresses[openstack_private_network|quote][0].addr }} - public_v4: '{{ registered_bastion_nodes[0].public_v4 }}' - when: - - registered_bastion_nodes is defined - - use_bastion|bool diff --git a/roles/static_inventory/tasks/sshconfig.yml b/roles/static_inventory/tasks/sshconfig.yml deleted 
file mode 100644 index 7119fe6ff..000000000 --- a/roles/static_inventory/tasks/sshconfig.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- name: set ssh proxy command prefix for accessing nodes via bastion - set_fact: - ssh_proxy_command: >- - ssh {{ ssh_options }} - -i {{ private_ssh_key }} - {{ ssh_user }}@{{ hostvars['bastion'].ansible_host }} - -- name: regenerate ssh config - template: - src: openstack_ssh_config.j2 - dest: "{{ ssh_config_path }}" - mode: 0644 diff --git a/roles/static_inventory/tasks/sshtun.yml b/roles/static_inventory/tasks/sshtun.yml deleted file mode 100644 index b0e4c832c..000000000 --- a/roles/static_inventory/tasks/sshtun.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- name: Create ssh tunnel systemd service - template: - src: ssh-tunnel.service.j2 - dest: /etc/systemd/system/ssh-tunnel.service - mode: 0644 - -- name: reload the systemctl daemon after file update - command: systemctl daemon-reload - -- name: Enable ssh tunnel service - service: - name: ssh-tunnel - enabled: true - state: restarted diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 deleted file mode 100644 index 9dfbe3a5b..000000000 --- a/roles/static_inventory/templates/inventory.j2 +++ /dev/null @@ -1,104 +0,0 @@ -# BEGIN Autogenerated hosts -{% for host in groups['all'] %} -{% if hostvars[host].get('ansible_connection', '') == 'local' %} -{{ host }} ansible_connection=local -{% else %} - -{{ host }}{% if 'ansible_host' in hostvars[host] -%} ansible_host={{ hostvars[host]['ansible_host'] }}{% endif %} -{% if 'private_v4' in hostvars[host] -%} private_v4={{ hostvars[host]['private_v4'] }}{% endif %} -{% if 'public_v4' in hostvars[host] -%} public_v4={{ hostvars[host]['public_v4'] }}{% endif %} -{% if 'ansible_user' in hostvars[host] -%} ansible_user={{ hostvars[host]['ansible_user'] }}{% endif %} -{% if 'ansible_private_key_file' in hostvars[host] and hostvars[host]['ansible_private_key_file'] -%} ansible_private_key_file={{ hostvars[host]['ansible_private_key_file'] }}{% endif %} -{% if use_bastion|bool and 'ansible_ssh_extra_args' in hostvars[host] -%} ansible_ssh_extra_args={{ hostvars[host]['ansible_ssh_extra_args']|quote }}{% endif %} openshift_hostname={{ host }} - -{% endif %} -{% endfor %} -# END autogenerated hosts - -#[all:vars] -# For all group_vars, see ./group_vars/all.yml -[infra_hosts:vars] -openshift_node_labels={{ openshift_cluster_node_labels.infra | to_json | quote }} - -[app:vars] -openshift_node_labels={{ openshift_cluster_node_labels.app | to_json | quote }} - -# Create an OSEv3 group that contains the master, nodes, etcd, and lb groups. -# The lb group lets Ansible configure HAProxy as the load balancing solution. -# Comment lb out if your load balancer is pre-configured. 
-[cluster_hosts:children] -OSEv3 -dns - -[OSEv3:children] -nodes -etcd -lb -new_nodes - -# Set variables common for all OSEv3 hosts -[OSEv3:vars] - -# For OSEv3 normal group vars, see ./group_vars/OSEv3.yml - -{% if cinder_registry_volume is defined and 'volume' in cinder_registry_volume %} -openshift_hosted_registry_storage_openstack_volumeID="{{ cinder_registry_volume.id }}" -openshift_hosted_registry_storage_volume_size="{{ cinder_registry_volume.volume.size }}Gi" -{% endif %} - - -# Host Groups - -[masters:children] -masters.{{ stack_name }} - -[etcd:children] -etcd.{{ stack_name }} -{% if 'etcd' not in groups or groups['etcd']|length == 0 %}masters.{{ stack_name }}{% endif %} - -[nodes:children] -masters -infra.{{ stack_name }} -nodes.{{ stack_name }} - -[infra_hosts:children] -infra.{{ stack_name }} - -[app:children] -nodes.{{ stack_name }} - -[dns:children] -dns.{{ stack_name }} - -[lb:children] -lb.{{ stack_name }} - -[new_nodes:children] - -# Empty placeholders for all groups of the cluster nodes -[masters.{{ stack_name }}] -[etcd.{{ stack_name }}] -[infra.{{ stack_name }}] -[nodes.{{ stack_name }}] -[app.{{ stack_name }}] -[dns.{{ stack_name }}] -[lb.{{ stack_name }}] -[new_nodes.{{ stack_name }}] - -# BEGIN Autogenerated groups -{% for group in groups %} -{% if group not in ['ungrouped', 'all'] %} -[{{ group }}] -{% for host in groups[group] %} -{{ host }} -{% endfor %} - -{% endif %} -{% endfor %} -# END Autogenerated groups diff --git a/roles/static_inventory/templates/openstack_ssh_config.j2 b/roles/static_inventory/templates/openstack_ssh_config.j2 deleted file mode 100644 index ad5d1253a..000000000 --- a/roles/static_inventory/templates/openstack_ssh_config.j2 +++ /dev/null @@ -1,21 +0,0 @@ -Host * - IdentitiesOnly yes - -Host bastion - Hostname {{ hostvars['bastion'].ansible_host }} - IdentityFile {{ hostvars['bastion'].ansible_private_key_file }} - User {{ ssh_user }} - StrictHostKeyChecking no - UserKnownHostsFile=/dev/null - -{% for host in groups['all'] | difference(groups['bastions'][0]) %} - -Host {{ host }} - Hostname {{ hostvars[host].ansible_host }} - ProxyCommand {{ ssh_proxy_command }} -W {{ hostvars[host].private_v4 }}:22 - IdentityFile {{ hostvars[host].ansible_private_key_file }} - User {{ ssh_user }} - StrictHostKeyChecking no - UserKnownHostsFile=/dev/null - -{% endfor %} diff --git a/roles/static_inventory/templates/ssh-tunnel.service.j2 b/roles/static_inventory/templates/ssh-tunnel.service.j2 deleted file mode 100644 index 0d1cf8f79..000000000 --- a/roles/static_inventory/templates/ssh-tunnel.service.j2 +++ /dev/null @@ -1,20 +0,0 @@ -[Unit] -Description=Set up ssh tunneling for OpenShift cluster UI -After=network.target - -[Service] -ExecStart=/usr/bin/ssh -NT -o \ - ServerAliveInterval=60 -o \ - UserKnownHostsFile=/dev/null -o \ - StrictHostKeyChecking=no -o \ - ExitOnForwardFailure=no -i \ - {{ private_ssh_key }} {{ ssh_user }}@{{ hostvars['bastion'].ansible_host }} \ - -L 0.0.0.0:{{ ui_port }}:{{ target_ip }}:{{ ui_port }} - - -# Restart every >2 seconds to avoid StartLimitInterval failure -RestartSec=5 -Restart=always - -[Install] -WantedBy=multi-user.target -- cgit v1.2.1 From fabf16250b3947a04fc3b3bcb9b6fc7c1265651b Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Mon, 30 Oct 2017 17:53:02 +0100 Subject: Add a stub of the dns record update code in This will mostly not work but it's a starting point. 
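The populate-dns tasks added below pull their TSIG settings from an `external_nsupdate_keys` dict keyed by view (`private`, with `public` still commented out), each view carrying `server`, `key_name`, `key_secret` and `key_algorithm` entries. As a hedged illustration, such a variable might be supplied roughly like this; every value is a placeholder and only the key names come from the patch:

```
# Placeholder values for illustration; only the structure is taken
# from the populate-dns tasks in this patch.
external_nsupdate_keys:
  private:
    server: "192.168.0.2"                      # DNS server reachable from the deployment
    key_name: "private-openshift.example.com"  # defaults to 'private-' + full_dns_domain if omitted
    key_secret: "<base64-encoded TSIG secret>"
    key_algorithm: "HMAC-MD5"                  # lower-cased by the tasks before use
```

These values are passed straight through to the `nsupdate` module in the record add/remove loops that follow.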
--- roles/openshift_openstack/defaults/main.yml | 6 + roles/openshift_openstack/tasks/populate-dns.yml | 167 +++++++++++++++++++++++ 2 files changed, 173 insertions(+) (limited to 'roles') diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml index 05f1c0911..19e6e6f51 100644 --- a/roles/openshift_openstack/defaults/main.yml +++ b/roles/openshift_openstack/defaults/main.yml @@ -47,3 +47,9 @@ docker_data_size: "95%VG" docker_dm_basesize: "3G" container_root_lv_name: "dockerlv" container_root_lv_mount_path: "/var/lib/docker" + + +# populate-dns +dns_records_rm: [] +dns_records_add: [] +external_nsupdate_keys: {} diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml index f1a868a19..c8243dc1f 100644 --- a/roles/openshift_openstack/tasks/populate-dns.yml +++ b/roles/openshift_openstack/tasks/populate-dns.yml @@ -3,3 +3,170 @@ # this is an optional step -- the deployers may do whatever else they # wish here. + + +# TODO: build records +# TODO: run nsupdate + + +- name: "Generate list of private A records" + set_fact: + private_records: "{{ [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}" + with_items: "{{ groups['cluster_hosts'] }}" + +# - name: "Add wildcard records to the private A records for infrahosts" +# set_fact: +# private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['private_v4'] } ] }}" +# with_items: "{{ groups['infra_hosts'] }}" + +# - name: "Add public master cluster hostname records to the private A records (single master)" +# set_fact: +# private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].private_v4 } ] }}" +# when: +# - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined +# - openstack_num_masters == 1 + +# - name: "Add public master cluster hostname records to the private A records (multi-master)" +# set_fact: +# private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].private_v4 } ] }}" +# when: +# - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined +# - openstack_num_masters > 1 + +- name: "Set the private DNS server to use the external value (if provided)" + set_fact: + nsupdate_server_private: "{{ external_nsupdate_keys['private']['server'] }}" + nsupdate_key_secret_private: "{{ external_nsupdate_keys['private']['key_secret'] }}" + nsupdate_key_algorithm_private: "{{ external_nsupdate_keys['private']['key_algorithm'] }}" + nsupdate_private_key_name: "{{ external_nsupdate_keys['private']['key_name']|default('private-' + full_dns_domain) }}" + when: + - external_nsupdate_keys is defined + - external_nsupdate_keys['private'] is defined + + +- name: "Generate the private Add section for DNS" + set_fact: + private_named_records: + - view: "private" + zone: "{{ full_dns_domain }}" + server: "{{ nsupdate_server_private }}" + key_name: "{{ nsupdate_private_key_name|default('private-' + full_dns_domain) }}" + key_secret: "{{ nsupdate_key_secret_private }}" + key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}" + entries: "{{ private_records }}" + 
+# - name: "Generate list of public A records" +# set_fact: +# public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['public_v4'] } ] }}" +# with_items: "{{ groups['cluster_hosts'] }}" +# when: hostvars[item]['public_v4'] is defined + +# - name: "Add wildcard records to the public A records" +# set_fact: +# public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['public_v4'] } ] }}" +# with_items: "{{ groups['infra_hosts'] }}" +# when: hostvars[item]['public_v4'] is defined + +# - name: "Add public master cluster hostname records to the public A records (single master)" +# set_fact: +# public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].public_v4 } ] }}" +# when: +# - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined +# - openstack_num_masters == 1 +# - not use_bastion|bool + +# - name: "Add public master cluster hostname records to the public A records (single master behind a bastion)" +# set_fact: +# public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}" +# when: +# - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined +# - openstack_num_masters == 1 +# - use_bastion|bool + +# - name: "Add public master cluster hostname records to the public A records (multi-master)" +# set_fact: +# public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].public_v4 } ] }}" +# when: +# - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined +# - openstack_num_masters > 1 + +# - name: "Set the public DNS server details to use the external value (if provided)" +# set_fact: +# nsupdate_server_public: "{{ external_nsupdate_keys['public']['server'] }}" +# nsupdate_key_secret_public: "{{ external_nsupdate_keys['public']['key_secret'] }}" +# nsupdate_key_algorithm_public: "{{ external_nsupdate_keys['public']['key_algorithm'] }}" +# nsupdate_public_key_name: "{{ external_nsupdate_keys['public']['key_name']|default('public-' + full_dns_domain) }}" +# when: +# - external_nsupdate_keys is defined +# - external_nsupdate_keys['public'] is defined + +# - name: "Set the public DNS server details to use the provisioned value" +# set_fact: +# nsupdate_server_public: "{{ hostvars[groups['dns'][0]].public_v4 }}" +# nsupdate_key_secret_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_secret }}" +# nsupdate_key_algorithm_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_algorithm }}" +# when: +# - nsupdate_server_public is undefined + +# - name: "Generate the public Add section for DNS" +# set_fact: +# public_named_records: +# - view: "public" +# zone: "{{ full_dns_domain }}" +# server: "{{ nsupdate_server_public }}" +# key_name: "{{ nsupdate_public_key_name|default('public-' + full_dns_domain) }}" +# key_secret: "{{ nsupdate_key_secret_public }}" +# key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" 
+# entries: "{{ public_records }}" + + + + + + +- name: "Generate the final dns_records_add" + set_fact: + # TODO(shadower): enable this when we add public records + #dns_records_add: "{{ private_named_records + public_named_records }}" + dns_records_add: "{{ private_named_records }}" + + + +# RUN NSUPDATE + +- name: "Remove any deleted DNS A records" + nsupdate: + key_name: "{{ item.0.key_name }}" + key_secret: "{{ item.0.key_secret }}" + key_algorithm: "{{ item.0.key_algorithm }}" + server: "{{ item.0.server }}" + zone: "{{ item.0.zone }}" + record: "{{ item.1.hostname }}" + type: "{{ item.1.type }}" + state: absent + with_subelements: + - "{{ dns_records_rm | default({}) }}" + - entries + register: nsupdate_remove_result + until: nsupdate_remove_result|succeeded + retries: 10 + delay: 1 + +- name: "Add DNS A records" + nsupdate: + key_name: "{{ item.0.key_name }}" + key_secret: "{{ item.0.key_secret }}" + key_algorithm: "{{ item.0.key_algorithm }}" + server: "{{ item.0.server }}" + zone: "{{ item.0.zone }}" + record: "{{ item.1.hostname }}" + value: "{{ item.1.ip }}" + type: "{{ item.1.type }}" + state: present + with_subelements: + - "{{ dns_records_add | default({}) }}" + - entries + register: nsupdate_add_result + until: nsupdate_add_result|succeeded + retries: 10 + delay: 1 -- cgit v1.2.1 From 84259a3ed4ac741ee782f57884ba36729e277eae Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 1 Nov 2017 11:14:10 +0100 Subject: Remove the subscription-manager role The repo already contains the `rhel_subscribe` role so we should use that instead. --- roles/subscription-manager/README.md | 156 --------------------- roles/subscription-manager/pre_tasks/pre_tasks.yml | 45 ------ roles/subscription-manager/tasks/main.yml | 150 -------------------- 3 files changed, 351 deletions(-) delete mode 100644 roles/subscription-manager/README.md delete mode 100644 roles/subscription-manager/pre_tasks/pre_tasks.yml delete mode 100644 roles/subscription-manager/tasks/main.yml (limited to 'roles') diff --git a/roles/subscription-manager/README.md b/roles/subscription-manager/README.md deleted file mode 100644 index 748de282c..000000000 --- a/roles/subscription-manager/README.md +++ /dev/null @@ -1,156 +0,0 @@ -# Red Hat Subscription Manager Ansible Role - -## Parameters - -This role depends on user specified variables. These can be set in the inventory file, group_vars or passed to the playbook from the CLI. No values are set by default which disables this role. The variables are: - -### rhsm_satellite - -Subscription Manager server hostname. If using a Satellite server set the FQDN here. If using RHSM Hosted this value must be left blank, none or false. - -Default: none - -### rhsm_username - -Subscription Manager username. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this. - -Default: none - -### rhsm_password - -Subscription Manager password. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this. - -NOTE: If this variable is specified on the command-line or set in a variable file it may leave your password exposed. For this reason you may perfer to use an Activation Key if using Satellite. For RHSM Hosted, your password must be specified. There are two ways to provide the password to the Ansible playbook without exposing it to prying eyes. - -1. The first method is to use a **vars_prompt** to collect the password up front one time for the playbook. 
Ansible will not display the password if the prompt is configured as **private** and the task will not display the password on the CLI. This is the a good method as it supports automating the task to every host with only one password entry. To enable **vars_prompt** add the following to the very top of your playbook after the **hosts** declaration and before any **pre_tasks** section: - - ``` - - hosts: localhost - # Add the following lines after a -hosts: declaration and before pre_tasks: - # Start of vars_prompt code block - vars_prompt: - - name: "rhsm_password" - prompt: "Subscription Manager password" - confirm: yes - private: yes - # End of vars_prompt code block - pre_tasks: - ``` - -2. A second method is to use an encrypted file via **ansible-vault**. This does does not require modifying any code as the previous method, but does require more work to create and encrypt the file. To accomplish this, first create a file containing at least the **rhsm_password** variable (it is also possible to specify additional variables to encrypt them all as well): - 1. Create a file to contain the variable such as **secrets.yml**: - - ``` - --- - rhsm_password: "my_secret_password" - # other variables can optionally be placed here as well - ``` - - 2. Encrypt the file with **ansible-vault**: - - ``` - $ ansible-vault encrypt secrets.yml - Vault password: - Confirm Vault password: - Encryption successful - ``` - - 3. When executing **ansible-playbook** specify **--ask-vault-pass** to be prompted for the decryption password, and also specify the location of the **secrets.yml** as such: - - ``` - $ ansible-playbook --ask-vault-pass --extra-vars=@secrets.yml --extra-vars="rhsm_username=myusername" - ``` - - NOTE: Optionally the file containing the encrypted variables can be decrypted with **ansible-vault** and the **--ask-vault-pass** option omitted to prevent any password prompting (for automated runs) and the file can be encrypted after the run. This can be used if an external system such as Jenkins would handle the decryption/encryption outside of Ansible. - -Default: none - -### rhsm_org - -Optional Subscription Manager Satellite Organization. Required for Satellite, ignored if using RHSM Hosted. - -Default: none - -### rhsm_activationkey - -Optional Subscription Manager Satellite Activation Key, use this instead of **rhsm_username** and **rhsm_password** if using Satellite to provide repositories and authentication in a key instead. - -Default: none - -### rhsm_pool - -Optional Subscription Manager pool, determine this by running **subscription-manager list --available** on a registered system. Valid for RHSM Hosted or Satellite. Specifying **rhsm_activationkey** will ignore this option. - -Default: none - -### rhsm_repos - -Optional list of repositories to enable. If left blank it is expected that the **rhsm_activationkey** will specify repos instead. If populated, a **subscription-manager repos --disable=\*** will be run and each of the specified repos explicitly enabled. Valid for RHSM Hosted or Satellite - -NOTE: If specifying this value in an inventory file as opposed to group_vars, be sure to define it as a proper list as such: - -rhsm_repos='["rhel-7-server-rpms", "rhel-7-server-ose-3.1-rpms", "rhel-7-server-extras-rpms"]' - -Default: none - -## Calling This Role -Calling this role is done at both **pre_tasks** and **roles** sections of a playbook and optionally a **vars_prompt**. 
- -### vars_prompt -Unfortunately **vars_prompt** can only be used at the play level before role tasks are executed, so this is the only place it can go. It also cannot be shown conditionally. For this reason it is not included in this role by default. A better method may be using a file containing the password variable encrypted with **ansible-vault**. See the **rhsm_password** section for more details. - -To Add a prompt to capture **rhsm_password**: - -``` -- hosts: localhost - # Add the following lines after a -hosts: declaration and before pre_tasks: - # Start of vars_prompt code block - vars_prompt: - - name: "rhsm_password" - prompt: "Subscription Manager password" - confirm: yes - private: yes - # End of vars_prompt code block - pre_tasks: -``` - -### pre-tasks - -A number of variable checks are performed before any tasks to ensure the proper parameters are set. To include these checks call the pre_task yaml before any roles: - -``` -pre_tasks: -- include: roles/subscription-manager/pre_tasks/pre_tasks.yml -``` - -### roles - -The bulk of the work is performed in the main.yml for this role. The pre-task play will set a variable which can be checked to contitionally include this role as such: - -``` -roles: - - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager' } -``` - -## Running Playbooks with this Role - -- To register to RHSM Hosted or Satellite with a username and plain text password (NOTE: This may retain your password in your CLI history): - - ``` - $ ansible-playbook --extra-vars="rhsm_username=vvaldez rhsm_password=my_secret_password " - ``` - -- To register to RHSM Hosted or Satellite with username and an encrypted file containing the password: - - ``` - $ ansible-playbook --ask-vault-pass --extra-vars=@secrets.yml --extra-vars="rhsm_username=myusername" - - ``` - -- To register to a Satellite server with an activation key: - - ``` - $ ansible-playbook --extra-vars="rhsm_satellite=satellite.example.com rhsm_org=example_org rhsm_activationkey=rhel-7-ose-3-1 " - - ``` -- To ignore any Subscription Manager activities, simply do not set any parameters. 
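Since the commit message names the existing `rhel_subscribe` role as the replacement, a playbook that previously included `subscription-manager` would pull in that role instead. The sketch below is only an assumption about what that call might look like; the `rhel_subscription_*` variable names are not shown in this patch and should be verified against `roles/rhel_subscribe/defaults`:

```
- hosts: all   # target group is illustrative
  roles:
    # Hypothetical invocation of the replacement role; verify the variable
    # names against roles/rhel_subscribe before relying on them.
    - role: rhel_subscribe
      vars:
        rhel_subscription_user: "{{ rhsm_username }}"
        rhel_subscription_pass: "{{ rhsm_password }}"
        rhel_subscription_pool: "{{ rhsm_pool }}"
```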
diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml deleted file mode 100644 index 464670fc0..000000000 --- a/roles/subscription-manager/pre_tasks/pre_tasks.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- -- name: "Set password fact" - set_fact: - rhsm_password: "{{ rhsm_password | default(None) }}" - no_log: true - -- name: "Initialize Subscription Manager fact" - set_fact: - rhsm_register: true - -- name: "Determine if Subscription Manager should be used" - set_fact: - rhsm_register: false - when: - - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == '' - - rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '' - - rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '' - - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' - - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == '' - - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' - -- name: "Validate Subscription Manager organization is set" - fail: msg="Cannot register to a Satellite server without a value for the Organization via 'rhsm_org'" - when: - - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' - - rhsm_satellite is defined - - rhsm_satellite is not none - - rhsm_satellite|trim != '' - - rhsm_register - -- name: "Validate Subscription Manager authentication is defined" - fail: msg="Cannot register without ('rhsm_username' and 'rhsm_password') or 'rhsm_activationkey' variables set. See the README.md for details on securely prompting for a password" - when: - - (rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '') or (rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '') - - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == '' - - rhsm_register - -- name: "Validate activation key and Hosted are not requested together" - fail: msg="Cannot register to RHSM Hosted with 'rhsm_activationkey'" - when: - - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == '' - - rhsm_activationkey is defined - - rhsm_activationkey is not none - - rhsm_activationkey|trim != '' - - rhsm_register diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml deleted file mode 100644 index e4c9fdffb..000000000 --- a/roles/subscription-manager/tasks/main.yml +++ /dev/null @@ -1,150 +0,0 @@ ---- -- name: "Initialize rhsm_password variable if vars_prompt was used" - set_fact: - rhsm_password: "{{ hostvars.localhost.rhsm_password }}" - when: - - rhsm_password is not defined or rhsm_password is none or rhsm_password|trim == '' - -- name: "Initializing Subscription Manager authentication method" - set_fact: - rhsm_authentication: false - -# 'rhsm_activationkey' will take precedence even if 'rhsm_username' and 'rhsm_password' are also set -- name: "Setting Subscription Manager Activation Key Fact" - set_fact: - rhsm_authentication: "key" - when: - - rhsm_activationkey is defined - - rhsm_activationkey is not none - - rhsm_activationkey|trim != '' - - not rhsm_authentication - -# If 'rhsm_username' and 'rhsm_password' are set but not 'rhsm_activationkey', set 'rhsm_authentication' to password -- name: "Setting Subscription Manager Username and Password Fact" - set_fact: - rhsm_authentication: "password" - when: - - rhsm_username is defined - - rhsm_username is not none - - 
rhsm_username|trim != '' - - rhsm_password is defined - - rhsm_password is not none - - rhsm_password|trim != '' - - not rhsm_authentication - -- name: "Initializing registration status" - set_fact: - registered: false - -- name: "Checking subscription status (a failure means it is not registered and will be)" - command: "/usr/bin/subscription-manager status" - ignore_errors: yes - changed_when: no - register: check_if_registered - -- name: "Set registration fact if system is already registered" - set_fact: - registered: true - when: check_if_registered.rc == 0 - -- name: "Cleaning any old subscriptions" - command: "/usr/bin/subscription-manager clean" - when: - - not registered - - rhsm_authentication is defined - register: cleaningsubs_result - until: cleaningsubs_result.rc == 0 - retries: 10 - delay: 1 - -- name: "Install Satellite certificate" - command: "rpm -Uvh --force http://{{ rhsm_satellite }}/pub/katello-ca-consumer-latest.noarch.rpm" - when: - - not registered - - rhsm_satellite is defined - - rhsm_satellite is not none - - rhsm_satellite|trim != '' - -- name: "Register to Satellite using activation key" - command: "/usr/bin/subscription-manager register --activationkey={{ rhsm_activationkey }} --org='{{ rhsm_org }}'" - when: - - not registered - - rhsm_authentication == 'key' - - rhsm_satellite is defined - - rhsm_satellite is not none - - rhsm_satellite|trim != '' - register: register_key_result - until: register_key_result.rc == 0 - retries: 10 - delay: 1 - -# This can apply to either Hosted or Satellite -- name: "Register using username and password" - command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }}" - no_log: true - when: - - not registered - - rhsm_authentication == "password" - - rhsm_org is not defined or rhsm_org is none or rhsm_org|trim == '' - register: register_userpw_result - until: register_userpw_result.rc == 0 - retries: 10 - delay: 1 - -# This can apply to either Hosted or Satellite -- name: "Register using username, password and organization" - command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }} --org={{ rhsm_org }}" - no_log: true - when: - - not registered - - rhsm_authentication == "password" - - rhsm_org is defined - - rhsm_org is not none - - rhsm_org|trim != '' - register: register_userpworg_result - until: register_userpworg_result.rc == 0 - retries: 10 - delay: 1 - -- name: "Auto-attach to Subscription Manager Pool" - command: "/usr/bin/subscription-manager attach --auto" - when: - - not registered - - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' - register: autoattach_result - until: autoattach_result.rc == 0 - retries: 10 - delay: 1 - -- name: "Attach to a specific pool" - command: "/usr/bin/subscription-manager attach --pool={{ rhsm_pool }}" - when: - - rhsm_pool is defined - - rhsm_pool is not none - - rhsm_pool|trim != '' - - not registered - register: attachpool_result - until: attachpool_result.rc == 0 - retries: 10 - delay: 1 - -- name: "Disable all repositories" - command: "/usr/bin/subscription-manager repos --disable=*" - when: - - not registered - - rhsm_repos is defined - - rhsm_repos is not none - - rhsm_repos|trim != '' - -- name: "Enable specified repositories" - command: "/usr/bin/subscription-manager repos --enable={{ item }}" - with_items: "{{ rhsm_repos }}" - when: - - not registered - - rhsm_repos is defined - - rhsm_repos is not none - - rhsm_repos|trim != '' - register: 
enablerepos_result - until: enablerepos_result.rc == 0 - retries: 10 - delay: 1 -- cgit v1.2.1 From e34025f43b1a8b03b0a5e74bb1dfea946375dbf7 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 1 Nov 2017 11:52:25 +0100 Subject: Move the vars/main.yml to defaults The contents of roles/openshift_openstack/vars/main.yml were moved to the defaults/main.yml file instead. There are now duplication warnings we need to address, but the deployment does still work. --- roles/openshift_openstack/defaults/main.yml | 53 +++++++++++++++++++++++++++++ roles/openshift_openstack/vars/main.yml | 49 -------------------------- 2 files changed, 53 insertions(+), 49 deletions(-) delete mode 100644 roles/openshift_openstack/vars/main.yml (limited to 'roles') diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml index 19e6e6f51..d1408abf0 100644 --- a/roles/openshift_openstack/defaults/main.yml +++ b/roles/openshift_openstack/defaults/main.yml @@ -53,3 +53,56 @@ container_root_lv_mount_path: "/var/lib/docker" dns_records_rm: [] dns_records_add: [] external_nsupdate_keys: {} + +full_dns_domain: "{{ (env_id|trim == '') | ternary(public_dns_domain, env_id + '.' + public_dns_domain) }}" +openshift_app_domain: "apps" + + + +# heat vars +stack_name: "{{ env_id }}.{{ public_dns_domain }}" +dns_domain: "{{ public_dns_domain }}" +dns_nameservers: "{{ public_dns_nameservers }}" +subnet_prefix: "{{ openstack_subnet_prefix }}" +master_hostname: "{{ openstack_master_hostname | default('master') }}" +infra_hostname: "{{ openstack_infra_hostname | default('infra-node') }}" +node_hostname: "{{ openstack_node_hostname | default('app-node') }}" +lb_hostname: "{{ openstack_lb_hostname | default('lb') }}" +etcd_hostname: "{{ openstack_etcd_hostname | default('etcd') }}" +dns_hostname: "{{ openstack_dns_hostname | default('dns') }}" +ssh_public_key: "{{ openstack_ssh_public_key }}" +openstack_image: "{{ openstack_default_image_name }}" +lb_flavor: "{{ openstack_lb_flavor | default(openstack_default_flavor) }}" +etcd_flavor: "{{ openstack_etcd_flavor | default(openstack_default_flavor) }}" +master_flavor: "{{ openstack_master_flavor | default(openstack_default_flavor) }}" +node_flavor: "{{ openstack_node_flavor | default(openstack_default_flavor) }}" +infra_flavor: "{{ openstack_infra_flavor | default(openstack_default_flavor) }}" +dns_flavor: "{{ openstack_dns_flavor | default(openstack_default_flavor) }}" +openstack_master_image: "{{ openstack_master_image_name | default(openstack_default_image_name) }}" +openstack_infra_image: "{{ openstack_infra_image_name | default(openstack_default_image_name) }}" +openstack_node_image: "{{ openstack_node_image_name | default(openstack_default_image_name) }}" +openstack_lb_image: "{{ openstack_lb_image_name | default(openstack_default_image_name) }}" +openstack_etcd_image: "{{ openstack_etcd_image_name | default(openstack_default_image_name) }}" +openstack_dns_image: "{{ openstack_dns_image_name | default(openstack_default_image_name) }}" +openstack_private_network: >- + {% if openstack_provider_network_name | default(None) -%} + {{ openstack_provider_network_name }} + {%- else -%} + {{ openstack_private_network_name | default ('openshift-ansible-' + stack_name + '-net') }} + {%- endif -%} +provider_network: "{{ openstack_provider_network_name | default(None) }}" +external_network: "{{ openstack_external_network_name | default(None) }}" +num_etcd: "{{ openstack_num_etcd | default(0) }}" +num_masters: "{{ openstack_num_masters }}" +num_nodes: 
"{{ openstack_num_nodes }}" +num_infra: "{{ openstack_num_infra }}" +num_dns: "{{ openstack_num_dns | default(1) }}" +master_server_group_policies: "{{ openstack_master_server_group_policies | default([]) | to_yaml }}" +infra_server_group_policies: "{{ openstack_infra_server_group_policies | default([]) | to_yaml }}" +master_volume_size: "{{ docker_master_volume_size | default(docker_volume_size) }}" +infra_volume_size: "{{ docker_infra_volume_size | default(docker_volume_size) }}" +node_volume_size: "{{ docker_node_volume_size | default(docker_volume_size) }}" +etcd_volume_size: "{{ docker_etcd_volume_size | default('2') }}" +dns_volume_size: "{{ docker_dns_volume_size | default('1') }}" +lb_volume_size: "{{ docker_lb_volume_size | default('5') }}" +nodes_to_remove: "{{ openstack_nodes_to_remove | default([]) | to_yaml }}" diff --git a/roles/openshift_openstack/vars/main.yml b/roles/openshift_openstack/vars/main.yml deleted file mode 100644 index a4da31bfe..000000000 --- a/roles/openshift_openstack/vars/main.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- -stack_name: "{{ env_id }}.{{ public_dns_domain }}" -dns_domain: "{{ public_dns_domain }}" -dns_nameservers: "{{ public_dns_nameservers }}" -subnet_prefix: "{{ openstack_subnet_prefix }}" -master_hostname: "{{ openstack_master_hostname | default('master') }}" -infra_hostname: "{{ openstack_infra_hostname | default('infra-node') }}" -node_hostname: "{{ openstack_node_hostname | default('app-node') }}" -lb_hostname: "{{ openstack_lb_hostname | default('lb') }}" -etcd_hostname: "{{ openstack_etcd_hostname | default('etcd') }}" -dns_hostname: "{{ openstack_dns_hostname | default('dns') }}" -ssh_public_key: "{{ openstack_ssh_public_key }}" -openstack_image: "{{ openstack_default_image_name }}" -lb_flavor: "{{ openstack_lb_flavor | default(openstack_default_flavor) }}" -etcd_flavor: "{{ openstack_etcd_flavor | default(openstack_default_flavor) }}" -master_flavor: "{{ openstack_master_flavor | default(openstack_default_flavor) }}" -node_flavor: "{{ openstack_node_flavor | default(openstack_default_flavor) }}" -infra_flavor: "{{ openstack_infra_flavor | default(openstack_default_flavor) }}" -dns_flavor: "{{ openstack_dns_flavor | default(openstack_default_flavor) }}" -openstack_master_image: "{{ openstack_master_image_name | default(openstack_default_image_name) }}" -openstack_infra_image: "{{ openstack_infra_image_name | default(openstack_default_image_name) }}" -openstack_node_image: "{{ openstack_node_image_name | default(openstack_default_image_name) }}" -openstack_lb_image: "{{ openstack_lb_image_name | default(openstack_default_image_name) }}" -openstack_etcd_image: "{{ openstack_etcd_image_name | default(openstack_default_image_name) }}" -openstack_dns_image: "{{ openstack_dns_image_name | default(openstack_default_image_name) }}" -openstack_private_network: >- - {% if openstack_provider_network_name | default(None) -%} - {{ openstack_provider_network_name }} - {%- else -%} - {{ openstack_private_network_name | default ('openshift-ansible-' + stack_name + '-net') }} - {%- endif -%} -provider_network: "{{ openstack_provider_network_name | default(None) }}" -external_network: "{{ openstack_external_network_name | default(None) }}" -num_etcd: "{{ openstack_num_etcd | default(0) }}" -num_masters: "{{ openstack_num_masters }}" -num_nodes: "{{ openstack_num_nodes }}" -num_infra: "{{ openstack_num_infra }}" -num_dns: "{{ openstack_num_dns | default(1) }}" -master_server_group_policies: "{{ openstack_master_server_group_policies | default([]) | to_yaml 
}}" -infra_server_group_policies: "{{ openstack_infra_server_group_policies | default([]) | to_yaml }}" -master_volume_size: "{{ docker_master_volume_size | default(docker_volume_size) }}" -infra_volume_size: "{{ docker_infra_volume_size | default(docker_volume_size) }}" -node_volume_size: "{{ docker_node_volume_size | default(docker_volume_size) }}" -etcd_volume_size: "{{ docker_etcd_volume_size | default('2') }}" -dns_volume_size: "{{ docker_dns_volume_size | default('1') }}" -lb_volume_size: "{{ docker_lb_volume_size | default('5') }}" -nodes_to_remove: "{{ openstack_nodes_to_remove | default([]) | to_yaml }}" -use_bastion: "{{ openstack_use_bastion|default(False) }}" -ui_ssh_tunnel: "{{ openshift_ui_ssh_tunnel|default(False) }}" -- cgit v1.2.1 From 23674d565f2801d88060bd0443ec384fbdcdad59 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 1 Nov 2017 15:47:04 +0100 Subject: Remove the subnet_update_dns_servers task list It's no longer being used. --- roles/openshift_openstack/tasks/subnet_update_dns_servers.yaml | 9 --------- 1 file changed, 9 deletions(-) delete mode 100644 roles/openshift_openstack/tasks/subnet_update_dns_servers.yaml (limited to 'roles') diff --git a/roles/openshift_openstack/tasks/subnet_update_dns_servers.yaml b/roles/openshift_openstack/tasks/subnet_update_dns_servers.yaml deleted file mode 100644 index af28fc98f..000000000 --- a/roles/openshift_openstack/tasks/subnet_update_dns_servers.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: Live update the subnet's DNS servers - os_subnet: - name: openshift-ansible-{{ stack_name }}-subnet - network_name: openshift-ansible-{{ stack_name }}-net - state: present - use_default_subnetpool: yes - dns_nameservers: "{{ [private_dns_server|default(public_dns_nameservers[0])]|union(public_dns_nameservers)|unique }}" - when: not provider_network -- cgit v1.2.1 From 79f29bc825286c4f69073827a5b6d71f71f47c91 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 1 Nov 2017 16:43:13 +0100 Subject: Add the DNS updates and rename the openstack vars Most of the vars in `roles/openshift_openstack/defaults/main.yml` are now prefixed with `openstack_`. 
--- roles/openshift_openstack/defaults/main.yml | 91 +++++----- .../tasks/check-prerequisites.yml | 4 +- .../tasks/generate-templates.yml | 3 + roles/openshift_openstack/tasks/hostname.yml | 49 +++--- roles/openshift_openstack/tasks/populate-dns.yml | 187 ++++++++------------ roles/openshift_openstack/tasks/provision.yml | 5 - .../templates/heat_stack.yaml.j2 | 190 ++++++++++----------- .../templates/heat_stack_server.yaml.j2 | 14 +- 8 files changed, 238 insertions(+), 305 deletions(-) (limited to 'roles') diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml index d1408abf0..aa03c088e 100644 --- a/roles/openshift_openstack/defaults/main.yml +++ b/roles/openshift_openstack/defaults/main.yml @@ -1,5 +1,4 @@ --- - stack_state: 'present' ssh_ingress_cidr: 0.0.0.0/0 @@ -7,18 +6,13 @@ node_ingress_cidr: 0.0.0.0/0 master_ingress_cidr: 0.0.0.0/0 lb_ingress_cidr: 0.0.0.0/0 bastion_ingress_cidr: 0.0.0.0/0 -num_etcd: 0 -num_masters: 1 -num_nodes: 1 -num_dns: 1 -num_infra: 1 -nodes_to_remove: [] -etcd_volume_size: 2 -dns_volume_size: 1 -lb_volume_size: 5 -use_bastion: False -ui_ssh_tunnel: False -provider_network: False +openstack_num_etcd: 0 +openstack_num_masters: 1 +openstack_num_nodes: 1 +openstack_num_dns: 0 +openstack_num_infra: 1 +openstack_dns_nameservers: [] +openstack_nodes_to_remove: [] openshift_cluster_node_labels: @@ -61,48 +55,41 @@ openshift_app_domain: "apps" # heat vars stack_name: "{{ env_id }}.{{ public_dns_domain }}" -dns_domain: "{{ public_dns_domain }}" -dns_nameservers: "{{ public_dns_nameservers }}" -subnet_prefix: "{{ openstack_subnet_prefix }}" -master_hostname: "{{ openstack_master_hostname | default('master') }}" -infra_hostname: "{{ openstack_infra_hostname | default('infra-node') }}" -node_hostname: "{{ openstack_node_hostname | default('app-node') }}" -lb_hostname: "{{ openstack_lb_hostname | default('lb') }}" -etcd_hostname: "{{ openstack_etcd_hostname | default('etcd') }}" -dns_hostname: "{{ openstack_dns_hostname | default('dns') }}" -ssh_public_key: "{{ openstack_ssh_public_key }}" -openstack_image: "{{ openstack_default_image_name }}" -lb_flavor: "{{ openstack_lb_flavor | default(openstack_default_flavor) }}" -etcd_flavor: "{{ openstack_etcd_flavor | default(openstack_default_flavor) }}" -master_flavor: "{{ openstack_master_flavor | default(openstack_default_flavor) }}" -node_flavor: "{{ openstack_node_flavor | default(openstack_default_flavor) }}" -infra_flavor: "{{ openstack_infra_flavor | default(openstack_default_flavor) }}" -dns_flavor: "{{ openstack_dns_flavor | default(openstack_default_flavor) }}" -openstack_master_image: "{{ openstack_master_image_name | default(openstack_default_image_name) }}" -openstack_infra_image: "{{ openstack_infra_image_name | default(openstack_default_image_name) }}" -openstack_node_image: "{{ openstack_node_image_name | default(openstack_default_image_name) }}" -openstack_lb_image: "{{ openstack_lb_image_name | default(openstack_default_image_name) }}" -openstack_etcd_image: "{{ openstack_etcd_image_name | default(openstack_default_image_name) }}" -openstack_dns_image: "{{ openstack_dns_image_name | default(openstack_default_image_name) }}" +openstack_subnet_prefix: "192.168.99" +openstack_master_hostname: master +openstack_infra_hostname: infra-node +openstack_node_hostname: app-node +openstack_lb_hostname: lb +openstack_etcd_hostname: etcd +openstack_dns_hostname: dns +openstack_keypair_name: openshift +openstack_lb_flavor: "{{ openstack_default_flavor }}" 
+openstack_etcd_flavor: "{{ openstack_default_flavor }}" +openstack_master_flavor: "{{ openstack_default_flavor }}" +openstack_node_flavor: "{{ openstack_default_flavor }}" +openstack_infra_flavor: "{{ openstack_default_flavor }}" +openstack_dns_flavor: "{{ openstack_default_flavor }}" +openstack_master_image: "{{ openstack_default_image_name }}" +openstack_infra_image: "{{ openstack_default_image_name }}" +openstack_node_image: "{{ openstack_default_image_name }}" +openstack_lb_image: "{{ openstack_default_image_name }}" +openstack_etcd_image: "{{ openstack_default_image_name }}" +openstack_dns_image: "{{ openstack_default_image_name }}" +openstack_provider_network_name: False +openstack_external_network_name: False openstack_private_network: >- {% if openstack_provider_network_name | default(None) -%} {{ openstack_provider_network_name }} {%- else -%} {{ openstack_private_network_name | default ('openshift-ansible-' + stack_name + '-net') }} {%- endif -%} -provider_network: "{{ openstack_provider_network_name | default(None) }}" -external_network: "{{ openstack_external_network_name | default(None) }}" -num_etcd: "{{ openstack_num_etcd | default(0) }}" -num_masters: "{{ openstack_num_masters }}" -num_nodes: "{{ openstack_num_nodes }}" -num_infra: "{{ openstack_num_infra }}" -num_dns: "{{ openstack_num_dns | default(1) }}" -master_server_group_policies: "{{ openstack_master_server_group_policies | default([]) | to_yaml }}" -infra_server_group_policies: "{{ openstack_infra_server_group_policies | default([]) | to_yaml }}" -master_volume_size: "{{ docker_master_volume_size | default(docker_volume_size) }}" -infra_volume_size: "{{ docker_infra_volume_size | default(docker_volume_size) }}" -node_volume_size: "{{ docker_node_volume_size | default(docker_volume_size) }}" -etcd_volume_size: "{{ docker_etcd_volume_size | default('2') }}" -dns_volume_size: "{{ docker_dns_volume_size | default('1') }}" -lb_volume_size: "{{ docker_lb_volume_size | default('5') }}" -nodes_to_remove: "{{ openstack_nodes_to_remove | default([]) | to_yaml }}" +openstack_master_server_group_policies: [] +openstack_infra_server_group_policies: [] +openstack_master_volume_size: "{{ docker_volume_size }}" +openstack_infra_volume_size: "{{ docker_volume_size }}" +openstack_node_volume_size: "{{ docker_volume_size }}" +openstack_etcd_volume_size: 2 +openstack_dns_volume_size: 1 +openstack_lb_volume_size: 5 +openstack_use_bastion: false +openshift_ui_ssh_tunnel: false diff --git a/roles/openshift_openstack/tasks/check-prerequisites.yml b/roles/openshift_openstack/tasks/check-prerequisites.yml index 4d7cfbf11..13000e31f 100644 --- a/roles/openshift_openstack/tasks/check-prerequisites.yml +++ b/roles/openshift_openstack/tasks/check-prerequisites.yml @@ -76,13 +76,13 @@ - name: Try to show keypair command: > python -c 'import shade; cloud = shade.openstack_cloud(); - exit(cloud.get_keypair("{{ openstack_ssh_public_key }}") is None)' + exit(cloud.get_keypair("{{ openstack_keypair_name }}") is None)' ignore_errors: yes register: key_result - name: Check that keypair is available assert: that: 'key_result.rc == 0' - msg: "Keypair {{ openstack_ssh_public_key }} is not available" + msg: "Keypair {{ openstack_keypair_name }} is not available" # Check that custom images are available - include: custom_image_check.yaml diff --git a/roles/openshift_openstack/tasks/generate-templates.yml b/roles/openshift_openstack/tasks/generate-templates.yml index 0ff50a095..3a8b588e9 100644 --- a/roles/openshift_openstack/tasks/generate-templates.yml 
+++ b/roles/openshift_openstack/tasks/generate-templates.yml @@ -10,6 +10,9 @@ stack_template_path: "{{ stack_template_pre.path }}/stack.yaml" user_data_template_path: "{{ stack_template_pre.path }}/user-data" +- name: Print out the Heat template directory + debug: var=stack_template_pre + - name: generate HOT stack template from jinja2 template template: src: heat_stack.yaml.j2 diff --git a/roles/openshift_openstack/tasks/hostname.yml b/roles/openshift_openstack/tasks/hostname.yml index 0fc8fbc4c..9815d0e80 100644 --- a/roles/openshift_openstack/tasks/hostname.yml +++ b/roles/openshift_openstack/tasks/hostname.yml @@ -1,33 +1,26 @@ --- -- name: "Verify hostname" - command: hostnamectl status --static - register: hostname_fqdn +- name: Setting Hostname Fact + set_fact: + new_hostname: "{{ custom_hostname | default(inventory_hostname_short) }}" -- name: "Set hostname if required" - when: hostname_fqdn.stdout != ansible_fqdn - block: - - name: Setting Hostname Fact - set_fact: - new_hostname: "{{ custom_hostname | default(inventory_hostname_short) }}" +- name: Setting FQDN Fact + set_fact: + new_fqdn: "{{ new_hostname }}.{{ full_dns_domain }}" - - name: Setting FQDN Fact - set_fact: - new_fqdn: "{{ new_hostname }}.{{ full_dns_domain }}" +- name: Setting hostname and DNS domain + hostname: name="{{ new_fqdn }}" - - name: Setting hostname and DNS domain - hostname: name="{{ new_fqdn }}" +- name: Check for cloud.cfg + stat: path=/etc/cloud/cloud.cfg + register: cloud_cfg - - name: Check for cloud.cfg - stat: path=/etc/cloud/cloud.cfg - register: cloud_cfg - - - name: Prevent cloud-init updates of hostname/fqdn (if applicable) - lineinfile: - dest: /etc/cloud/cloud.cfg - state: present - regexp: "{{ item.regexp }}" - line: "{{ item.line }}" - with_items: - - { regexp: '^ - set_hostname', line: '# - set_hostname' } - - { regexp: '^ - update_hostname', line: '# - update_hostname' } - when: cloud_cfg.stat.exists == True +- name: Prevent cloud-init updates of hostname/fqdn (if applicable) + lineinfile: + dest: /etc/cloud/cloud.cfg + state: present + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: '^ - set_hostname', line: '# - set_hostname' } + - { regexp: '^ - update_hostname', line: '# - update_hostname' } + when: cloud_cfg.stat.exists == True diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml index c8243dc1f..669b65a01 100644 --- a/roles/openshift_openstack/tasks/populate-dns.yml +++ b/roles/openshift_openstack/tasks/populate-dns.yml @@ -1,37 +1,26 @@ -# TODO: use nsupdate to populate the DNS servers using the keys -# specified in the inventory. - -# this is an optional step -- the deployers may do whatever else they -# wish here. - - -# TODO: build records -# TODO: run nsupdate - - - name: "Generate list of private A records" set_fact: - private_records: "{{ [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}" + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}" with_items: "{{ groups['cluster_hosts'] }}" -# - name: "Add wildcard records to the private A records for infrahosts" -# set_fact: -# private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' 
+ openshift_app_domain, 'ip': hostvars[item]['private_v4'] } ] }}" -# with_items: "{{ groups['infra_hosts'] }}" - -# - name: "Add public master cluster hostname records to the private A records (single master)" -# set_fact: -# private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].private_v4 } ] }}" -# when: -# - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined -# - openstack_num_masters == 1 - -# - name: "Add public master cluster hostname records to the private A records (multi-master)" -# set_fact: -# private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].private_v4 } ] }}" -# when: -# - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined -# - openstack_num_masters > 1 +- name: "Add wildcard records to the private A records for infrahosts" + set_fact: + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['private_v4'] } ] }}" + with_items: "{{ groups['infra_hosts'] }}" + +- name: "Add public master cluster hostname records to the private A records (single master)" + set_fact: + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].private_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openstack_num_masters == 1 + +- name: "Add public master cluster hostname records to the private A records (multi-master)" + set_fact: + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].private_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openstack_num_masters > 1 - name: "Set the private DNS server to use the external value (if provided)" set_fact: @@ -55,102 +44,67 @@ key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}" entries: "{{ private_records }}" -# - name: "Generate list of public A records" -# set_fact: -# public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['public_v4'] } ] }}" -# with_items: "{{ groups['cluster_hosts'] }}" -# when: hostvars[item]['public_v4'] is defined - -# - name: "Add wildcard records to the public A records" -# set_fact: -# public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' 
+ openshift_app_domain, 'ip': hostvars[item]['public_v4'] } ] }}" -# with_items: "{{ groups['infra_hosts'] }}" -# when: hostvars[item]['public_v4'] is defined - -# - name: "Add public master cluster hostname records to the public A records (single master)" -# set_fact: -# public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].public_v4 } ] }}" -# when: -# - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined -# - openstack_num_masters == 1 -# - not use_bastion|bool - -# - name: "Add public master cluster hostname records to the public A records (single master behind a bastion)" -# set_fact: -# public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}" -# when: -# - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined -# - openstack_num_masters == 1 -# - use_bastion|bool - -# - name: "Add public master cluster hostname records to the public A records (multi-master)" -# set_fact: -# public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].public_v4 } ] }}" -# when: -# - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined -# - openstack_num_masters > 1 - -# - name: "Set the public DNS server details to use the external value (if provided)" -# set_fact: -# nsupdate_server_public: "{{ external_nsupdate_keys['public']['server'] }}" -# nsupdate_key_secret_public: "{{ external_nsupdate_keys['public']['key_secret'] }}" -# nsupdate_key_algorithm_public: "{{ external_nsupdate_keys['public']['key_algorithm'] }}" -# nsupdate_public_key_name: "{{ external_nsupdate_keys['public']['key_name']|default('public-' + full_dns_domain) }}" -# when: -# - external_nsupdate_keys is defined -# - external_nsupdate_keys['public'] is defined - -# - name: "Set the public DNS server details to use the provisioned value" -# set_fact: -# nsupdate_server_public: "{{ hostvars[groups['dns'][0]].public_v4 }}" -# nsupdate_key_secret_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_secret }}" -# nsupdate_key_algorithm_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_algorithm }}" -# when: -# - nsupdate_server_public is undefined - -# - name: "Generate the public Add section for DNS" -# set_fact: -# public_named_records: -# - view: "public" -# zone: "{{ full_dns_domain }}" -# server: "{{ nsupdate_server_public }}" -# key_name: "{{ nsupdate_public_key_name|default('public-' + full_dns_domain) }}" -# key_secret: "{{ nsupdate_key_secret_public }}" -# key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" -# entries: "{{ public_records }}" - +- name: "Generate list of public A records" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['public_v4'] } ] }}" + with_items: "{{ groups['cluster_hosts'] }}" + when: hostvars[item]['public_v4'] is defined +- name: "Add wildcard records to the public A records" + set_fact: + public_records: "{{ public_records | default([]) + [ { 
'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['public_v4'] } ] }}" + with_items: "{{ groups['infra_hosts'] }}" + when: hostvars[item]['public_v4'] is defined +- name: "Add public master cluster hostname records to the public A records (single master)" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].public_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openstack_num_masters == 1 + - not openstack_use_bastion|bool +- name: "Add public master cluster hostname records to the public A records (single master behind a bastion)" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openstack_num_masters == 1 + - openstack_use_bastion|bool +- name: "Add public master cluster hostname records to the public A records (multi-master)" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].public_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openstack_num_masters > 1 -- name: "Generate the final dns_records_add" +- name: "Set the public DNS server details to use the external value (if provided)" set_fact: - # TODO(shadower): enable this when we add public records - #dns_records_add: "{{ private_named_records + public_named_records }}" - dns_records_add: "{{ private_named_records }}" + nsupdate_server_public: "{{ external_nsupdate_keys['public']['server'] }}" + nsupdate_key_secret_public: "{{ external_nsupdate_keys['public']['key_secret'] }}" + nsupdate_key_algorithm_public: "{{ external_nsupdate_keys['public']['key_algorithm'] }}" + nsupdate_public_key_name: "{{ external_nsupdate_keys['public']['key_name']|default('public-' + full_dns_domain) }}" + when: + - external_nsupdate_keys is defined + - external_nsupdate_keys['public'] is defined +- name: "Generate the public Add section for DNS" + set_fact: + public_named_records: + - view: "public" + zone: "{{ full_dns_domain }}" + server: "{{ nsupdate_server_public }}" + key_name: "{{ nsupdate_public_key_name|default('public-' + full_dns_domain) }}" + key_secret: "{{ nsupdate_key_secret_public }}" + key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" + entries: "{{ public_records }}" -# RUN NSUPDATE +- name: "Generate the final dns_records_add" + set_fact: + dns_records_add: "{{ private_named_records + public_named_records }}" -- name: "Remove any deleted DNS A records" - nsupdate: - key_name: "{{ item.0.key_name }}" - key_secret: "{{ item.0.key_secret }}" - key_algorithm: "{{ item.0.key_algorithm }}" - server: "{{ item.0.server }}" - zone: "{{ item.0.zone }}" - record: "{{ item.1.hostname }}" - type: "{{ item.1.type }}" - state: absent - with_subelements: - - "{{ dns_records_rm | default({}) }}" - - entries - register: nsupdate_remove_result - until: nsupdate_remove_result|succeeded - retries: 10 - delay: 1 - name: "Add DNS A records" nsupdate: @@ 
-162,6 +116,7 @@ record: "{{ item.1.hostname }}" value: "{{ item.1.ip }}" type: "{{ item.1.type }}" + # TODO(shadower): add a cleanup playbook that removes these records, too! state: present with_subelements: - "{{ dns_records_add | default({}) }}" diff --git a/roles/openshift_openstack/tasks/provision.yml b/roles/openshift_openstack/tasks/provision.yml index 8ebda8100..e693f535a 100644 --- a/roles/openshift_openstack/tasks/provision.yml +++ b/roles/openshift_openstack/tasks/provision.yml @@ -16,11 +16,6 @@ - name: Add the new nodes to the inventory meta: refresh_inventory -- name: Populate DNS entries - include: populate-dns.yml - when: - - stack_state == 'present' - - name: CleanUp include: cleanup.yml when: diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2 index 2359842a5..28634f9a4 100644 --- a/roles/openshift_openstack/templates/heat_stack.yaml.j2 +++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2 @@ -54,7 +54,7 @@ outputs: description: Floating IPs of the nodes value: { get_attr: [ infra_nodes, floating_ip ] } -{% if num_dns|int > 0 %} +{% if openstack_num_dns|int > 0 %} dns_name: description: Name of the DNS value: @@ -72,11 +72,11 @@ outputs: {% endif %} conditions: - no_floating: {% if provider_network or use_bastion|bool %}true{% else %}false{% endif %} + no_floating: {% if openstack_provider_network_name or openstack_use_bastion|bool %}true{% else %}false{% endif %} resources: -{% if not provider_network %} +{% if not openstack_provider_network_name %} net: type: OS::Neutron::Net properties: @@ -99,20 +99,20 @@ resources: str_replace: template: subnet_24_prefix.0/24 params: - subnet_24_prefix: {{ subnet_prefix }} + subnet_24_prefix: {{ openstack_subnet_prefix }} allocation_pools: - start: str_replace: template: subnet_24_prefix.3 params: - subnet_24_prefix: {{ subnet_prefix }} + subnet_24_prefix: {{ openstack_subnet_prefix }} end: str_replace: template: subnet_24_prefix.254 params: - subnet_24_prefix: {{ subnet_prefix }} + subnet_24_prefix: {{ openstack_subnet_prefix }} dns_nameservers: -{% for nameserver in dns_nameservers %} +{% for nameserver in openstack_dns_nameservers %} - {{ nameserver }} {% endfor %} @@ -141,7 +141,7 @@ resources: params: cluster_id: {{ stack_name }} external_gateway_info: - network: {{ external_network }} + network: {{ openstack_external_network_name }} interface: type: OS::Neutron::RouterInterface @@ -159,7 +159,7 @@ resources: # template: openshift-ansible-cluster_id-keypair # params: # cluster_id: {{ stack_name }} -# public_key: {{ ssh_public_key }} +# public_key: {{ openstack_keypair_name }} common-secgrp: type: OS::Neutron::SecurityGroup @@ -180,7 +180,7 @@ resources: port_range_min: 22 port_range_max: 22 remote_ip_prefix: {{ ssh_ingress_cidr }} -{% if use_bastion|bool %} +{% if openstack_use_bastion|bool %} - direction: ingress protocol: tcp port_range_min: 22 @@ -443,7 +443,7 @@ resources: port_range_min: 443 port_range_max: 443 -{% if num_dns|int > 0 %} +{% if openstack_num_dns|int > 0 %} dns-secgrp: type: OS::Neutron::SecurityGroup properties: @@ -480,7 +480,7 @@ resources: remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" {% endif %} -{% if num_masters|int > 1 or ui_ssh_tunnel|bool %} +{% if openstack_num_masters|int > 1 or openshift_ui_ssh_tunnel|bool %} lb-secgrp: type: OS::Neutron::SecurityGroup properties: @@ -492,7 +492,7 @@ resources: port_range_min: {{ openshift_master_api_port | default(8443) }} port_range_max: {{ openshift_master_api_port | 
default(8443) }} remote_ip_prefix: {{ lb_ingress_cidr | default(bastion_ingress_cidr) }} -{% if ui_ssh_tunnel|bool %} +{% if openshift_ui_ssh_tunnel|bool %} - direction: ingress protocol: tcp port_range_min: {{ openshift_master_api_port | default(8443) }} @@ -511,7 +511,7 @@ resources: etcd: type: OS::Heat::ResourceGroup properties: - count: {{ num_etcd }} + count: {{ openstack_num_etcd }} resource_def: type: server.yaml properties: @@ -520,7 +520,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: {{ etcd_hostname | default('etcd') }} + k8s_type: {{ openstack_etcd_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -530,12 +530,12 @@ resources: k8s_type: etcds cluster_id: {{ stack_name }} type: etcd - image: {{ openstack_etcd_image | default(openstack_image) }} - flavor: {{ etcd_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} + image: {{ openstack_etcd_image }} + flavor: {{ openstack_etcd_flavor }} + key_name: {{ openstack_keypair_name }} +{% if openstack_provider_network_name %} + net: {{ openstack_provider_network_name }} + net_name: {{ openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -552,31 +552,31 @@ resources: if: - no_floating - null - - {{ external_network }} -{% if use_bastion|bool or provider_network %} + - {{ openstack_external_network_name }} +{% if openstack_use_bastion|bool or openstack_provider_network_name %} attach_float_net: false {% endif %} - volume_size: {{ etcd_volume_size }} -{% if not provider_network %} + volume_size: {{ openstack_etcd_volume_size }} +{% if not openstack_provider_network_name %} depends_on: - interface {% endif %} -{% if master_server_group_policies|length > 0 %} +{% if openstack_master_server_group_policies|length > 0 %} master_server_group: type: OS::Nova::ServerGroup properties: name: master_server_group - policies: {{ master_server_group_policies }} + policies: {{ openstack_master_server_group_policies }} {% endif %} -{% if infra_server_group_policies|length > 0 %} +{% if openstack_infra_server_group_policies|length > 0 %} infra_server_group: type: OS::Nova::ServerGroup properties: name: infra_server_group - policies: {{ infra_server_group_policies }} + policies: {{ openstack_infra_server_group_policies }} {% endif %} -{% if num_masters|int > 1 %} +{% if openstack_num_masters|int > 1 %} loadbalancer: type: OS::Heat::ResourceGroup properties: @@ -589,7 +589,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: {{ lb_hostname | default('lb') }} + k8s_type: {{ openstack_lb_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -599,12 +599,12 @@ resources: k8s_type: lb cluster_id: {{ stack_name }} type: lb - image: {{ openstack_lb_image | default(openstack_image) }} - flavor: {{ lb_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} + image: {{ openstack_lb_image }} + flavor: {{ openstack_lb_flavor }} + key_name: {{ openstack_keypair_name }} +{% if openstack_provider_network_name %} + net: {{ openstack_provider_network_name }} + net_name: {{ openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -617,11 +617,11 @@ resources: secgrp: - { get_resource: lb-secgrp } - { get_resource: common-secgrp } -{% if 
not provider_network %} - floating_network: {{ external_network }} +{% if not openstack_provider_network_name %} + floating_network: {{ openstack_external_network_name }} {% endif %} - volume_size: {{ lb_volume_size }} -{% if not provider_network %} + volume_size: {{ openstack_lb_volume_size }} +{% if not openstack_provider_network_name %} depends_on: - interface {% endif %} @@ -630,7 +630,7 @@ resources: masters: type: OS::Heat::ResourceGroup properties: - count: {{ num_masters }} + count: {{ openstack_num_masters }} resource_def: type: server.yaml properties: @@ -639,7 +639,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: {{ master_hostname | default('master')}} + k8s_type: {{ openstack_master_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -649,12 +649,12 @@ resources: k8s_type: masters cluster_id: {{ stack_name }} type: master - image: {{ openstack_master_image | default(openstack_image) }} - flavor: {{ master_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} + image: {{ openstack_master_image }} + flavor: {{ openstack_master_flavor }} + key_name: {{ openstack_keypair_name }} +{% if openstack_provider_network_name %} + net: {{ openstack_provider_network_name }} + net_name: {{ openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -675,7 +675,7 @@ resources: {% else %} - { get_resource: master-secgrp } - { get_resource: node-secgrp } -{% if num_etcd|int == 0 %} +{% if openstack_num_etcd|int == 0 %} - { get_resource: etcd-secgrp } {% endif %} {% endif %} @@ -684,16 +684,16 @@ resources: if: - no_floating - null - - {{ external_network }} -{% if use_bastion|bool or provider_network %} + - {{ openstack_external_network_name }} +{% if openstack_use_bastion|bool or openstack_provider_network_name %} attach_float_net: false {% endif %} - volume_size: {{ master_volume_size }} -{% if master_server_group_policies|length > 0 %} + volume_size: {{ openstack_master_volume_size }} +{% if openstack_master_server_group_policies|length > 0 %} scheduler_hints: group: { get_resource: master_server_group } {% endif %} -{% if not provider_network %} +{% if not openstack_provider_network_name %} depends_on: - interface {% endif %} @@ -701,9 +701,9 @@ resources: compute_nodes: type: OS::Heat::ResourceGroup properties: - count: {{ num_nodes }} + count: {{ openstack_num_nodes }} removal_policies: - - resource_list: {{ nodes_to_remove }} + - resource_list: {{ openstack_nodes_to_remove }} resource_def: type: server.yaml properties: @@ -712,7 +712,7 @@ resources: template: sub_type_k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - sub_type_k8s_type: {{ node_hostname | default('app-node') }} + sub_type_k8s_type: {{ openstack_node_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -727,12 +727,12 @@ resources: {% for k, v in openshift_cluster_node_labels.app.iteritems() %} {{ k|e }}: {{ v|e }} {% endfor %} - image: {{ openstack_node_image | default(openstack_image) }} - flavor: {{ node_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} + image: {{ openstack_node_image }} + flavor: {{ openstack_node_flavor }} + key_name: {{ openstack_keypair_name }} +{% if openstack_provider_network_name %} + net: {{ openstack_provider_network_name }} + net_name: {{ 
openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -754,12 +754,12 @@ resources: if: - no_floating - null - - {{ external_network }} -{% if use_bastion|bool or provider_network %} + - {{ openstack_external_network_name }} +{% if openstack_use_bastion|bool or openstack_provider_network_name %} attach_float_net: false {% endif %} - volume_size: {{ node_volume_size }} -{% if not provider_network %} + volume_size: {{ openstack_node_volume_size }} +{% if not openstack_provider_network_name %} depends_on: - interface {% endif %} @@ -767,7 +767,7 @@ resources: infra_nodes: type: OS::Heat::ResourceGroup properties: - count: {{ num_infra }} + count: {{ openstack_num_infra }} resource_def: type: server.yaml properties: @@ -776,7 +776,7 @@ resources: template: sub_type_k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - sub_type_k8s_type: {{ infra_hostname | default('infranode') }} + sub_type_k8s_type: {{ openstack_infra_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -791,12 +791,12 @@ resources: {% for k, v in openshift_cluster_node_labels.infra.iteritems() %} {{ k|e }}: {{ v|e }} {% endfor %} - image: {{ openstack_infra_image | default(openstack_image) }} - flavor: {{ infra_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} + image: {{ openstack_infra_image }} + flavor: {{ openstack_infra_flavor }} + key_name: {{ openstack_keypair_name }} +{% if openstack_provider_network_name %} + net: {{ openstack_provider_network_name }} + net_name: {{ openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -818,29 +818,29 @@ resources: {% else %} - { get_resource: node-secgrp } {% endif %} -{% if ui_ssh_tunnel|bool and num_masters|int < 2 %} +{% if openshift_ui_ssh_tunnel|bool and openstack_num_masters|int < 2 %} - { get_resource: lb-secgrp } {% endif %} - { get_resource: infra-secgrp } - { get_resource: common-secgrp } -{% if not provider_network %} - floating_network: {{ external_network }} +{% if not openstack_provider_network_name %} + floating_network: {{ openstack_external_network_name }} {% endif %} - volume_size: {{ infra_volume_size }} -{% if infra_server_group_policies|length > 0 %} + volume_size: {{ openstack_infra_volume_size }} +{% if openstack_infra_server_group_policies|length > 0 %} scheduler_hints: group: { get_resource: infra_server_group } {% endif %} -{% if not provider_network %} +{% if not openstack_provider_network_name %} depends_on: - interface {% endif %} -{% if num_dns|int > 0 %} +{% if openstack_num_dns|int > 0 %} dns: type: OS::Heat::ResourceGroup properties: - count: {{ num_dns }} + count: {{ openstack_num_dns }} resource_def: type: server.yaml properties: @@ -849,7 +849,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: {{ dns_hostname | default('dns') }} + k8s_type: {{ openstack_dns_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -859,12 +859,12 @@ resources: k8s_type: dns cluster_id: {{ stack_name }} type: dns - image: {{ openstack_dns_image | default(openstack_image) }} - flavor: {{ dns_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} + image: {{ openstack_dns_image }} + flavor: {{ openstack_dns_flavor }} + key_name: {{ openstack_keypair_name }} +{% if 
openstack_provider_network_name %} + net: {{ openstack_provider_network_name }} + net_name: {{ openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -877,11 +877,11 @@ resources: secgrp: - { get_resource: dns-secgrp } - { get_resource: common-secgrp } -{% if not provider_network %} - floating_network: {{ external_network }} +{% if not openstack_provider_network_name %} + floating_network: {{ openstack_external_network_name }} {% endif %} - volume_size: {{ dns_volume_size }} -{% if not provider_network %} + volume_size: {{ openstack_dns_volume_size }} +{% if not openstack_provider_network_name %} depends_on: - interface {% endif %} diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 index 9ffe721a5..160345baf 100644 --- a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 +++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 @@ -61,7 +61,7 @@ parameters: label: Net name description: Net name -{% if not provider_network %} +{% if not openstack_provider_network_name %} subnet: type: string label: Subnet ID @@ -81,7 +81,7 @@ parameters: label: Net ID description: Net resource -{% if not provider_network %} +{% if not openstack_provider_network_name %} data_subnet: type: string default: '' @@ -102,7 +102,7 @@ parameters: label: Attach-float-net description: A switch for floating network port connection -{% if not provider_network %} +{% if not openstack_provider_network_name %} floating_network: type: string default: '' @@ -156,7 +156,7 @@ outputs: - server - addresses - { get_param: net_name } -{% if provider_network %} +{% if openstack_provider_network_name %} - 0 {% else %} - 1 @@ -226,7 +226,7 @@ resources: type: OS::Neutron::Port properties: network: { get_param: net } -{% if not provider_network %} +{% if not openstack_provider_network_name %} fixed_ips: - subnet: { get_param: subnet } {% endif %} @@ -239,13 +239,13 @@ resources: properties: network: { get_param: data_net } port_security_enabled: false -{% if not provider_network %} +{% if not openstack_provider_network_name %} fixed_ips: - subnet: { get_param: data_subnet } {% endif %} {% endif %} -{% if not provider_network %} +{% if not openstack_provider_network_name %} floating-ip: condition: { not: no_floating } type: OS::Neutron::FloatingIP -- cgit v1.2.1 From f462e7a682cb65085864d7eff4b7898fe8555a75 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 2 Nov 2017 10:07:08 +0100 Subject: Move the selinux check up --- roles/openshift_openstack/tasks/node-configuration.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'roles') diff --git a/roles/openshift_openstack/tasks/node-configuration.yml b/roles/openshift_openstack/tasks/node-configuration.yml index 8a6a8022f..89e58d830 100644 --- a/roles/openshift_openstack/tasks/node-configuration.yml +++ b/roles/openshift_openstack/tasks/node-configuration.yml @@ -1,11 +1,11 @@ --- +- name: "Verify SELinux is enforcing" + fail: + msg: "SELinux is required for OpenShift and has been detected as '{{ ansible_selinux.config_mode }}'" + when: ansible_selinux.config_mode != "enforcing" + - include: hostname.yml - include: container-storage-setup.yml - include: node-network.yml - -- name: "Verify SELinux is enforcing" - fail: - msg: "SELinux is required for OpenShift and has been detected as '{{ ansible_selinux.config_mode }}'" - when: ansible_selinux.config_mode != "enforcing" -- cgit v1.2.1 From 
bde35d577f4ccb786a65a84142fabe90eb903599 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 2 Nov 2017 10:15:42 +0100 Subject: Use the default `item` loop variable for checks --- roles/openshift_openstack/tasks/check-prerequisites.yml | 16 ++++++---------- roles/openshift_openstack/tasks/custom_flavor_check.yaml | 5 +++-- roles/openshift_openstack/tasks/custom_image_check.yaml | 4 ++-- 3 files changed, 11 insertions(+), 14 deletions(-) (limited to 'roles') diff --git a/roles/openshift_openstack/tasks/check-prerequisites.yml b/roles/openshift_openstack/tasks/check-prerequisites.yml index 13000e31f..a91e60640 100644 --- a/roles/openshift_openstack/tasks/check-prerequisites.yml +++ b/roles/openshift_openstack/tasks/check-prerequisites.yml @@ -93,17 +93,13 @@ - "{{ openstack_lb_image }}" - "{{ openstack_etcd_image }}" - "{{ openstack_dns_image }}" - loop_control: - loop_var: image # Check that custom flavors are available - include: custom_flavor_check.yaml with_items: - - "{{ master_flavor }}" - - "{{ infra_flavor }}" - - "{{ node_flavor }}" - - "{{ lb_flavor }}" - - "{{ etcd_flavor }}" - - "{{ dns_flavor }}" - loop_control: - loop_var: flavor + - "{{ openstack_master_flavor }}" + - "{{ openstack_infra_flavor }}" + - "{{ openstack_node_flavor }}" + - "{{ openstack_lb_flavor }}" + - "{{ openstack_etcd_flavor }}" + - "{{ openstack_dns_flavor }}" diff --git a/roles/openshift_openstack/tasks/custom_flavor_check.yaml b/roles/openshift_openstack/tasks/custom_flavor_check.yaml index e11874c28..5fb7a76ff 100644 --- a/roles/openshift_openstack/tasks/custom_flavor_check.yaml +++ b/roles/openshift_openstack/tasks/custom_flavor_check.yaml @@ -1,9 +1,10 @@ --- - name: Try to get flavor facts os_flavor_facts: - name: "{{ flavor }}" + name: "{{ item }}" register: flavor_result + - name: Check that custom flavor is available assert: that: "flavor_result.ansible_facts.openstack_flavors" - msg: "Flavor {{ flavor }} is not available." + msg: "Flavor {{ item }} is not available." diff --git a/roles/openshift_openstack/tasks/custom_image_check.yaml b/roles/openshift_openstack/tasks/custom_image_check.yaml index 4fbd6a687..4ae163406 100644 --- a/roles/openshift_openstack/tasks/custom_image_check.yaml +++ b/roles/openshift_openstack/tasks/custom_image_check.yaml @@ -1,10 +1,10 @@ --- - name: Try to get image facts os_image_facts: - image: "{{ image }}" + image: "{{ item }}" register: image_result - name: Check that custom image is available assert: that: "image_result.ansible_facts.openstack_image" - msg: "Image {{ image }} is not available." + msg: "Image {{ item }} is not available." -- cgit v1.2.1 From 4fd33e96eed4d1d5eaca0af8f2ef3e81fcaf5498 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 2 Nov 2017 10:44:41 +0100 Subject: Simplify the template paths for the storage setup Because the templates are present in a role, the `template` module is able to look them up directly, without having to use `{{ role_path }}/templates`. 
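For context, this works because of how Ansible resolves relative template paths: when a task file belongs to a role, a bare `src` passed to the `template` module is looked up in that role's `templates/` directory. Below is a minimal sketch of a task relying on that lookup, assuming the role layout shown in the surrounding diffs; the snippet is illustrative and not part of the patch itself.

```yaml
# Hypothetical task placed in roles/openshift_openstack/tasks/;
# Ansible resolves the relative src against roles/openshift_openstack/templates/.
- name: create the docker-storage config file
  template:
    src: docker-storage-setup-overlayfs.j2  # no "{{ role_path }}/templates/" prefix needed
    dest: /etc/sysconfig/docker-storage-setup
    owner: root
    group: root
```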
--- roles/openshift_openstack/tasks/container-storage-setup.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'roles') diff --git a/roles/openshift_openstack/tasks/container-storage-setup.yml b/roles/openshift_openstack/tasks/container-storage-setup.yml index 5cd48ca2c..82307b208 100644 --- a/roles/openshift_openstack/tasks/container-storage-setup.yml +++ b/roles/openshift_openstack/tasks/container-storage-setup.yml @@ -2,7 +2,7 @@ - block: - name: create the docker-storage config file template: - src: "{{ role_path }}/templates/docker-storage-setup-overlayfs.j2" + src: docker-storage-setup-overlayfs.j2 dest: /etc/sysconfig/docker-storage-setup owner: root group: root @@ -14,7 +14,7 @@ - block: - name: create the docker-storage-setup config file template: - src: "{{ role_path }}/templates/docker-storage-setup-dm.j2" + src: docker-storage-setup-dm.j2 dest: /etc/sysconfig/docker-storage-setup owner: root group: root @@ -26,7 +26,7 @@ - block: - name: create the docker-storage-setup config file for CentOS template: - src: "{{ role_path }}/templates/docker-storage-setup-dm.j2" + src: docker-storage-setup-dm.j2 dest: /etc/sysconfig/docker-storage-setup owner: root group: root -- cgit v1.2.1 From ad84935b5021da5ab0d21ffdf630079c1a59083d Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 2 Nov 2017 10:52:30 +0100 Subject: Use `null` instead of `False` where it makes sense The `openstack_*_network_name` vars are strings, not booleans, so their absence shouldn't really be marked by `False`. --- roles/openshift_openstack/defaults/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'roles') diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml index aa03c088e..1f9c09c96 100644 --- a/roles/openshift_openstack/defaults/main.yml +++ b/roles/openshift_openstack/defaults/main.yml @@ -75,8 +75,8 @@ openstack_node_image: "{{ openstack_default_image_name }}" openstack_lb_image: "{{ openstack_default_image_name }}" openstack_etcd_image: "{{ openstack_default_image_name }}" openstack_dns_image: "{{ openstack_default_image_name }}" -openstack_provider_network_name: False -openstack_external_network_name: False +openstack_provider_network_name: null +openstack_external_network_name: null openstack_private_network: >- {% if openstack_provider_network_name | default(None) -%} {{ openstack_provider_network_name }} -- cgit v1.2.1 From b95170503613bb97c00175324b31ed91f6f41ea1 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 2 Nov 2017 11:03:39 +0100 Subject: Namespace the OpenStack vars This makes sure that all the variables used in the `openshift_openstack` role are prefixed with `openshift_openstack_` as is the convention. 
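In practical terms, anyone consuming the role now sets the prefixed names in their inventory or group_vars rather than the old short ones. A minimal, hypothetical override file using a few of the renamed variables follows; the values shown are placeholders, not defaults taken from this patch.

```yaml
# group_vars/all.yml (hypothetical) -- overriding the namespaced role defaults
openshift_openstack_num_masters: 3
openshift_openstack_num_infra: 2
openshift_openstack_keypair_name: openshift
openshift_openstack_external_network_name: public
openshift_openstack_default_image_name: rhel-7-server
openshift_openstack_default_flavor: m1.medium
```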
--- roles/openshift_openstack/defaults/main.yml | 132 ++++---- .../tasks/check-prerequisites.yml | 40 +-- roles/openshift_openstack/tasks/hostname.yml | 2 +- .../openshift_openstack/tasks/net_vars_check.yaml | 2 +- roles/openshift_openstack/tasks/node-packages.yml | 6 +- roles/openshift_openstack/tasks/populate-dns.yml | 66 ++-- roles/openshift_openstack/tasks/provision.yml | 8 +- .../templates/docker-storage-setup-dm.j2 | 8 +- .../templates/docker-storage-setup-overlayfs.j2 | 10 +- .../templates/heat_stack.yaml.j2 | 336 ++++++++++----------- .../templates/heat_stack_server.yaml.j2 | 16 +- 11 files changed, 314 insertions(+), 312 deletions(-) (limited to 'roles') diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml index 1f9c09c96..3eca52963 100644 --- a/roles/openshift_openstack/defaults/main.yml +++ b/roles/openshift_openstack/defaults/main.yml @@ -1,28 +1,27 @@ --- -stack_state: 'present' +openshift_openstack_stack_state: 'present' -ssh_ingress_cidr: 0.0.0.0/0 -node_ingress_cidr: 0.0.0.0/0 -master_ingress_cidr: 0.0.0.0/0 -lb_ingress_cidr: 0.0.0.0/0 -bastion_ingress_cidr: 0.0.0.0/0 -openstack_num_etcd: 0 -openstack_num_masters: 1 -openstack_num_nodes: 1 -openstack_num_dns: 0 -openstack_num_infra: 1 -openstack_dns_nameservers: [] -openstack_nodes_to_remove: [] +openshift_openstack_ssh_ingress_cidr: 0.0.0.0/0 +openshift_openstack_node_ingress_cidr: 0.0.0.0/0 +openshift_openstack_lb_ingress_cidr: 0.0.0.0/0 +openshift_openstack_bastion_ingress_cidr: 0.0.0.0/0 +openshift_openstack_num_etcd: 0 +openshift_openstack_num_masters: 1 +openshift_openstack_num_nodes: 1 +openshift_openstack_num_dns: 0 +openshift_openstack_num_infra: 1 +openshift_openstack_dns_nameservers: [] +openshift_openstack_nodes_to_remove: [] -openshift_cluster_node_labels: +openshift_openstack_cluster_node_labels: app: region: primary infra: region: infra -install_debug_packages: false -required_packages: +openshift_openstack_install_debug_packages: false +openshift_openstack_required_packages: - docker - NetworkManager - wget @@ -30,66 +29,69 @@ required_packages: - net-tools - bind-utils - bridge-utils -debug_packages: +openshift_openstack_debug_packages: - bash-completion - vim-enhanced # container-storage-setup -docker_dev: "/dev/sdb" -docker_vg: "docker-vol" -docker_data_size: "95%VG" -docker_dm_basesize: "3G" -container_root_lv_name: "dockerlv" -container_root_lv_mount_path: "/var/lib/docker" +openshift_openstack_container_storage_setup: + docker_dev: "/dev/sdb" + docker_vg: "docker-vol" + docker_data_size: "95%VG" + docker_dm_basesize: "3G" + container_root_lv_name: "dockerlv" + container_root_lv_mount_path: "/var/lib/docker" # populate-dns -dns_records_rm: [] -dns_records_add: [] -external_nsupdate_keys: {} +openshift_openstack_dns_records_add: [] +openshift_openstack_external_nsupdate_keys: {} -full_dns_domain: "{{ (env_id|trim == '') | ternary(public_dns_domain, env_id + '.' + public_dns_domain) }}" -openshift_app_domain: "apps" +openshift_openstack_full_dns_domain: "{{ (openshift_openstack_clusterid|trim == '') | ternary(openshift_openstack_public_dns_domain, openshift_openstack_clusterid + '.' 
+ openshift_openstack_public_dns_domain) }}" +openshift_openstack_app_subdomain: "apps" # heat vars -stack_name: "{{ env_id }}.{{ public_dns_domain }}" -openstack_subnet_prefix: "192.168.99" -openstack_master_hostname: master -openstack_infra_hostname: infra-node -openstack_node_hostname: app-node -openstack_lb_hostname: lb -openstack_etcd_hostname: etcd -openstack_dns_hostname: dns -openstack_keypair_name: openshift -openstack_lb_flavor: "{{ openstack_default_flavor }}" -openstack_etcd_flavor: "{{ openstack_default_flavor }}" -openstack_master_flavor: "{{ openstack_default_flavor }}" -openstack_node_flavor: "{{ openstack_default_flavor }}" -openstack_infra_flavor: "{{ openstack_default_flavor }}" -openstack_dns_flavor: "{{ openstack_default_flavor }}" -openstack_master_image: "{{ openstack_default_image_name }}" -openstack_infra_image: "{{ openstack_default_image_name }}" -openstack_node_image: "{{ openstack_default_image_name }}" -openstack_lb_image: "{{ openstack_default_image_name }}" -openstack_etcd_image: "{{ openstack_default_image_name }}" -openstack_dns_image: "{{ openstack_default_image_name }}" -openstack_provider_network_name: null -openstack_external_network_name: null -openstack_private_network: >- - {% if openstack_provider_network_name | default(None) -%} - {{ openstack_provider_network_name }} +openshift_openstack_clusterid: openshift +openshift_openstack_stack_name: "{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}" +openshift_openstack_subnet_prefix: "192.168.99" +openshift_openstack_master_hostname: master +openshift_openstack_infra_hostname: infra-node +openshift_openstack_node_hostname: app-node +openshift_openstack_lb_hostname: lb +openshift_openstack_etcd_hostname: etcd +openshift_openstack_dns_hostname: dns +openshift_openstack_keypair_name: openshift +openshift_openstack_lb_flavor: "{{ openshift_openstack_default_flavor }}" +openshift_openstack_etcd_flavor: "{{ openshift_openstack_default_flavor }}" +openshift_openstack_master_flavor: "{{ openshift_openstack_default_flavor }}" +openshift_openstack_node_flavor: "{{ openshift_openstack_default_flavor }}" +openshift_openstack_infra_flavor: "{{ openshift_openstack_default_flavor }}" +openshift_openstack_dns_flavor: "{{ openshift_openstack_default_flavor }}" +openshift_openstack_master_image: "{{ openshift_openstack_default_image_name }}" +openshift_openstack_infra_image: "{{ openshift_openstack_default_image_name }}" +openshift_openstack_node_image: "{{ openshift_openstack_default_image_name }}" +openshift_openstack_lb_image: "{{ openshift_openstack_default_image_name }}" +openshift_openstack_etcd_image: "{{ openshift_openstack_default_image_name }}" +openshift_openstack_dns_image: "{{ openshift_openstack_default_image_name }}" +openshift_openstack_provider_network_name: null +openshift_openstack_external_network_name: null +openshift_openstack_private_network: >- + {% if openshift_openstack_provider_network_name | default(None) -%} + {{ openshift_openstack_provider_network_name }} {%- else -%} - {{ openstack_private_network_name | default ('openshift-ansible-' + stack_name + '-net') }} + {{ openshift_openstack_private_network_name | default ('openshift-ansible-' + openshift_openstack_stack_name + '-net') }} {%- endif -%} -openstack_master_server_group_policies: [] -openstack_infra_server_group_policies: [] -openstack_master_volume_size: "{{ docker_volume_size }}" -openstack_infra_volume_size: "{{ docker_volume_size }}" -openstack_node_volume_size: "{{ docker_volume_size }}" 
-openstack_etcd_volume_size: 2 -openstack_dns_volume_size: 1 -openstack_lb_volume_size: 5 -openstack_use_bastion: false -openshift_ui_ssh_tunnel: false +openshift_openstack_master_server_group_policies: [] +openshift_openstack_infra_server_group_policies: [] +openshift_openstack_docker_volume_size: 15 +openshift_openstack_master_volume_size: "{{ openshift_openstack_docker_volume_size }}" +openshift_openstack_infra_volume_size: "{{ openshift_openstack_docker_volume_size }}" +openshift_openstack_node_volume_size: "{{ openshift_openstack_docker_volume_size }}" +openshift_openstack_etcd_volume_size: 2 +openshift_openstack_dns_volume_size: 1 +openshift_openstack_lb_volume_size: 5 +openshift_openstack_use_bastion: false +openshift_openstack_ui_ssh_tunnel: false +openshift_openstack_ephemeral_volumes: false diff --git a/roles/openshift_openstack/tasks/check-prerequisites.yml b/roles/openshift_openstack/tasks/check-prerequisites.yml index a91e60640..57c7238d1 100644 --- a/roles/openshift_openstack/tasks/check-prerequisites.yml +++ b/roles/openshift_openstack/tasks/check-prerequisites.yml @@ -50,24 +50,24 @@ # Check Glance image - name: Try to get image facts os_image_facts: - image: "{{ openstack_default_image_name }}" + image: "{{ openshift_openstack_default_image_name }}" register: image_result - name: Check that image is available assert: that: "image_result.ansible_facts.openstack_image" - msg: "Image {{ openstack_default_image_name }} is not available" + msg: "Image {{ openshift_openstack_default_image_name }} is not available" # Check network name - name: Try to get network facts os_networks_facts: - name: "{{ openstack_external_network_name }}" + name: "{{ openshift_openstack_external_network_name }}" register: network_result - when: not openstack_provider_network_name|default(None) + when: not openshift_openstack_provider_network_name|default(None) - name: Check that network is available assert: that: "network_result.ansible_facts.openstack_networks" - msg: "Network {{ openstack_external_network_name }} is not available" - when: not openstack_provider_network_name|default(None) + msg: "Network {{ openshift_openstack_external_network_name }} is not available" + when: not openshift_openstack_provider_network_name|default(None) # Check keypair # TODO kpilatov: there is no Ansible module for getting OS keypairs @@ -76,30 +76,30 @@ - name: Try to show keypair command: > python -c 'import shade; cloud = shade.openstack_cloud(); - exit(cloud.get_keypair("{{ openstack_keypair_name }}") is None)' + exit(cloud.get_keypair("{{ openshift_openstack_keypair_name }}") is None)' ignore_errors: yes register: key_result - name: Check that keypair is available assert: that: 'key_result.rc == 0' - msg: "Keypair {{ openstack_keypair_name }} is not available" + msg: "Keypair {{ openshift_openstack_keypair_name }} is not available" # Check that custom images are available - include: custom_image_check.yaml with_items: - - "{{ openstack_master_image }}" - - "{{ openstack_infra_image }}" - - "{{ openstack_node_image }}" - - "{{ openstack_lb_image }}" - - "{{ openstack_etcd_image }}" - - "{{ openstack_dns_image }}" + - "{{ openshift_openstack_master_image }}" + - "{{ openshift_openstack_infra_image }}" + - "{{ openshift_openstack_node_image }}" + - "{{ openshift_openstack_lb_image }}" + - "{{ openshift_openstack_etcd_image }}" + - "{{ openshift_openstack_dns_image }}" # Check that custom flavors are available - include: custom_flavor_check.yaml with_items: - - "{{ openstack_master_flavor }}" - - "{{ 
openstack_infra_flavor }}" - - "{{ openstack_node_flavor }}" - - "{{ openstack_lb_flavor }}" - - "{{ openstack_etcd_flavor }}" - - "{{ openstack_dns_flavor }}" + - "{{ openshift_openstack_master_flavor }}" + - "{{ openshift_openstack_infra_flavor }}" + - "{{ openshift_openstack_node_flavor }}" + - "{{ openshift_openstack_lb_flavor }}" + - "{{ openshift_openstack_etcd_flavor }}" + - "{{ openshift_openstack_dns_flavor }}" diff --git a/roles/openshift_openstack/tasks/hostname.yml b/roles/openshift_openstack/tasks/hostname.yml index 9815d0e80..e1a18425f 100644 --- a/roles/openshift_openstack/tasks/hostname.yml +++ b/roles/openshift_openstack/tasks/hostname.yml @@ -5,7 +5,7 @@ - name: Setting FQDN Fact set_fact: - new_fqdn: "{{ new_hostname }}.{{ full_dns_domain }}" + new_fqdn: "{{ new_hostname }}.{{ openshift_openstack_full_dns_domain }}" - name: Setting hostname and DNS domain hostname: name="{{ new_fqdn }}" diff --git a/roles/openshift_openstack/tasks/net_vars_check.yaml b/roles/openshift_openstack/tasks/net_vars_check.yaml index 68afde415..18b9b21b9 100644 --- a/roles/openshift_openstack/tasks/net_vars_check.yaml +++ b/roles/openshift_openstack/tasks/net_vars_check.yaml @@ -3,7 +3,7 @@ fail: msg: "Flannel SDN requires a dedicated containers data network and can not work over a provider network" when: - - openstack_provider_network_name is defined + - openshift_openstack_provider_network_name is defined - openstack_private_data_network_name is defined - name: Check the flannel network configuration diff --git a/roles/openshift_openstack/tasks/node-packages.yml b/roles/openshift_openstack/tasks/node-packages.yml index c65eaec3b..7864f5269 100644 --- a/roles/openshift_openstack/tasks/node-packages.yml +++ b/roles/openshift_openstack/tasks/node-packages.yml @@ -5,11 +5,11 @@ yum: name: "{{ item }}" state: latest - with_items: "{{ required_packages }}" + with_items: "{{ openshift_openstack_required_packages }}" - name: Install debug packages (optional) yum: name: "{{ item }}" state: latest - with_items: "{{ debug_packages }}" - when: install_debug_packages|bool + with_items: "{{ openshift_openstack_debug_packages }}" + when: openshift_openstack_install_debug_packages|bool diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml index 669b65a01..080c3aca9 100644 --- a/roles/openshift_openstack/tasks/populate-dns.yml +++ b/roles/openshift_openstack/tasks/populate-dns.yml @@ -5,41 +5,41 @@ - name: "Add wildcard records to the private A records for infrahosts" set_fact: - private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['private_v4'] } ] }}" + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' 
+ openshift_openstack_app_subdomain, 'ip': hostvars[item]['private_v4'] } ] }}" with_items: "{{ groups['infra_hosts'] }}" - name: "Add public master cluster hostname records to the private A records (single master)" set_fact: - private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].private_v4 } ] }}" + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(openshift_openstack_full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].private_v4 } ] }}" when: - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - - openstack_num_masters == 1 + - openshift_openstack_num_masters == 1 - name: "Add public master cluster hostname records to the private A records (multi-master)" set_fact: - private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].private_v4 } ] }}" + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(openshift_openstack_full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].private_v4 } ] }}" when: - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - - openstack_num_masters > 1 + - openshift_openstack_num_masters > 1 - name: "Set the private DNS server to use the external value (if provided)" set_fact: - nsupdate_server_private: "{{ external_nsupdate_keys['private']['server'] }}" - nsupdate_key_secret_private: "{{ external_nsupdate_keys['private']['key_secret'] }}" - nsupdate_key_algorithm_private: "{{ external_nsupdate_keys['private']['key_algorithm'] }}" - nsupdate_private_key_name: "{{ external_nsupdate_keys['private']['key_name']|default('private-' + full_dns_domain) }}" + nsupdate_server_private: "{{ openshift_openstack_external_nsupdate_keys['private']['server'] }}" + nsupdate_key_secret_private: "{{ openshift_openstack_external_nsupdate_keys['private']['key_secret'] }}" + nsupdate_key_algorithm_private: "{{ openshift_openstack_external_nsupdate_keys['private']['key_algorithm'] }}" + nsupdate_private_key_name: "{{ openshift_openstack_external_nsupdate_keys['private']['key_name']|default('private-' + openshift_openstack_full_dns_domain) }}" when: - - external_nsupdate_keys is defined - - external_nsupdate_keys['private'] is defined + - openshift_openstack_external_nsupdate_keys is defined + - openshift_openstack_external_nsupdate_keys['private'] is defined - name: "Generate the private Add section for DNS" set_fact: private_named_records: - view: "private" - zone: "{{ full_dns_domain }}" + zone: "{{ openshift_openstack_full_dns_domain }}" server: "{{ nsupdate_server_private }}" - key_name: "{{ nsupdate_private_key_name|default('private-' + full_dns_domain) }}" + key_name: "{{ nsupdate_private_key_name|default('private-' + openshift_openstack_full_dns_domain) }}" key_secret: "{{ nsupdate_key_secret_private }}" key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}" entries: "{{ private_records }}" @@ -52,58 +52,58 @@ - name: "Add wildcard records to the public A records" set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' 
+ openshift_app_domain, 'ip': hostvars[item]['public_v4'] } ] }}" + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_openstack_app_subdomain, 'ip': hostvars[item]['public_v4'] } ] }}" with_items: "{{ groups['infra_hosts'] }}" when: hostvars[item]['public_v4'] is defined - name: "Add public master cluster hostname records to the public A records (single master)" set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].public_v4 } ] }}" + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(openshift_openstack_full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].public_v4 } ] }}" when: - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - - openstack_num_masters == 1 - - not openstack_use_bastion|bool + - openshift_openstack_num_masters == 1 + - not openshift_openstack_use_bastion|bool - name: "Add public master cluster hostname records to the public A records (single master behind a bastion)" set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}" + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(openshift_openstack_full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}" when: - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - - openstack_num_masters == 1 - - openstack_use_bastion|bool + - openshift_openstack_num_masters == 1 + - openshift_openstack_use_bastion|bool - name: "Add public master cluster hostname records to the public A records (multi-master)" set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].public_v4 } ] }}" + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(openshift_openstack_full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].public_v4 } ] }}" when: - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - - openstack_num_masters > 1 + - openshift_openstack_num_masters > 1 - name: "Set the public DNS server details to use the external value (if provided)" set_fact: - nsupdate_server_public: "{{ external_nsupdate_keys['public']['server'] }}" - nsupdate_key_secret_public: "{{ external_nsupdate_keys['public']['key_secret'] }}" - nsupdate_key_algorithm_public: "{{ external_nsupdate_keys['public']['key_algorithm'] }}" - nsupdate_public_key_name: "{{ external_nsupdate_keys['public']['key_name']|default('public-' + full_dns_domain) }}" + nsupdate_server_public: "{{ openshift_openstack_external_nsupdate_keys['public']['server'] }}" + nsupdate_key_secret_public: "{{ openshift_openstack_external_nsupdate_keys['public']['key_secret'] }}" + nsupdate_key_algorithm_public: "{{ openshift_openstack_external_nsupdate_keys['public']['key_algorithm'] 
}}" + nsupdate_public_key_name: "{{ openshift_openstack_external_nsupdate_keys['public']['key_name']|default('public-' + openshift_openstack_full_dns_domain) }}" when: - - external_nsupdate_keys is defined - - external_nsupdate_keys['public'] is defined + - openshift_openstack_external_nsupdate_keys is defined + - openshift_openstack_external_nsupdate_keys['public'] is defined - name: "Generate the public Add section for DNS" set_fact: public_named_records: - view: "public" - zone: "{{ full_dns_domain }}" + zone: "{{ openshift_openstack_full_dns_domain }}" server: "{{ nsupdate_server_public }}" - key_name: "{{ nsupdate_public_key_name|default('public-' + full_dns_domain) }}" + key_name: "{{ nsupdate_public_key_name|default('public-' + openshift_openstack_full_dns_domain) }}" key_secret: "{{ nsupdate_key_secret_public }}" key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" entries: "{{ public_records }}" -- name: "Generate the final dns_records_add" +- name: "Generate the final openshift_openstack_dns_records_add" set_fact: - dns_records_add: "{{ private_named_records + public_named_records }}" + openshift_openstack_dns_records_add: "{{ private_named_records + public_named_records }}" - name: "Add DNS A records" @@ -119,7 +119,7 @@ # TODO(shadower): add a cleanup playbook that removes these records, too! state: present with_subelements: - - "{{ dns_records_add | default({}) }}" + - "{{ openshift_openstack_dns_records_add | default({}) }}" - entries register: nsupdate_add_result until: nsupdate_add_result|succeeded diff --git a/roles/openshift_openstack/tasks/provision.yml b/roles/openshift_openstack/tasks/provision.yml index e693f535a..dccbe334c 100644 --- a/roles/openshift_openstack/tasks/provision.yml +++ b/roles/openshift_openstack/tasks/provision.yml @@ -2,14 +2,14 @@ - name: Generate the templates include: generate-templates.yml when: - - stack_state == 'present' + - openshift_openstack_stack_state == 'present' - name: Handle the Stack (create/delete) ignore_errors: False register: stack_create os_stack: - name: "{{ stack_name }}" - state: "{{ stack_state }}" + name: "{{ openshift_openstack_stack_name }}" + state: "{{ openshift_openstack_stack_state }}" template: "{{ stack_template_path | default(omit) }}" wait: yes @@ -19,7 +19,7 @@ - name: CleanUp include: cleanup.yml when: - - stack_state == 'present' + - openshift_openstack_stack_state == 'present' # TODO(shadower): create the registry and PV Cinder volumes if specified # and include the `prepare-and-format-cinder-volume` tasks to set it up diff --git a/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 index b5869feff..32c6b5838 100644 --- a/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 +++ b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 @@ -1,4 +1,4 @@ -DEVS="{{ docker_dev }}" -VG="{{ docker_vg }}" -DATA_SIZE="{{ docker_data_size }}" -EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}" +DEVS="{{ openshift_openstack_container_storage_setup.docker_dev }}" +VG="{{ openshift_openstack_container_storage_setup.docker_vg }}" +DATA_SIZE="{{ openshift_openstack_container_storage_setup.docker_data_size }}" +EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ openshift_openstack_container_storage_setup.docker_dm_basesize }}" diff --git a/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 index 
d8b4a0276..1bf366bdc 100644 --- a/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 +++ b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 @@ -1,7 +1,7 @@ -DEVS="{{ docker_dev }}" -VG="{{ docker_vg }}" -DATA_SIZE="{{ docker_data_size }}" +DEVS="{{ openshift_openstack_container_storage_setup.docker_dev }}" +VG="{{ openshift_openstack_container_storage_setup.docker_vg }}" +DATA_SIZE="{{ openshift_openstack_container_storage_setup.docker_data_size }}" STORAGE_DRIVER=overlay2 -CONTAINER_ROOT_LV_NAME="{{ container_root_lv_name }}" -CONTAINER_ROOT_LV_MOUNT_PATH="{{ container_root_lv_mount_path }}" +CONTAINER_ROOT_LV_NAME="{{ openshift_openstack_container_storage_setup.container_root_lv_name }}" +CONTAINER_ROOT_LV_MOUNT_PATH="{{ openshift_openstack_container_storage_setup.container_root_lv_mount_path }}" CONTAINER_ROOT_LV_SIZE=100%FREE diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2 index 28634f9a4..bfa65b460 100644 --- a/roles/openshift_openstack/templates/heat_stack.yaml.j2 +++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2 @@ -54,7 +54,7 @@ outputs: description: Floating IPs of the nodes value: { get_attr: [ infra_nodes, floating_ip ] } -{% if openstack_num_dns|int > 0 %} +{% if openshift_openstack_num_dns|int > 0 %} dns_name: description: Name of the DNS value: @@ -72,11 +72,11 @@ outputs: {% endif %} conditions: - no_floating: {% if openstack_provider_network_name or openstack_use_bastion|bool %}true{% else %}false{% endif %} + no_floating: {% if openshift_openstack_provider_network_name or openshift_openstack_use_bastion|bool %}true{% else %}false{% endif %} resources: -{% if not openstack_provider_network_name %} +{% if not openshift_openstack_provider_network_name %} net: type: OS::Neutron::Net properties: @@ -84,7 +84,7 @@ resources: str_replace: template: openshift-ansible-cluster_id-net params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} subnet: type: OS::Neutron::Subnet @@ -93,26 +93,26 @@ resources: str_replace: template: openshift-ansible-cluster_id-subnet params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} network: { get_resource: net } cidr: str_replace: template: subnet_24_prefix.0/24 params: - subnet_24_prefix: {{ openstack_subnet_prefix }} + subnet_24_prefix: {{ openshift_openstack_subnet_prefix }} allocation_pools: - start: str_replace: template: subnet_24_prefix.3 params: - subnet_24_prefix: {{ openstack_subnet_prefix }} + subnet_24_prefix: {{ openshift_openstack_subnet_prefix }} end: str_replace: template: subnet_24_prefix.254 params: - subnet_24_prefix: {{ openstack_subnet_prefix }} + subnet_24_prefix: {{ openshift_openstack_subnet_prefix }} dns_nameservers: -{% for nameserver in openstack_dns_nameservers %} +{% for nameserver in openshift_openstack_dns_nameservers %} - {{ nameserver }} {% endfor %} @@ -120,13 +120,13 @@ resources: data_net: type: OS::Neutron::Net properties: - name: openshift-ansible-{{ stack_name }}-data-net + name: openshift-ansible-{{ openshift_openstack_stack_name }}-data-net port_security_enabled: false data_subnet: type: OS::Neutron::Subnet properties: - name: openshift-ansible-{{ stack_name }}-data-subnet + name: openshift-ansible-{{ openshift_openstack_stack_name }}-data-subnet network: { get_resource: data_net } cidr: {{ osm_cluster_network_cidr|default('10.128.0.0/14') }} gateway_ip: null @@ -139,9 +139,9 @@ resources: str_replace: template: 
openshift-ansible-cluster_id-router params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} external_gateway_info: - network: {{ openstack_external_network_name }} + network: {{ openshift_openstack_external_network_name }} interface: type: OS::Neutron::RouterInterface @@ -158,8 +158,8 @@ resources: # str_replace: # template: openshift-ansible-cluster_id-keypair # params: -# cluster_id: {{ stack_name }} -# public_key: {{ openstack_keypair_name }} +# cluster_id: {{ openshift_openstack_stack_name }} +# public_key: {{ openshift_openstack_keypair_name }} common-secgrp: type: OS::Neutron::SecurityGroup @@ -168,30 +168,30 @@ resources: str_replace: template: openshift-ansible-cluster_id-common-secgrp params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} description: str_replace: template: Basic ssh/icmp security group for cluster_id OpenShift cluster params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} rules: - direction: ingress protocol: tcp port_range_min: 22 port_range_max: 22 - remote_ip_prefix: {{ ssh_ingress_cidr }} -{% if openstack_use_bastion|bool %} + remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }} +{% if openshift_openstack_use_bastion|bool %} - direction: ingress protocol: tcp port_range_min: 22 port_range_max: 22 - remote_ip_prefix: {{ bastion_ingress_cidr }} + remote_ip_prefix: {{ openshift_openstack_bastion_ingress_cidr }} {% endif %} - direction: ingress protocol: icmp - remote_ip_prefix: {{ ssh_ingress_cidr }} + remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }} -{% if openstack_flat_secgrp|default(False)|bool %} +{% if openshift_openstack_flat_secgrp|default(False)|bool %} flat-secgrp: type: OS::Neutron::SecurityGroup properties: @@ -199,12 +199,12 @@ resources: str_replace: template: openshift-ansible-cluster_id-flat-secgrp params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} description: str_replace: template: Security group for cluster_id OpenShift cluster params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} rules: - direction: ingress protocol: tcp @@ -280,12 +280,12 @@ resources: protocol: tcp port_range_min: 30000 port_range_max: 32767 - remote_ip_prefix: {{ node_ingress_cidr }} + remote_ip_prefix: {{ openshift_openstack_node_ingress_cidr }} - direction: ingress protocol: tcp port_range_min: 30000 port_range_max: 32767 - remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" + remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24" {% else %} master-secgrp: type: OS::Neutron::SecurityGroup @@ -294,12 +294,12 @@ resources: str_replace: template: openshift-ansible-cluster_id-master-secgrp params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} description: str_replace: template: Security group for cluster_id OpenShift cluster master params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} rules: - direction: ingress protocol: tcp @@ -355,12 +355,12 @@ resources: str_replace: template: openshift-ansible-cluster_id-etcd-secgrp params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} description: str_replace: template: Security group for cluster_id etcd cluster params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} rules: - direction: ingress protocol: tcp @@ -381,12 +381,12 @@ resources: str_replace: template: 
openshift-ansible-cluster_id-node-secgrp params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} description: str_replace: template: Security group for cluster_id OpenShift cluster nodes params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} rules: - direction: ingress protocol: tcp @@ -412,12 +412,12 @@ resources: protocol: tcp port_range_min: 30000 port_range_max: 32767 - remote_ip_prefix: {{ node_ingress_cidr }} + remote_ip_prefix: {{ openshift_openstack_node_ingress_cidr }} - direction: ingress protocol: tcp port_range_min: 30000 port_range_max: 32767 - remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" + remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24" {% endif %} infra-secgrp: @@ -427,12 +427,12 @@ resources: str_replace: template: openshift-ansible-cluster_id-infra-secgrp params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} description: str_replace: template: Security group for cluster_id OpenShift infrastructure cluster nodes params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} rules: - direction: ingress protocol: tcp @@ -443,7 +443,7 @@ resources: port_range_min: 443 port_range_max: 443 -{% if openstack_num_dns|int > 0 %} +{% if openshift_openstack_num_dns|int > 0 %} dns-secgrp: type: OS::Neutron::SecurityGroup properties: @@ -451,67 +451,67 @@ resources: str_replace: template: openshift-ansible-cluster_id-dns-secgrp params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} description: str_replace: template: Security group for cluster_id cluster DNS params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} rules: - direction: ingress protocol: udp port_range_min: 53 port_range_max: 53 - remote_ip_prefix: {{ node_ingress_cidr }} + remote_ip_prefix: {{ openshift_openstack_node_ingress_cidr }} - direction: ingress protocol: udp port_range_min: 53 port_range_max: 53 - remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" + remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24" - direction: ingress protocol: tcp port_range_min: 53 port_range_max: 53 - remote_ip_prefix: {{ node_ingress_cidr }} + remote_ip_prefix: {{ openshift_openstack_node_ingress_cidr }} - direction: ingress protocol: tcp port_range_min: 53 port_range_max: 53 - remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" + remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24" {% endif %} -{% if openstack_num_masters|int > 1 or openshift_ui_ssh_tunnel|bool %} +{% if openshift_openstack_num_masters|int > 1 or openshift_openstack_ui_ssh_tunnel|bool %} lb-secgrp: type: OS::Neutron::SecurityGroup properties: - name: openshift-ansible-{{ stack_name }}-lb-secgrp - description: Security group for {{ stack_name }} cluster Load Balancer + name: openshift-ansible-{{ openshift_openstack_stack_name }}-lb-secgrp + description: Security group for {{ openshift_openstack_stack_name }} cluster Load Balancer rules: - direction: ingress protocol: tcp port_range_min: {{ openshift_master_api_port | default(8443) }} port_range_max: {{ openshift_master_api_port | default(8443) }} - remote_ip_prefix: {{ lb_ingress_cidr | default(bastion_ingress_cidr) }} -{% if openshift_ui_ssh_tunnel|bool %} + remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr | default(openshift_openstack_bastion_ingress_cidr) }} +{% if openshift_openstack_ui_ssh_tunnel|bool %} - direction: ingress protocol: tcp port_range_min: 
{{ openshift_master_api_port | default(8443) }} port_range_max: {{ openshift_master_api_port | default(8443) }} - remote_ip_prefix: {{ ssh_ingress_cidr }} + remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }} {% endif %} {% if openshift_master_console_port is defined and openshift_master_console_port != openshift_master_api_port %} - direction: ingress protocol: tcp port_range_min: {{ openshift_master_console_port | default(8443) }} port_range_max: {{ openshift_master_console_port | default(8443) }} - remote_ip_prefix: {{ lb_ingress_cidr | default(bastion_ingress_cidr) }} + remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr | default(openshift_openstack_bastion_ingress_cidr) }} {% endif %} {% endif %} etcd: type: OS::Heat::ResourceGroup properties: - count: {{ openstack_num_etcd }} + count: {{ openshift_openstack_num_etcd }} resource_def: type: server.yaml properties: @@ -519,23 +519,23 @@ resources: str_replace: template: k8s_type-%index%.cluster_id params: - cluster_id: {{ stack_name }} - k8s_type: {{ openstack_etcd_hostname }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} + k8s_type: {{ openshift_openstack_etcd_hostname }} + cluster_env: {{ openshift_openstack_public_dns_domain }} + cluster_id: {{ openshift_openstack_stack_name }} group: str_replace: template: k8s_type.cluster_id params: k8s_type: etcds - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} type: etcd - image: {{ openstack_etcd_image }} - flavor: {{ openstack_etcd_flavor }} - key_name: {{ openstack_keypair_name }} -{% if openstack_provider_network_name %} - net: {{ openstack_provider_network_name }} - net_name: {{ openstack_provider_network_name }} + image: {{ openshift_openstack_etcd_image }} + flavor: {{ openshift_openstack_etcd_flavor }} + key_name: {{ openshift_openstack_keypair_name }} +{% if openshift_openstack_provider_network_name %} + net: {{ openshift_openstack_provider_network_name }} + net_name: {{ openshift_openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -543,40 +543,40 @@ resources: str_replace: template: openshift-ansible-cluster_id-net params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} {% endif %} secgrp: - - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } + - { get_resource: {% if openshift_openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } - { get_resource: common-secgrp } floating_network: if: - no_floating - null - - {{ openstack_external_network_name }} -{% if openstack_use_bastion|bool or openstack_provider_network_name %} + - {{ openshift_openstack_external_network_name }} +{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %} attach_float_net: false {% endif %} - volume_size: {{ openstack_etcd_volume_size }} -{% if not openstack_provider_network_name %} + volume_size: {{ openshift_openstack_etcd_volume_size }} +{% if not openshift_openstack_provider_network_name %} depends_on: - interface {% endif %} -{% if openstack_master_server_group_policies|length > 0 %} +{% if openshift_openstack_master_server_group_policies|length > 0 %} master_server_group: type: OS::Nova::ServerGroup properties: name: master_server_group - policies: {{ openstack_master_server_group_policies }} + policies: {{ openshift_openstack_master_server_group_policies }} {% endif %} 
-{% if openstack_infra_server_group_policies|length > 0 %} +{% if openshift_openstack_infra_server_group_policies|length > 0 %} infra_server_group: type: OS::Nova::ServerGroup properties: name: infra_server_group - policies: {{ openstack_infra_server_group_policies }} + policies: {{ openshift_openstack_infra_server_group_policies }} {% endif %} -{% if openstack_num_masters|int > 1 %} +{% if openshift_openstack_num_masters|int > 1 %} loadbalancer: type: OS::Heat::ResourceGroup properties: @@ -588,23 +588,23 @@ resources: str_replace: template: k8s_type-%index%.cluster_id params: - cluster_id: {{ stack_name }} - k8s_type: {{ openstack_lb_hostname }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} + k8s_type: {{ openshift_openstack_lb_hostname }} + cluster_env: {{ openshift_openstack_public_dns_domain }} + cluster_id: {{ openshift_openstack_stack_name }} group: str_replace: template: k8s_type.cluster_id params: k8s_type: lb - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} type: lb - image: {{ openstack_lb_image }} - flavor: {{ openstack_lb_flavor }} - key_name: {{ openstack_keypair_name }} -{% if openstack_provider_network_name %} - net: {{ openstack_provider_network_name }} - net_name: {{ openstack_provider_network_name }} + image: {{ openshift_openstack_lb_image }} + flavor: {{ openshift_openstack_lb_flavor }} + key_name: {{ openshift_openstack_keypair_name }} +{% if openshift_openstack_provider_network_name %} + net: {{ openshift_openstack_provider_network_name }} + net_name: {{ openshift_openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -612,16 +612,16 @@ resources: str_replace: template: openshift-ansible-cluster_id-net params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} {% endif %} secgrp: - { get_resource: lb-secgrp } - { get_resource: common-secgrp } -{% if not openstack_provider_network_name %} - floating_network: {{ openstack_external_network_name }} +{% if not openshift_openstack_provider_network_name %} + floating_network: {{ openshift_openstack_external_network_name }} {% endif %} - volume_size: {{ openstack_lb_volume_size }} -{% if not openstack_provider_network_name %} + volume_size: {{ openshift_openstack_lb_volume_size }} +{% if not openshift_openstack_provider_network_name %} depends_on: - interface {% endif %} @@ -630,7 +630,7 @@ resources: masters: type: OS::Heat::ResourceGroup properties: - count: {{ openstack_num_masters }} + count: {{ openshift_openstack_num_masters }} resource_def: type: server.yaml properties: @@ -638,23 +638,23 @@ resources: str_replace: template: k8s_type-%index%.cluster_id params: - cluster_id: {{ stack_name }} - k8s_type: {{ openstack_master_hostname }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} + k8s_type: {{ openshift_openstack_master_hostname }} + cluster_env: {{ openshift_openstack_public_dns_domain }} + cluster_id: {{ openshift_openstack_stack_name }} group: str_replace: template: k8s_type.cluster_id params: k8s_type: masters - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} type: master - image: {{ openstack_master_image }} - flavor: {{ openstack_master_flavor }} - key_name: {{ openstack_keypair_name }} -{% if openstack_provider_network_name %} - net: {{ openstack_provider_network_name }} - net_name: {{ openstack_provider_network_name 
}} + image: {{ openshift_openstack_master_image }} + flavor: {{ openshift_openstack_master_flavor }} + key_name: {{ openshift_openstack_keypair_name }} +{% if openshift_openstack_provider_network_name %} + net: {{ openshift_openstack_provider_network_name }} + net_name: {{ openshift_openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -662,7 +662,7 @@ resources: str_replace: template: openshift-ansible-cluster_id-net params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} {% if openshift_use_flannel|default(False)|bool %} attach_data_net: true data_net: { get_resource: data_net } @@ -670,12 +670,12 @@ resources: {% endif %} {% endif %} secgrp: -{% if openstack_flat_secgrp|default(False)|bool %} +{% if openshift_openstack_flat_secgrp|default(False)|bool %} - { get_resource: flat-secgrp } {% else %} - { get_resource: master-secgrp } - { get_resource: node-secgrp } -{% if openstack_num_etcd|int == 0 %} +{% if openshift_openstack_num_etcd|int == 0 %} - { get_resource: etcd-secgrp } {% endif %} {% endif %} @@ -684,16 +684,16 @@ resources: if: - no_floating - null - - {{ openstack_external_network_name }} -{% if openstack_use_bastion|bool or openstack_provider_network_name %} + - {{ openshift_openstack_external_network_name }} +{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %} attach_float_net: false {% endif %} - volume_size: {{ openstack_master_volume_size }} -{% if openstack_master_server_group_policies|length > 0 %} + volume_size: {{ openshift_openstack_master_volume_size }} +{% if openshift_openstack_master_server_group_policies|length > 0 %} scheduler_hints: group: { get_resource: master_server_group } {% endif %} -{% if not openstack_provider_network_name %} +{% if not openshift_openstack_provider_network_name %} depends_on: - interface {% endif %} @@ -701,9 +701,9 @@ resources: compute_nodes: type: OS::Heat::ResourceGroup properties: - count: {{ openstack_num_nodes }} + count: {{ openshift_openstack_num_nodes }} removal_policies: - - resource_list: {{ openstack_nodes_to_remove }} + - resource_list: {{ openshift_openstack_nodes_to_remove }} resource_def: type: server.yaml properties: @@ -711,28 +711,28 @@ resources: str_replace: template: sub_type_k8s_type-%index%.cluster_id params: - cluster_id: {{ stack_name }} - sub_type_k8s_type: {{ openstack_node_hostname }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} + sub_type_k8s_type: {{ openshift_openstack_node_hostname }} + cluster_env: {{ openshift_openstack_public_dns_domain }} + cluster_id: {{ openshift_openstack_stack_name }} group: str_replace: template: k8s_type.cluster_id params: k8s_type: nodes - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} type: node subtype: app node_labels: -{% for k, v in openshift_cluster_node_labels.app.iteritems() %} +{% for k, v in openshift_openstack_cluster_node_labels.app.iteritems() %} {{ k|e }}: {{ v|e }} {% endfor %} - image: {{ openstack_node_image }} - flavor: {{ openstack_node_flavor }} - key_name: {{ openstack_keypair_name }} -{% if openstack_provider_network_name %} - net: {{ openstack_provider_network_name }} - net_name: {{ openstack_provider_network_name }} + image: {{ openshift_openstack_node_image }} + flavor: {{ openshift_openstack_node_flavor }} + key_name: {{ openshift_openstack_keypair_name }} +{% if openshift_openstack_provider_network_name %} + net: {{ 
openshift_openstack_provider_network_name }} + net_name: {{ openshift_openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -740,7 +740,7 @@ resources: str_replace: template: openshift-ansible-cluster_id-net params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} {% if openshift_use_flannel|default(False)|bool %} attach_data_net: true data_net: { get_resource: data_net } @@ -748,18 +748,18 @@ resources: {% endif %} {% endif %} secgrp: - - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } + - { get_resource: {% if openshift_openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } - { get_resource: common-secgrp } floating_network: if: - no_floating - null - - {{ openstack_external_network_name }} -{% if openstack_use_bastion|bool or openstack_provider_network_name %} + - {{ openshift_openstack_external_network_name }} +{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %} attach_float_net: false {% endif %} - volume_size: {{ openstack_node_volume_size }} -{% if not openstack_provider_network_name %} + volume_size: {{ openshift_openstack_node_volume_size }} +{% if not openshift_openstack_provider_network_name %} depends_on: - interface {% endif %} @@ -767,7 +767,7 @@ resources: infra_nodes: type: OS::Heat::ResourceGroup properties: - count: {{ openstack_num_infra }} + count: {{ openshift_openstack_num_infra }} resource_def: type: server.yaml properties: @@ -775,28 +775,28 @@ resources: str_replace: template: sub_type_k8s_type-%index%.cluster_id params: - cluster_id: {{ stack_name }} - sub_type_k8s_type: {{ openstack_infra_hostname }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} + sub_type_k8s_type: {{ openshift_openstack_infra_hostname }} + cluster_env: {{ openshift_openstack_public_dns_domain }} + cluster_id: {{ openshift_openstack_stack_name }} group: str_replace: template: k8s_type.cluster_id params: k8s_type: infra - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} type: node subtype: infra node_labels: -{% for k, v in openshift_cluster_node_labels.infra.iteritems() %} +{% for k, v in openshift_openstack_cluster_node_labels.infra.iteritems() %} {{ k|e }}: {{ v|e }} {% endfor %} - image: {{ openstack_infra_image }} - flavor: {{ openstack_infra_flavor }} - key_name: {{ openstack_keypair_name }} -{% if openstack_provider_network_name %} - net: {{ openstack_provider_network_name }} - net_name: {{ openstack_provider_network_name }} + image: {{ openshift_openstack_infra_image }} + flavor: {{ openshift_openstack_infra_flavor }} + key_name: {{ openshift_openstack_keypair_name }} +{% if openshift_openstack_provider_network_name %} + net: {{ openshift_openstack_provider_network_name }} + net_name: {{ openshift_openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -804,7 +804,7 @@ resources: str_replace: template: openshift-ansible-cluster_id-net params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} {% if openshift_use_flannel|default(False)|bool %} attach_data_net: true data_net: { get_resource: data_net } @@ -813,34 +813,34 @@ resources: {% endif %} secgrp: # TODO(bogdando) filter only required node rules into infra-secgrp -{% if openstack_flat_secgrp|default(False)|bool %} +{% if 
openshift_openstack_flat_secgrp|default(False)|bool %} - { get_resource: flat-secgrp } {% else %} - { get_resource: node-secgrp } {% endif %} -{% if openshift_ui_ssh_tunnel|bool and openstack_num_masters|int < 2 %} +{% if openshift_openstack_ui_ssh_tunnel|bool and openshift_openstack_num_masters|int < 2 %} - { get_resource: lb-secgrp } {% endif %} - { get_resource: infra-secgrp } - { get_resource: common-secgrp } -{% if not openstack_provider_network_name %} - floating_network: {{ openstack_external_network_name }} +{% if not openshift_openstack_provider_network_name %} + floating_network: {{ openshift_openstack_external_network_name }} {% endif %} - volume_size: {{ openstack_infra_volume_size }} -{% if openstack_infra_server_group_policies|length > 0 %} + volume_size: {{ openshift_openstack_infra_volume_size }} +{% if openshift_openstack_infra_server_group_policies|length > 0 %} scheduler_hints: group: { get_resource: infra_server_group } {% endif %} -{% if not openstack_provider_network_name %} +{% if not openshift_openstack_provider_network_name %} depends_on: - interface {% endif %} -{% if openstack_num_dns|int > 0 %} +{% if openshift_openstack_num_dns|int > 0 %} dns: type: OS::Heat::ResourceGroup properties: - count: {{ openstack_num_dns }} + count: {{ openshift_openstack_num_dns }} resource_def: type: server.yaml properties: @@ -848,23 +848,23 @@ resources: str_replace: template: k8s_type-%index%.cluster_id params: - cluster_id: {{ stack_name }} - k8s_type: {{ openstack_dns_hostname }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} + k8s_type: {{ openshift_openstack_dns_hostname }} + cluster_env: {{ openshift_openstack_public_dns_domain }} + cluster_id: {{ openshift_openstack_stack_name }} group: str_replace: template: k8s_type.cluster_id params: k8s_type: dns - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} type: dns - image: {{ openstack_dns_image }} - flavor: {{ openstack_dns_flavor }} - key_name: {{ openstack_keypair_name }} -{% if openstack_provider_network_name %} - net: {{ openstack_provider_network_name }} - net_name: {{ openstack_provider_network_name }} + image: {{ openshift_openstack_dns_image }} + flavor: {{ openshift_openstack_dns_flavor }} + key_name: {{ openshift_openstack_keypair_name }} +{% if openshift_openstack_provider_network_name %} + net: {{ openshift_openstack_provider_network_name }} + net_name: {{ openshift_openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -872,16 +872,16 @@ resources: str_replace: template: openshift-ansible-cluster_id-net params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} {% endif %} secgrp: - { get_resource: dns-secgrp } - { get_resource: common-secgrp } -{% if not openstack_provider_network_name %} - floating_network: {{ openstack_external_network_name }} +{% if not openshift_openstack_provider_network_name %} + floating_network: {{ openshift_openstack_external_network_name }} {% endif %} - volume_size: {{ openstack_dns_volume_size }} -{% if not openstack_provider_network_name %} + volume_size: {{ openshift_openstack_dns_volume_size }} +{% if not openshift_openstack_provider_network_name %} depends_on: - interface {% endif %} diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 index 160345baf..a829da34f 100644 --- 
a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 +++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 @@ -61,7 +61,7 @@ parameters: label: Net name description: Net name -{% if not openstack_provider_network_name %} +{% if not openshift_openstack_provider_network_name %} subnet: type: string label: Subnet ID @@ -81,7 +81,7 @@ parameters: label: Net ID description: Net resource -{% if not openstack_provider_network_name %} +{% if not openshift_openstack_provider_network_name %} data_subnet: type: string default: '' @@ -102,7 +102,7 @@ parameters: label: Attach-float-net description: A switch for floating network port connection -{% if not openstack_provider_network_name %} +{% if not openshift_openstack_provider_network_name %} floating_network: type: string default: '' @@ -156,7 +156,7 @@ outputs: - server - addresses - { get_param: net_name } -{% if openstack_provider_network_name %} +{% if openshift_openstack_provider_network_name %} - 0 {% else %} - 1 @@ -226,7 +226,7 @@ resources: type: OS::Neutron::Port properties: network: { get_param: net } -{% if not openstack_provider_network_name %} +{% if not openshift_openstack_provider_network_name %} fixed_ips: - subnet: { get_param: subnet } {% endif %} @@ -239,13 +239,13 @@ resources: properties: network: { get_param: data_net } port_security_enabled: false -{% if not openstack_provider_network_name %} +{% if not openshift_openstack_provider_network_name %} fixed_ips: - subnet: { get_param: data_subnet } {% endif %} {% endif %} -{% if not openstack_provider_network_name %} +{% if not openshift_openstack_provider_network_name %} floating-ip: condition: { not: no_floating } type: OS::Neutron::FloatingIP @@ -254,7 +254,7 @@ resources: port_id: { get_resource: port } {% endif %} -{% if not ephemeral_volumes|default(false)|bool %} +{% if not openshift_openstack_ephemeral_volumes|default(false)|bool %} cinder_volume: type: OS::Cinder::Volume properties: -- cgit v1.2.1 From 67791867abbeb06c9bd11a1583ab6b976902fd15 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 2 Nov 2017 18:08:03 +0100 Subject: Fix tox --- roles/openshift_openstack/defaults/main.yml | 1 - roles/openshift_openstack/tasks/populate-dns.yml | 5 +++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'roles') diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml index 3eca52963..5f182e0d6 100644 --- a/roles/openshift_openstack/defaults/main.yml +++ b/roles/openshift_openstack/defaults/main.yml @@ -51,7 +51,6 @@ openshift_openstack_full_dns_domain: "{{ (openshift_openstack_clusterid|trim == openshift_openstack_app_subdomain: "apps" - # heat vars openshift_openstack_clusterid: openshift openshift_openstack_stack_name: "{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}" diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml index 080c3aca9..c03aceb94 100644 --- a/roles/openshift_openstack/tasks/populate-dns.yml +++ b/roles/openshift_openstack/tasks/populate-dns.yml @@ -1,3 +1,4 @@ +--- - name: "Generate list of private A records" set_fact: private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}" @@ -119,8 +120,8 @@ # TODO(shadower): add a cleanup playbook that removes these records, too! 
state: present with_subelements: - - "{{ openshift_openstack_dns_records_add | default({}) }}" - - entries + - "{{ openshift_openstack_dns_records_add | default({}) }}" + - entries register: nsupdate_add_result until: nsupdate_add_result|succeeded retries: 10 -- cgit v1.2.1 From 2e9d134d4564d87dbbc7853b07204f7f44ee01e6 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Tue, 7 Nov 2017 14:42:43 +1100 Subject: Remove an unused retry file --- roles/hostnames/test/test.retry | 3 --- 1 file changed, 3 deletions(-) delete mode 100644 roles/hostnames/test/test.retry (limited to 'roles') diff --git a/roles/hostnames/test/test.retry b/roles/hostnames/test/test.retry deleted file mode 100644 index 63fc08e4c..000000000 --- a/roles/hostnames/test/test.retry +++ /dev/null @@ -1,3 +0,0 @@ -192.168.124.117 -192.168.124.40 -192.168.124.41 -- cgit v1.2.1
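
The openshift_openstack_* variables these templates now reference are normally supplied through the inventory or group_vars. The sketch below shows the shape of data the Heat templates above expect; the variable names come from the patches, but every concrete value (images, flavors, counts, labels) is an illustrative assumption rather than anything taken from these commits:

```
# group_vars/all.yml -- illustrative sketch only; variable names match those
# used in heat_stack.yaml.j2 / heat_stack_server.yaml.j2, values are examples.
openshift_openstack_stack_name: openshift.example.com
openshift_openstack_public_dns_domain: example.com
openshift_openstack_keypair_name: openshift-key
openshift_openstack_external_network_name: public
# Leave empty to have the stack create its own Neutron net/subnet; set it to
# attach servers directly to a provider network (the templates then also set
# attach_float_net: false, so no floating IPs are allocated).
openshift_openstack_provider_network_name: ""

openshift_openstack_num_masters: 1
openshift_openstack_num_nodes: 3
openshift_openstack_num_infra: 1
openshift_openstack_num_dns: 0

openshift_openstack_node_image: rhel-7.4
openshift_openstack_node_flavor: m1.medium
openshift_openstack_node_volume_size: 15

# Per-group node labels; the templates iterate the app and infra keys.
openshift_openstack_cluster_node_labels:
  app:
    region: primary
  infra:
    region: infra

# Members of the compute_nodes ResourceGroup to drop when scaling down,
# consumed by removal_policies -> resource_list in the template above.
openshift_openstack_nodes_to_remove: []
```

When a provider network is set, the conditionals in the templates also skip the router-interface dependency and the floating IP resources, which is why the same variable appears in both the server definitions and heat_stack_server.yaml.j2.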