author     Tomas Sedovic <tomas@sedovic.cz>  2017-11-07 14:17:27 +1100
committer  Tomas Sedovic <tomas@sedovic.cz>  2017-11-07 14:17:27 +1100
commit     6f4d509817f200ec2a273a097f4f048da5997925 (patch)
tree       ebddc919d850ec5c4d308613661063b01ae89784
parent     0cf8cf65a89ad7cac8c1cef1f743426b610adae0 (diff)
parent     332f131e8e6457a03a4f1ab19abc8e4ceb897307 (diff)
Merge ../openshift-ansible-contrib into openstack-provider-githist
This moves all the OpenStack-related code from the -contrib[1] repo,
including its git history, to openshift-ansible. It will then be moved
around and updated to fit the rest of the project's structure.

[1]: https://github.com/openshift/openshift-ansible-contrib
-rw-r--r--  playbooks/provisioning/openstack/README.md  258
-rw-r--r--  playbooks/provisioning/openstack/advanced-configuration.md  773
-rw-r--r--  playbooks/provisioning/openstack/ansible.cfg  24
-rw-r--r--  playbooks/provisioning/openstack/custom-actions/add-cas.yml  13
-rw-r--r--  playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml  90
-rw-r--r--  playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml  13
-rw-r--r--  playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml  12
-rw-r--r--  playbooks/provisioning/openstack/custom_flavor_check.yaml  9
-rw-r--r--  playbooks/provisioning/openstack/custom_image_check.yaml  9
-rw-r--r--  playbooks/provisioning/openstack/galaxy-requirements.yaml  10
-rw-r--r--  playbooks/provisioning/openstack/net_vars_check.yaml  14
-rw-r--r--  playbooks/provisioning/openstack/post-install.yml  57
-rw-r--r--  playbooks/provisioning/openstack/post-provision-openstack.yml  118
-rw-r--r--  playbooks/provisioning/openstack/pre-install.yml  19
-rw-r--r--  playbooks/provisioning/openstack/pre_tasks.yml  53
-rw-r--r--  playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml  67
-rw-r--r--  playbooks/provisioning/openstack/prerequisites.yml  123
-rw-r--r--  playbooks/provisioning/openstack/provision-openstack.yml  35
-rw-r--r--  playbooks/provisioning/openstack/provision.yaml  4
l---------  playbooks/provisioning/openstack/roles  1
-rw-r--r--  playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml  59
-rw-r--r--  playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml  166
-rwxr-xr-x  playbooks/provisioning/openstack/sample-inventory/inventory.py  88
-rw-r--r--  playbooks/provisioning/openstack/scale-up.yaml  75
-rw-r--r--  playbooks/provisioning/openstack/stack_params.yaml  49
-rw-r--r--  roles/common/defaults/main.yml  6
-rw-r--r--  roles/dns-records/defaults/main.yml  2
-rw-r--r--  roles/dns-records/tasks/main.yml  121
-rw-r--r--  roles/dns-server-detect/defaults/main.yml  3
-rw-r--r--  roles/dns-server-detect/tasks/main.yml  36
-rw-r--r--  roles/dns-views/defaults/main.yml  4
-rw-r--r--  roles/dns-views/tasks/main.yml  30
-rw-r--r--  roles/docker-storage-setup/defaults/main.yaml  7
-rw-r--r--  roles/docker-storage-setup/tasks/main.yaml  46
-rw-r--r--  roles/docker-storage-setup/templates/docker-storage-setup-dm.j2  4
-rw-r--r--  roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2  7
-rw-r--r--  roles/hostnames/tasks/main.yaml  26
-rw-r--r--  roles/hostnames/test/inv  12
l---------  roles/hostnames/test/roles  1
-rw-r--r--  roles/hostnames/test/test.retry  3
-rw-r--r--  roles/hostnames/test/test.yaml  4
-rw-r--r--  roles/hostnames/vars/main.yaml  2
-rw-r--r--  roles/hostnames/vars/records.yaml  28
-rw-r--r--  roles/node-network-manager/tasks/main.yml  22
-rw-r--r--  roles/openshift-prep/defaults/main.yml  13
-rw-r--r--  roles/openshift-prep/tasks/main.yml  4
-rw-r--r--  roles/openshift-prep/tasks/prerequisites.yml  37
-rw-r--r--  roles/openstack-create-cinder-registry/tasks/main.yaml  5
-rw-r--r--  roles/openstack-stack/README.md  9
-rw-r--r--  roles/openstack-stack/defaults/main.yml  21
-rw-r--r--  roles/openstack-stack/meta/main.yml  3
-rw-r--r--  roles/openstack-stack/tasks/cleanup.yml  6
-rw-r--r--  roles/openstack-stack/tasks/generate-templates.yml  26
-rw-r--r--  roles/openstack-stack/tasks/main.yml  27
-rw-r--r--  roles/openstack-stack/tasks/subnet_update_dns_servers.yaml  9
-rw-r--r--  roles/openstack-stack/templates/heat_stack.yaml.j2  888
-rw-r--r--  roles/openstack-stack/templates/heat_stack_server.yaml.j2  270
-rw-r--r--  roles/openstack-stack/templates/user_data.j2  13
l---------  roles/openstack-stack/test/roles  1
-rw-r--r--  roles/openstack-stack/test/stack-create-test.yml  18
-rw-r--r--  roles/static_inventory/defaults/main.yml  29
-rw-r--r--  roles/static_inventory/meta/main.yml  3
-rw-r--r--  roles/static_inventory/tasks/checkpoint.yml  17
-rw-r--r--  roles/static_inventory/tasks/filter_out_new_app_nodes.yaml  15
-rw-r--r--  roles/static_inventory/tasks/main.yml  25
-rw-r--r--  roles/static_inventory/tasks/openstack.yml  120
-rw-r--r--  roles/static_inventory/tasks/sshconfig.yml  13
-rw-r--r--  roles/static_inventory/tasks/sshtun.yml  15
-rw-r--r--  roles/static_inventory/templates/inventory.j2  104
-rw-r--r--  roles/static_inventory/templates/openstack_ssh_config.j2  21
-rw-r--r--  roles/static_inventory/templates/ssh-tunnel.service.j2  20
-rw-r--r--  roles/subscription-manager/README.md  156
-rw-r--r--  roles/subscription-manager/pre_tasks/pre_tasks.yml  45
-rw-r--r--  roles/subscription-manager/tasks/main.yml  150
74 files changed, 4586 insertions, 0 deletions
diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md
new file mode 100644
index 000000000..a2f553f4c
--- /dev/null
+++ b/playbooks/provisioning/openstack/README.md
@@ -0,0 +1,258 @@
+# OpenStack Provisioning
+
+This directory contains [Ansible][ansible] playbooks and roles to create
+OpenStack resources (servers, networking, volumes, security groups,
+etc.). The result is an environment ready for OpenShift installation
+via [openshift-ansible].
+
+We provide everything necessary to be able to install OpenShift on
+OpenStack (including the DNS and load balancer servers when
+necessary). In addition, we work on providing integration with the
+OpenStack-native services (storage, lbaas, baremetal as a service,
+dns, etc.).
+
+
+## OpenStack Requirements
+
+Before you start the installation, you need to have an OpenStack
+environment to connect to. You can use a public cloud or an OpenStack
+within your organisation. It is also possible to
+use [Devstack][devstack] or [TripleO][tripleo]. In the case of
+TripleO, we will be running on top of the **overcloud**.
+
+The OpenStack release must be Newton (for Red Hat OpenStack this is
+version 10) or newer. It must also satisfy these requirements:
+
+* Heat (Orchestration) must be available
+* The deployment image (CentOS 7 or RHEL 7) must be loaded
+* The deployment flavor must be available to your user
+ - `m1.medium` / 4GB RAM + 40GB disk should be enough for testing
+ - look at
+ the [Minimum Hardware Requirements page][hardware-requirements]
+ for production
+* The keypair for SSH must be available in OpenStack
+* A `keystonerc` file that lets you talk to the OpenStack services
+ * NOTE: only Keystone V2 is currently supported
+
+Optional:
+* External Neutron network with a floating IP address pool
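+
+Assuming you have sourced your `keystonerc`, a quick sanity check of these
+requirements might look like this (the flavor name is illustrative --
+substitute your own):
+
+```bash
+$ openstack catalog list | grep -i orchestration  # Heat must be available
+$ openstack image list                            # the deployment image
+$ openstack flavor show m1.medium                 # the deployment flavor
+$ openstack keypair list                          # your SSH keypair
+$ openstack network list --external               # floating IP networks
+```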
+
+
+## Installation
+
+There are four main parts to the installation:
+
+1. [Preparing Ansible and dependencies](#1-preparing-ansible-and-dependencies)
+2. [Configuring the desired OpenStack environment and OpenShift cluster](#2-configuring-the-openstack-environment-and-openshift-cluster)
+3. [Creating the OpenStack resources (VMs, networking, etc.)](#3-creating-the-openstack-resources-vms-networking-etc)
+4. [Installing OpenShift](#4-installing-openshift)
+
+This guide is going to install [OpenShift Origin][origin]
+using [CentOS 7][centos7] images with minimal customisation.
+
+We will create the VMs for running OpenShift in a new Neutron
+network, assign floating IP addresses, and configure DNS.
+
+The OpenShift cluster will have a single Master node that will run
+`etcd`, a single Infra node and two App nodes.
+
+You can look at
+the [Advanced Configuration page][advanced-configuration] for
+additional options.
+
+
+
+### 1. Preparing Ansible and dependencies
+
+First, you need to select where to run [Ansible][ansible] from (the
+*Ansible host*). This can be the computer you read this guide on or an
+OpenStack VM you'll create specifically for this purpose.
+
+We will use a
+[Docker image that has all the dependencies installed][control-host-image] to
+make things easier. If you don't want to use Docker, take a look at
+the [Ansible host dependencies][ansible-dependencies] and make sure
+they're installed.
+
+Your *Ansible host* needs to have the following:
+
+1. Docker
+2. `keystonerc` file with your OpenStack credentials
+3. SSH private key for logging in to your OpenShift nodes
+
+Assuming your private key is `~/.ssh/id_rsa` and `keystonerc` in your
+current directory:
+
+```bash
+$ sudo docker run -it -v ~/.ssh:/mnt/.ssh:Z \
+ -v $PWD/keystonerc:/root/.config/openstack/keystonerc.sh:Z \
+ redhatcop/control-host-openstack bash
+```
+
+This will create the container, add your SSH key and source your
+`keystonerc`. Everything should now be ready for the installation.
+
+You can verify that everything is in order:
+
+
+```bash
+$ less .ssh/id_rsa
+$ ansible --version
+$ openstack image list
+```
+
+
+### 2. Configuring the OpenStack Environment and OpenShift Cluster
+
+The configuration is all done in an Ansible inventory directory. We
+will clone the [openshift-ansible-contrib][contrib] repository and set
+things up for a minimal installation.
+
+
+```
+$ git clone https://github.com/openshift/openshift-ansible-contrib
+$ cp -r openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory/ inventory
+```
+
+If you're testing multiple configurations, you can have multiple
+inventories and switch between them.
+
+#### OpenStack Configuration
+
+The OpenStack configuration is in `inventory/group_vars/all.yml`.
+
+Open the file and plug in the image, flavor and network configuration
+corresponding to your OpenStack installation.
+
+```bash
+$ vi inventory/group_vars/all.yml
+```
+
+1. Set the `openstack_ssh_public_key` to your OpenStack keypair name.
+ - See `openstack keypair list` to find the keypairs registered with
+ OpenStack.
+ - This must correspond to your private SSH key in `~/.ssh/id_rsa`
+2. Set the `openstack_external_network_name` to the floating IP
+ network of your OpenStack cloud.
+ - See `openstack network list` for the list of networks.
+ - It's often called `public`, `external` or `ext-net`.
+3. Set the `openstack_default_image_name` to the image you want your
+ OpenShift VMs to run.
+ - See `openstack image list` for the list of available images.
+4. Set the `openstack_default_flavor` to the flavor you want your
+ OpenShift VMs to use.
+ - See `openstack flavor list` for the list of available flavors.
+
+**NOTE**: In most OpenStack environments, you will also need to
+configure the forwarders for the DNS server we create. This depends on
+your environment.
+
+Launch a VM in your OpenStack, look at its `/etc/resolv.conf`, and put
+the IP addresses you find there into `public_dns_nameservers` in
+`inventory/group_vars/all.yml`.
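+
+Putting it together, a minimal sketch of the relevant `all.yml` values
+might look like this (all names and addresses here are illustrative --
+use the values from your own cloud):
+
+```yaml
+openstack_ssh_public_key: openshift
+openstack_external_network_name: public
+openstack_default_image_name: centos7
+openstack_default_flavor: m1.medium
+# forwarders taken from a test VM's /etc/resolv.conf
+public_dns_nameservers:
+  - 192.168.0.3
+  - 192.168.0.2
+```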
+
+
+#### OpenShift configuration
+
+The OpenShift configuration is in `inventory/group_vars/OSEv3.yml`.
+
+The default options will mostly work, but unless you used the large
+flavors for a production-ready environment, openshift-ansible's
+hardware check will fail.
+
+Let's disable those checks by putting this in
+`inventory/group_vars/OSEv3.yml`:
+
+```yaml
+openshift_disable_check: disk_availability,memory_availability
+```
+
+**NOTE**: The default authentication method will allow **any username
+and password** in! If you're running this in a public place, you need
+to set up access control.
+
+Feel free to look at
+the [Sample OpenShift Inventory][sample-openshift-inventory] and
+the [advanced configuration][advanced-configuration].
+
+
+### 3. Creating the OpenStack resources (VMs, networking, etc.)
+
+We will install the DNS server roles using Ansible Galaxy and then run
+the OpenStack provisioning playbook. The `ansible.cfg` file we provide
+has useful defaults -- copy it to the directory you're going to run
+Ansible from.
+
+```bash
+$ ansible-galaxy install -r openshift-ansible-contrib/playbooks/provisioning/openstack/galaxy-requirements.yaml -p openshift-ansible-contrib/roles
+$ cp openshift-ansible-contrib/playbooks/provisioning/openstack/ansible.cfg ansible.cfg
+```
+(you will only need to do this once)
+
+Then run the provisioning playbook -- this will create the OpenStack
+resources:
+
+```bash
+$ ansible-playbook -i inventory openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml
+```
+
+If you're using multiple inventories, make sure you pass the path to
+the right one to `-i`.
+
+
+### 4. Installing OpenShift
+
+We will use the `openshift-ansible` project to install OpenShift on
+top of the OpenStack nodes we have prepared:
+
+```bash
+$ git clone https://github.com/openshift/openshift-ansible
+$ ansible-playbook -i inventory openshift-ansible/playbooks/byo/config.yml
+```
+
+
+### Next Steps
+
+And that's it! You should have a small but functional OpenShift
+cluster now.
+
+Take a look at [how to access the cluster][accessing-openshift]
+and [how to remove it][uninstall-openshift] as well as the more
+advanced configuration:
+
+* [Accessing the OpenShift cluster][accessing-openshift]
+* [Removing the OpenShift cluster][uninstall-openshift]
+* Set Up Authentication (TODO)
+* [Multiple Masters with a load balancer][loadbalancer]
+* [External DNS][external-dns]
+* Multiple Clusters (TODO)
+* [Cinder Registry][cinder-registry]
+* [Bastion Node][bastion]
+
+
+[ansible]: https://www.ansible.com/
+[openshift-ansible]: https://github.com/openshift/openshift-ansible
+[devstack]: https://docs.openstack.org/devstack/
+[tripleo]: http://tripleo.org/
+[ansible-dependencies]: ./advanced-configuration.md#dependencies-for-localhost-ansible-controladmin-node
+[contrib]: https://github.com/openshift/openshift-ansible-contrib
+[control-host-image]: https://hub.docker.com/r/redhatcop/control-host-openstack/
+[hardware-requirements]: https://docs.openshift.org/latest/install_config/install/prerequisites.html#hardware
+[origin]: https://www.openshift.org/
+[centos7]: https://www.centos.org/
+[sample-openshift-inventory]: https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.example
+[advanced-configuration]: ./advanced-configuration.md
+[accessing-openshift]: ./advanced-configuration.md#accessing-the-openshift-cluster
+[uninstall-openshift]: ./advanced-configuration.md#removing-the-openshift-cluster
+[loadbalancer]: ./advanced-configuration.md#multi-master-configuration
+[external-dns]: ./advanced-configuration.md#dns-configuration-variables
+[cinder-registry]: ./advanced-configuration.md#creating-and-using-a-cinder-volume-for-the-openshift-registry
+[bastion]: ./advanced-configuration.md#configure-static-inventory-and-access-via-a-bastion-node
+
+
+
+## License
+
+Like the rest of the openshift-ansible-contrib repository, the code
+here is licensed under the Apache License 2.0.
diff --git a/playbooks/provisioning/openstack/advanced-configuration.md b/playbooks/provisioning/openstack/advanced-configuration.md
new file mode 100644
index 000000000..72bb95254
--- /dev/null
+++ b/playbooks/provisioning/openstack/advanced-configuration.md
@@ -0,0 +1,773 @@
+## Dependencies for localhost (ansible control/admin node)
+
+* [Ansible 2.3](https://pypi.python.org/pypi/ansible)
+* [Ansible-galaxy](https://pypi.python.org/pypi/ansible-galaxy-local-deps)
+* [jinja2](http://jinja.pocoo.org/docs/2.9/)
+* [shade](https://pypi.python.org/pypi/shade)
+* python-jmespath / [jmespath](https://pypi.python.org/pypi/jmespath)
+* python-dns / [dnspython](https://pypi.python.org/pypi/dnspython)
+* Become (sudo) is not required.
+
+**NOTE**: You can use a Docker image with all dependencies set up.
+Find more in the [Deployment section](#deployment).
+
+### Optional Dependencies for localhost
+**Note**: When using RHEL images, the `rhel-7-server-openstack-10-rpms` repository is required in order to install these packages.
+
+* `python-openstackclient`
+* `python-heatclient`
+
+## Dependencies for OpenStack hosted cluster nodes (servers)
+
+There are no additional dependencies for the cluster nodes. All required
+configuration steps are done by Heat, given a specific user data config
+that normally should not be changed.
+
+## Required galaxy modules
+
+In order to pull in external dependencies for DNS configuration steps,
+the following commands need to be executed:
+
+ ansible-galaxy install \
+ -r openshift-ansible-contrib/playbooks/provisioning/openstack/galaxy-requirements.yaml \
+ -p openshift-ansible-contrib/roles
+
+Alternatively you can install directly from github:
+
+ ansible-galaxy install git+https://github.com/redhat-cop/infra-ansible,master \
+ -p openshift-ansible-contrib/roles
+
+Notes:
+* This assumes we're in the directory that contains the cloned
+openshift-ansible-contrib repo in its root path.
+* When trying to install a different version, the previous one must be removed first
+(`infra-ansible` directory from [roles](https://github.com/openshift/openshift-ansible-contrib/tree/master/roles)).
+Otherwise, even if there are differences between the two versions, installation of the newer version is skipped.
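+
+For example, a sketch of reinstalling from scratch (paths as used above):
+
+    rm -rf openshift-ansible-contrib/roles/infra-ansible
+    ansible-galaxy install \
+      -r openshift-ansible-contrib/playbooks/provisioning/openstack/galaxy-requirements.yaml \
+      -p openshift-ansible-contrib/roles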
+
+
+## Accessing the OpenShift Cluster
+
+### Use the Cluster DNS
+
+In addition to the OpenShift nodes, we created a DNS server with all
+the necessary entries. We will configure your *Ansible host* to use
+this new DNS and talk to the deployed OpenShift.
+
+First, get the DNS IP address:
+
+```bash
+$ openstack server show dns-0.openshift.example.com --format value --column addresses
+openshift-ansible-openshift.example.com-net=192.168.99.11, 10.40.128.129
+```
+
+Note the floating IP address (it's `10.40.128.129` in this case) -- if
+you're not sure, try pinging them both -- it's the one that responds
+to pings.
+
+Next, edit your `/etc/resolv.conf` as root and put `nameserver DNS_IP` as your
+**first entry**.
+
+If your `/etc/resolv.conf` currently looks like this:
+
+```
+; generated by /usr/sbin/dhclient-script
+search openstacklocal
+nameserver 192.168.0.3
+nameserver 192.168.0.2
+```
+
+Change it to this:
+
+```
+; generated by /usr/sbin/dhclient-script
+search openstacklocal
+nameserver 10.40.128.129
+nameserver 192.168.0.3
+nameserver 192.168.0.2
+```
+
+### Get the `oc` Client
+
+**NOTE**: You can skip this section if you're using the Docker image
+-- it already has the `oc` binary.
+
+You need to download the OpenShift command line client (called `oc`).
+You can download and extract `openshift-origin-client-tools` from the
+OpenShift release page:
+
+https://github.com/openshift/origin/releases/latest/
+
+Alternatively, you can copy it from the master node:
+
+ $ ansible -i inventory masters[0] -m fetch -a "src=/bin/oc dest=oc"
+
+Either way, find the `oc` binary and put it in your `PATH`.
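+
+For instance, if the extracted `oc` binary is in your current directory and
+`~/bin` is on your `PATH`:
+
+    chmod +x oc
+    mv oc ~/bin/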
+
+
+### Logging in Using the Command Line
+
+
+```
+oc login --insecure-skip-tls-verify=true https://master-0.openshift.example.com:8443 -u user -p password
+oc new-project test
+oc new-app --template=cakephp-mysql-example
+oc status -v
+curl http://cakephp-mysql-example-test.apps.openshift.example.com
+```
+
+This will trigger an image build. You can run `oc logs -f
+bc/cakephp-mysql-example` to follow its progress.
+
+Wait until the build has finished and both pods are deployed and running:
+
+```
+$ oc status -v
+In project test on server https://master-0.openshift.example.com:8443
+
+http://cakephp-mysql-example-test.apps.openshift.example.com (svc/cakephp-mysql-example)
+ dc/cakephp-mysql-example deploys istag/cakephp-mysql-example:latest <-
+ bc/cakephp-mysql-example source builds https://github.com/openshift/cakephp-ex.git on openshift/php:7.0
+ deployment #1 deployed about a minute ago - 1 pod
+
+svc/mysql - 172.30.144.36:3306
+ dc/mysql deploys openshift/mysql:5.7
+ deployment #1 deployed 3 minutes ago - 1 pod
+
+Info:
+ * pod/cakephp-mysql-example-1-build has no liveness probe to verify pods are still running.
+ try: oc set probe pod/cakephp-mysql-example-1-build --liveness ...
+View details with 'oc describe <resource>/<name>' or list everything with 'oc get all'.
+
+```
+
+You can now look at the deployed app using its route:
+
+```
+$ curl http://cakephp-mysql-example-test.apps.openshift.example.com
+```
+
+Its `title` should say: "Welcome to OpenShift".
+
+
+### Accessing the UI
+
+You can also access the OpenShift cluster with a web browser by going to:
+
+https://master-0.openshift.example.com:8443
+
+Note that for this to work, the OpenShift nodes must be accessible
+from your computer and its DNS configuration must use the cluster's
+DNS.
+
+
+## Removing the OpenShift Cluster
+
+Everything in the cluster is contained within a Heat stack. To
+completely remove the cluster and all the related OpenStack resources,
+run this command:
+
+```bash
+openstack stack delete --wait --yes openshift.example.com
+```
+
+
+## DNS configuration variables
+
+Pay special attention to the values in the first paragraph -- these
+will depend on your OpenStack environment.
+
+Note that the provisioning playbooks update the original Neutron subnet
+created with the Heat stack to point to the configured DNS servers, so the
+provisioned cluster nodes will use those natively as default nameservers.
+Technically, this allows deploying OpenShift clusters without dnsmasq proxies.
+
+The `env_id` and `public_dns_domain` will form the cluster's DNS domain all
+your servers will be under. With the default values, this will be
+`openshift.example.com`. For workloads, the default subdomain is 'apps'.
+That subdomain can also be set via the `openshift_app_domain` variable in
+the inventory.
+
+The `openstack_<role name>_hostname` is a set of variables used for customising
+hostnames of servers with a given role. When such a variable is left
+commented out, the default hostname (usually the role name) is used.
+
+The `public_dns_nameservers` is a list of DNS servers accessible from all
+the created Nova servers. These will be serving as your DNS forwarders for
+external FQDNs that do not belong to the cluster's DNS domain and its subdomains.
+If you're unsure what to put here, you can try the Google or OpenDNS
+servers, but note that some organizations may be blocking them.
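+
+A minimal sketch (the values are illustrative):
+
+    env_id: openshift
+    public_dns_domain: example.com
+    public_dns_nameservers:
+      - 8.8.8.8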
+
+The `openshift_use_dnsmasq` variable controls whether dnsmasq is deployed.
+By default, dnsmasq is deployed and becomes the first nameserver entry in the
+hosts' /etc/resolv.conf, pointing to the local instance of the dnsmasq
+daemon, which in turn proxies DNS requests to the authoritative DNS server.
+When NetworkManager is enabled for provisioned cluster nodes, which is
+normally the case, you should not change the defaults and always deploy dnsmasq.
+
+`external_nsupdate_keys` describes the external authoritative DNS server(s)
+processing dynamic record updates for the public and private cluster views:
+
+ external_nsupdate_keys:
+ public:
+ key_secret: <some nsupdate key>
+ key_algorithm: 'hmac-md5'
+ key_name: 'update-key'
+ server: <public DNS server IP>
+ private:
+ key_secret: <some nsupdate key 2>
+ key_algorithm: 'hmac-sha256'
+ server: <public or private DNS server IP>
+
+Here, for the public view section, we specified a different key algorithm and
+the optional `key_name`, which normally defaults to the cluster's DNS domain.
+This just illustrates a compatibility mode with a DNS service deployed
+by OpenShift on the OSP10 reference architecture, used in a mixed mode with
+another external DNS server.
+
+Another example defines an external DNS server for the public view, in
+addition to the in-stack DNS server, which is then used for the private view only:
+
+ external_nsupdate_keys:
+ public:
+ key_secret: <some nsupdate key>
+ key_algorithm: 'hmac-sha256'
+ server: <public DNS server IP>
+
+Here, updates matching the public view will hit the given public
+server IP, while updates matching the private view will be sent to the
+automatically evaluated in-stack DNS server's **public** IP.
+
+Note, for the in-stack DNS server, private view updates may be sent only
+via the public IP of the server. You can not send updates via the private
+IP yet. This forces the in-stack private server to have a floating IP.
+See also the [security notes](#security-notes).
+
+## Flannel networking
+
+In order to configure the
+[flannel networking](https://docs.openshift.com/container-platform/3.6/install_config/configuring_sdn.html#using-flannel),
+uncomment and adjust the appropriate `inventory/group_vars/OSEv3.yml` group vars.
+Note that the `osm_cluster_network_cidr` must not overlap with the default
+Docker bridge subnet of 172.17.0.0/16; otherwise, change the default docker0
+CIDR range, for example by adding `--bip=192.168.2.1/24` to
+`DOCKER_NETWORK_OPTIONS` located in `/etc/sysconfig/docker-network`.
+
+Also note that the flannel network will be provisioned on a separate, isolated
+Neutron subnet defined from `osm_cluster_network_cidr`, with port security disabled.
+Use the `openstack_private_data_network_name` variable to define the network
+name for the heat stack resource.
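+
+As a sketch, the relevant group vars might look like this (the CIDR and
+network name are illustrative):
+
+    openshift_use_flannel: True
+    osm_cluster_network_cidr: 10.128.0.0/14
+    openstack_private_data_network_name: openshift-ansible-data-network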
+
+After the cluster deployment is done, you should run an additional
+post-installation step for the flannel and Docker iptables configuration:
+
+ ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/post-install.yml
+
+## Other configuration variables
+
+`openstack_ssh_public_key` is a Nova keypair - you can see your
+keypairs with `openstack keypair list`. It must correspond to the
+private SSH key Ansible will use to log into the created VMs. This is
+`~/.ssh/id_rsa` by default, but you can use a different key by passing
+`--private-key` to `ansible-playbook`.
+
+`openstack_default_image_name` is the default name of the Glance image the
+servers will use. You can see your images with `openstack image list`.
+In order to set a different image for a role, uncomment the line with the
+corresponding variable (e.g. `openstack_lb_image_name` for load balancer) and
+set its value to another available image name. `openstack_default_image_name`
+must stay defined as it is used as a default value for the rest of the roles.
+
+`openstack_default_flavor` is the default Nova flavor the servers will use.
+You can see your flavors with `openstack flavor list`.
+In order to set a different flavor for a role, uncomment the line with the
+corresponding variable (e.g. `openstack_lb_flavor` for load balancer) and
+set its value to another available flavor. `openstack_default_flavor` must
+stay defined as it is used as a default value for the rest of the roles.
+
+`openstack_external_network_name` is the name of the Neutron network
+providing external connectivity. It is often called `public`,
+`external` or `ext-net`. You can see your networks with `openstack
+network list`.
+
+`openstack_private_network_name` is the name of the private Neutron network
+providing admin/control access for Ansible. It can be merged with other
+cluster networks; there are no special networking requirements.
+
+The `openstack_num_masters`, `openstack_num_infra` and
+`openstack_num_nodes` values specify the number of Master, Infra and
+App nodes to create.
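+
+For example, the minimal topology from the README (a single master, a single
+infra node and two app nodes) corresponds to:
+
+    openstack_num_masters: 1
+    openstack_num_infra: 1
+    openstack_num_nodes: 2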
+
+The `openshift_cluster_node_labels` defines custom labels for your openshift
+cluster node groups. It currently supports app and infra node groups.
+The default value of this variable sets `region: primary` on app nodes and
+`region: infra` on infra nodes.
+An example of setting a customised label:
+```
+openshift_cluster_node_labels:
+ app:
+ mylabel: myvalue
+```
+
+The `openstack_nodes_to_remove` allows you to specify the numerical indexes
+of App nodes that should be removed; for example, `['0', '2']`.
+
+The `docker_volume_size` is the default Docker volume size the servers will use.
+In order to set a different volume size for a role,
+uncomment the line with the corresponding variable (e.g. `docker_master_volume_size`
+for master) and change its value. `docker_volume_size` must stay defined as it is
+used as a default value for some of the servers (master, infra, app node).
+The rest of the roles (etcd, load balancer, dns) have their defaults hard-coded.
+
+**Note**: If the `ephemeral_volumes` is set to `true`, the `*_volume_size` variables
+will be ignored and the deployment will not create any cinder volumes.
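+
+For instance (the sizes, in GB, are illustrative):
+
+    docker_volume_size: "15"
+    # docker_master_volume_size: "25"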
+
+The `openstack_flat_secgrp` variable controls Neutron security group creation
+for Heat stacks. Set it to true if you experience issues with security group
+rule quotas. It trades security for a lower number of rules by sharing the
+same set of firewall rules across master, node, etcd and infra nodes.
+
+The `required_packages` variable provides a list of additional prerequisite
+packages to be installed before deploying an OpenShift cluster.
+These are ignored, though, if `manage_packages: False` is set.
+
+The `openstack_inventory` variable controls whether a static inventory is
+created after the cluster nodes are provisioned on the OpenStack cloud. Note
+that the fully dynamic inventory is yet to be supported, so the static
+inventory will be created anyway.
+
+The `openstack_inventory_path` variable points to the directory that hosts the
+generated static inventory. It should point to the copied example inventory
+directory; otherwise, a new one is created for you.
+
+## Multi-master configuration
+
+Please refer to the official documentation for the
+[multi-master setup](https://docs.openshift.com/container-platform/3.6/install_config/install/advanced_install.html#multiple-masters)
+and define the corresponding [inventory
+variables](https://docs.openshift.com/container-platform/3.6/install_config/install/advanced_install.html#configuring-cluster-variables)
+in `inventory/group_vars/OSEv3.yml`. For example, given a load balancer node
+under the ansible group named `ext_lb`:
+
+ openshift_master_cluster_method: native
+ openshift_master_cluster_hostname: "{{ groups.ext_lb.0 }}"
+ openshift_master_cluster_public_hostname: "{{ groups.ext_lb.0 }}"
+
+## Provider Network
+
+Normally, the playbooks create a new Neutron network and subnet and attach
+floating IP addresses to each node. If you have a provider network set up, this
+is all unnecessary as you can just access servers that are placed in the
+provider network directly.
+
+To use a provider network, set its name in `openstack_provider_network_name` in
+`inventory/group_vars/all.yml`.
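+
+For example (the network name is illustrative):
+
+    openstack_provider_network_name: provider-net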
+
+If you set the provider network name, the `openstack_external_network_name` and
+`openstack_private_network_name` fields will be ignored.
+
+**NOTE**: this will not update the nodes' DNS, so running openshift-ansible
+right after provisioning will fail (unless you're using an external DNS server
+your provider network knows about). You must make sure your nodes are able to
+resolve each other by name.
+
+## Security notes
+
+Configure the required `*_ingress_cidr` variables to restrict public access
+to provisioned servers from your laptop (a /32 notation should be used)
+or your trusted network. The most important is the `node_ingress_cidr`
+that restricts public access to the deployed DNS server and cluster
+nodes' ephemeral ports range.
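+
+For example, to only allow access from your own machine (the address is
+illustrative -- see the note below on finding yours):
+
+    ssh_ingress_cidr: 203.0.113.10/32
+    node_ingress_cidr: 203.0.113.10/32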
+
+Note, the command ``curl https://api.ipify.org`` helps find the external
+IP address of your box (the Ansible admin node).
+
+There is also the `manage_packages` variable (defaults to True) you
+may want to turn off in order to speed up the provisioning tasks. This may
+be the case for development environments. When turned off, the servers will
+be provisioned omitting the ``yum update`` command. This has security
+implications though, and is not recommended for production deployments.
+
+### DNS servers security options
+
+Aside from `node_ingress_cidr` restricting public access to in-stack DNS
+servers, there are following (bind/named specific) DNS security
+options available:
+
+ named_public_recursion: 'no'
+ named_private_recursion: 'yes'
+
+External DNS servers, which are not included in the 'dns' hosts group,
+are not managed; it is up to you to configure them.
+
+## Configure the OpenShift parameters
+
+Finally, you need to update the DNS entry in
+`inventory/group_vars/OSEv3.yml` (look at
+`openshift_master_default_subdomain`).
+
+In addition, this is the place where you can customise your OpenShift
+installation for example by specifying the authentication.
+
+The full list of options is available in this sample inventory:
+
+https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.ose.example
+
+Note that in order to deploy OpenShift Origin, you should update the following
+variables in `inventory/group_vars/OSEv3.yml` and `all.yml`:
+
+ deployment_type: origin
+ openshift_deployment_type: "{{ deployment_type }}"
+
+
+## Setting a custom entrypoint
+
+In order to set a custom entrypoint, update `openshift_master_cluster_public_hostname`
+
+ openshift_master_cluster_public_hostname: api.openshift.example.com
+
+Note that an empty hostname does not work, so if your domain is `openshift.example.com`,
+you cannot set this value to simply `openshift.example.com`.
+
+## Creating and using a Cinder volume for the OpenShift registry
+
+You can optionally have the playbooks create a Cinder volume and set
+it up as the OpenShift hosted registry.
+
+To do that, you need to specify the desired Cinder volume name and size in
+Gigabytes in `inventory/group_vars/all.yml`:
+
+ cinder_hosted_registry_name: cinder-registry
+ cinder_hosted_registry_size_gb: 10
+
+With this, the playbooks will create the volume and set up its
+filesystem. If there is an existing volume of the same name, we will
+use it but keep the existing data on it.
+
+To use the volume for the registry, you must first configure it with
+the OpenStack credentials by putting the following to `OSEv3.yml`:
+
+ openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}"
+ openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
+ openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
+ openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}"
+
+This will use the credentials from your shell environment. If you want
+to enter them explicitly, you can. You can also use credentials
+different from the provisioning ones (say for quota or access control
+reasons).
+
+**NOTE**: If you're testing this on [DevStack][devstack], you must
+explicitly set your Keystone API version to v2 (e.g.
+`OS_AUTH_URL=http://10.34.37.47/identity/v2.0`) instead of the default
+value provided by `openrc`. You may also encounter the following issue
+with Cinder:
+
+https://github.com/kubernetes/kubernetes/issues/50461
+
+You can read the [OpenShift documentation on configuring
+OpenStack][openstack] for more information.
+
+[devstack]: https://docs.openstack.org/devstack/latest/
+[openstack]: https://docs.openshift.org/latest/install_config/configuring_openstack.html
+
+
+Next, we need to instruct OpenShift to use the Cinder volume for its
+registry. Again in `OSEv3.yml`:
+
+ #openshift_hosted_registry_storage_kind: openstack
+ #openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce']
+ #openshift_hosted_registry_storage_openstack_filesystem: xfs
+
+The filesystem value here will be used in the initial formatting of
+the volume.
+
+If you're using the dynamic inventory, you must uncomment these two values as
+well:
+
+ #openshift_hosted_registry_storage_openstack_volumeID: "{{ lookup('os_cinder', cinder_hosted_registry_name).id }}"
+ #openshift_hosted_registry_storage_volume_size: "{{ cinder_hosted_registry_size_gb }}Gi"
+
+But note that they use the `os_cinder` lookup plugin we provide, so you must
+tell Ansible where to find it either in `ansible.cfg` (the one we provide is
+configured properly) or by exporting the
+`ANSIBLE_LOOKUP_PLUGINS=openshift-ansible-contrib/lookup_plugins` environment
+variable.
+
+
+
+## Use an existing Cinder volume for the OpenShift registry
+
+You can also use a pre-existing Cinder volume for the storage of your
+OpenShift registry.
+
+To do that, you need to have a Cinder volume. You can create one by
+running:
+
+ openstack volume create --size <volume size in gb> <volume name>
+
+The volume needs to have a file system created before you put it to
+use.
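+
+For example, a sketch of creating and formatting such a volume by hand (the
+device name is illustrative -- check where the volume actually attaches):
+
+    openstack volume create --size 10 registry-volume
+    openstack server add volume master-0.openshift.example.com registry-volume
+    # on the server, assuming the volume appeared as /dev/vdb:
+    sudo mkfs.xfs /dev/vdb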
+
+As with the automatically-created volume, you have to set up the
+OpenStack credentials in `inventory/group_vars/OSEv3.yml` as well as
+registry values:
+
+ #openshift_hosted_registry_storage_kind: openstack
+ #openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce']
+ #openshift_hosted_registry_storage_openstack_filesystem: xfs
+ #openshift_hosted_registry_storage_openstack_volumeID: e0ba2d73-d2f9-4514-a3b2-a0ced507fa05
+ #openshift_hosted_registry_storage_volume_size: 10Gi
+
+Note the `openshift_hosted_registry_storage_openstack_volumeID` and
+`openshift_hosted_registry_storage_volume_size` values: these need to
+be added in addition to the previous variables.
+
+The **Cinder volume ID**, **filesystem** and **volume size** variables
+must correspond to the values in your volume. The volume ID must be
+the **UUID** of the Cinder volume, *not its name*.
+
+We can format the volume for you if you ask for it in
+`inventory/group_vars/all.yml`:
+
+ prepare_and_format_registry_volume: true
+
+**NOTE:** doing so **will destroy any data that's currently on the volume**!
+
+You can also run the registry setup playbook directly:
+
+ ansible-playbook -i inventory playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml
+
+(the provisioning phase must be completed first)
+
+
+
+## Configure static inventory and access via a bastion node
+
+Example inventory variables:
+
+ openstack_use_bastion: true
+ bastion_ingress_cidr: "{{openstack_subnet_prefix}}.0/24"
+ openstack_private_ssh_key: ~/.ssh/id_rsa
+ openstack_inventory: static
+ openstack_inventory_path: ../../../../inventory
+ openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.openshift.example.com
+
+The `openstack_subnet_prefix` is the prefix of the OpenStack private network
+for your cluster. The `bastion_ingress_cidr` defines the accepted range for SSH
+connections to nodes, in addition to the `ssh_ingress_cidr` (see the security notes above).
+
+The SSH config will be stored on the Ansible control node at the
+given path. Ansible uses it automatically. To access the cluster nodes with
+that SSH config, use the `-F` option, e.g.:
+
+ ssh -F /tmp/ssh.config.openshift.ansible.openshift.example.com master-0.openshift.example.com echo OK
+
+Note that relative paths will not work for the `openstack_ssh_config_path`, but they
+work for the `openstack_private_ssh_key` and `openstack_inventory_path`. In this
+guide, the latter points to the current directory, where you run Ansible commands
+from.
+
+To verify node connectivity, use the command:
+
+ ansible -v -i inventory/hosts -m ping all
+
+If something is broken, double-check the inventory variables, paths and the
+generated `<openstack_inventory_path>/hosts` and `openstack_ssh_config_path` files.
+
+Setting `inventory: dynamic` can be used instead to access cluster nodes directly via
+floating IPs. In this mode you cannot use a bastion node and should specify
+the dynamic inventory file in your Ansible commands, like `-i openstack.py`.
+
+## Using Docker on the Ansible host
+
+If you don't want to worry about the dependencies, you can use the
+[OpenStack Control Host image][control-host-image].
+
+[control-host-image]: https://hub.docker.com/r/redhatcop/control-host-openstack/
+
+It has all the dependencies installed, but you'll need to map your
+code and credentials to it. Assuming your SSH keys live in `~/.ssh`
+and everything else is in your current directory (i.e. `ansible.cfg`,
+`keystonerc`, `inventory`, `openshift-ansible`,
+`openshift-ansible-contrib`), this is how you run the deployment:
+
+ sudo docker run -it -v ~/.ssh:/mnt/.ssh:Z \
+ -v $PWD:/root/openshift:Z \
+ -v $PWD/keystonerc:/root/.config/openstack/keystonerc.sh:Z \
+ redhatcop/control-host-openstack bash
+
+(feel free to replace `$PWD` with an actual path to your inventory and
+checkouts, but note that relative paths don't work)
+
+The first run may take a few minutes while the image is being
+downloaded. After that, you'll be inside the container and you can run
+the playbooks:
+
+ cd openshift
+ ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml
+
+
+### Run the playbook
+
+Assuming your OpenStack (Keystone) credentials are in the `keystonerc` file,
+this is how you start the provisioning process from your Ansible control node:
+
+ . keystonerc
+ ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml
+
+Note that here you start with an empty inventory. The static inventory will be
+populated with data, so you can omit additional arguments in future Ansible commands.
+
+If the bastion is enabled, the generated SSH config must be applied for Ansible.
+Normally, it is auto-included by the previous step. In order to execute it
+as a separate playbook, use the following command:
+
+ ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/post-provision-openstack.yml
+
+The first infra node then becomes a bastion node as well and proxies access
+for future Ansible commands. The post-provision step also configures Satellite
+(if requested) and the DNS server, and ensures other OpenShift requirements are met.
+
+
+## Running Custom Post-Provision Actions
+
+A custom playbook can be run like this:
+
+```
+ansible-playbook --private-key ~/.ssh/openshift -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/custom-playbook.yml
+```
+
+If you'd like to limit the run to one particular host, you can do so as follows:
+
+```
+ansible-playbook --private-key ~/.ssh/openshift -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/custom-playbook.yml -l app-node-0.openshift.example.com
+```
+
+You can also create your own custom playbook. Here are a few examples:
+
+### Adding additional YUM repositories
+
+```
+---
+- hosts: app
+ tasks:
+
+ # enable EPEL
+ - name: Add repository
+ yum_repository:
+ name: epel
+ description: EPEL YUM repo
+ baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
+```
+
+This example runs against app nodes. The list of options includes:
+
+ - cluster_hosts (all hosts: app, infra, masters, dns, lb)
+ - OSEv3 (app, infra, masters)
+ - app
+ - dns
+ - masters
+ - infra_hosts
+
+### Attaching additional RHN pools
+
+```
+---
+- hosts: cluster_hosts
+ tasks:
+ - name: Attach additional RHN pool
+ become: true
+ command: "/usr/bin/subscription-manager attach --pool=<pool ID>"
+ register: attach_rhn_pool_result
+ until: attach_rhn_pool_result.rc == 0
+ retries: 10
+ delay: 1
+```
+
+This playbook runs against all cluster nodes. To guard against transient connectivity
+problems, the task is retried 10 times in case of initial failure.
+Note that in order for this example to work in your deployment, your servers must use the RHEL image.
+
+### Adding extra Docker registry URLs
+
+This playbook is located in the [custom-actions](https://github.com/openshift/openshift-ansible-contrib/tree/master/playbooks/provisioning/openstack/custom-actions) directory.
+
+It adds URLs passed as arguments to the Docker configuration file.
+Going into more detail, the configuration file (which is in the YAML format) is loaded into an Ansible variable
+([lines 27-30](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml#L27-L30))
+and in its structure, `registries` and `insecure_registries` sections are expanded with the newly added items
+([lines 56-76](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml#L56-L76)).
+The new content is then saved into the original file
+([lines 78-82](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml#L78-L82))
+and docker is restarted.
+
+Example usage:
+```
+ansible-playbook -i <inventory> openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml --extra-vars '{"registries": "reg1", "insecure_registries": ["ins_reg1","ins_reg2"]}'
+```
+
+### Adding extra CAs to the trust chain
+
+This playbook is also located in the [custom-actions](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions) directory.
+It copies passed CAs to the trust chain location and updates the trust chain on each selected host.
+
+Example usage:
+```
+ansible-playbook -i <inventory> openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/add-cas.yml --extra-vars '{"ca_files": [<absolute path to ca1 file>, <absolute path to ca2 file>]}'
+```
+
+Please consider contributing your custom playbook back to openshift-ansible-contrib!
+
+A library of custom post-provision actions exists in `openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions`. Playbooks include:
+
+* [add-yum-repos.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml): adds a list of custom yum repositories to every node in the cluster
+* [add-rhn-pools.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml): attaches a list of additional RHN pools to every node in the cluster
+* [add-docker-registry.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml): adds a list of docker registries to the docker configuration on every node in the cluster
+* [add-cas.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-cas.yml): adds a list of CAs to the trust chain on every node in the cluster
+
+
+## Install OpenShift
+
+Once provisioning succeeds, you can install OpenShift by running:
+
+ ansible-playbook openshift-ansible/playbooks/byo/config.yml
+
+## Access UI
+
+The OpenShift UI may be accessed via the first master node's FQDN, on port 8443.
+
+When using a bastion, you may want to make an SSH tunnel from your control node
+to access the UI at `https://localhost:8443`, with this inventory variable:
+
+ openshift_ui_ssh_tunnel: True
+
+Note that this requires sudo rights on the Ansible control node and an absolute path
+for the `openstack_private_ssh_key`. You should also update the control node's
+`/etc/hosts`:
+
+ 127.0.0.1 master-0.openshift.example.com
+
+In order to access the UI, the ssh-tunnel service will be created and started on the
+control node. Make sure to remove these changes and the service manually when
+they are no longer needed.
+
+## Scale Deployment up/down
+
+### Scaling up
+
+One can scale up the number of application nodes by executing the ansible playbook
+`openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml`.
+This process can be done even if there is currently no deployment available.
+The `increment_by` variable is used to specify by how much the deployment should
+be scaled up (if none exists, it serves as a target number of application nodes).
+The path to `openshift-ansible` directory can be customised by the `openshift_ansible_dir`
+variable. Its value must be an absolute path to `openshift-ansible` and must
+not end with a '/' symbol.
+
+Usage:
+
+```
+ansible-playbook -i <path to inventory> openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml [-e increment_by=<number>] [-e openshift_ansible_dir=<path to openshift-ansible>]
+```
+
+Note: This playbook works only without a bastion node (`openstack_use_bastion: False`).
diff --git a/playbooks/provisioning/openstack/ansible.cfg b/playbooks/provisioning/openstack/ansible.cfg
new file mode 100644
index 000000000..a21f023ea
--- /dev/null
+++ b/playbooks/provisioning/openstack/ansible.cfg
@@ -0,0 +1,24 @@
+# config file for ansible -- http://ansible.com/
+# ==============================================
+[defaults]
+ansible_user = openshift
+forks = 50
+# work around privilege escalation timeouts in ansible
+timeout = 30
+host_key_checking = false
+inventory = inventory
+inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt
+gathering = smart
+retry_files_enabled = false
+fact_caching = jsonfile
+fact_caching_connection = .ansible/cached_facts
+fact_caching_timeout = 900
+stdout_callback = skippy
+callback_whitelist = profile_tasks
+lookup_plugins = openshift-ansible-contrib/lookup_plugins
+
+
+[ssh_connection]
+ssh_args = -o ControlMaster=auto -o ControlPersist=900s -o GSSAPIAuthentication=no
+control_path = /var/tmp/%%h-%%r
+pipelining = True
diff --git a/playbooks/provisioning/openstack/custom-actions/add-cas.yml b/playbooks/provisioning/openstack/custom-actions/add-cas.yml
new file mode 100644
index 000000000..b2c195f91
--- /dev/null
+++ b/playbooks/provisioning/openstack/custom-actions/add-cas.yml
@@ -0,0 +1,13 @@
+---
+- hosts: cluster_hosts
+ become: true
+ vars:
+ ca_files: []
+ tasks:
+ - name: Copy CAs to the trusted CAs location
+ with_items: "{{ ca_files }}"
+ copy:
+ src: "{{ item }}"
+ dest: /etc/pki/ca-trust/source/anchors/
+ - name: Update trusted CAs
+ shell: 'update-ca-trust enable && update-ca-trust extract'
diff --git a/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml b/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml
new file mode 100644
index 000000000..e118a71dc
--- /dev/null
+++ b/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml
@@ -0,0 +1,90 @@
+---
+- hosts: OSEv3
+ become: true
+ vars:
+ registries: []
+ insecure_registries: []
+
+ tasks:
+ - name: Check if docker is even installed
+ command: docker
+
+ - name: Install atomic-registries package
+ yum:
+ name: atomic-registries
+ state: latest
+
+ - name: Get registry configuration file
+ register: file_result
+ stat:
+ path: /etc/containers/registries.conf
+
+ - name: Check if it exists
+ assert:
+ that: 'file_result.stat.exists'
+ msg: "Configuration file does not exist."
+
+ - name: Load configuration file
+ shell: cat /etc/containers/registries.conf
+ register: file_content
+
+ - name: Store file content into a variable
+ set_fact:
+ docker_conf: "{{ file_content.stdout | from_yaml }}"
+
+ - name: Make sure that docker file content is a dictionary
+ when: '(docker_conf is string) and (not docker_conf)'
+ set_fact:
+ docker_conf: {}
+
+ - name: Make sure that registries is a list
+ when: 'registries is string'
+ set_fact:
+ registries_list: [ "{{ registries }}" ]
+
+ - name: Make sure that insecure_registries is a list
+ when: 'insecure_registries is string'
+ set_fact:
+ insecure_registries_list: [ "{{ insecure_registries }}" ]
+
+ - name: Set default values if there are no registries defined
+ set_fact:
+ docker_conf_registries: "{{ [] if docker_conf['registries'] is not defined else docker_conf['registries'] }}"
+ docker_conf_insecure_registries: "{{ [] if docker_conf['insecure_registries'] is not defined else docker_conf['insecure_registries'] }}"
+
+ - name: Add other registries
+ when: 'registries_list is not defined'
+ register: registries_merge_result
+ set_fact:
+ docker_conf: "{{ docker_conf | combine({'registries': (docker_conf_registries + registries) | unique}, recursive=True) }}"
+
+ - name: Add other registries (if registries had to be converted)
+ when: 'registries_merge_result|skipped'
+ set_fact:
+ docker_conf: "{{ docker_conf | combine({'registries': (docker_conf_registries + registries_list) | unique}, recursive=True) }}"
+
+ - name: Add insecure registries
+ when: 'insecure_registries_list is not defined'
+ register: insecure_registries_merge_result
+ set_fact:
+ docker_conf: "{{ docker_conf | combine({'insecure_registries': (docker_conf_insecure_registries + insecure_registries) | unique }, recursive=True) }}"
+
+ - name: Add insecure registries (if insecure_registries had to be converted)
+ when: 'insecure_registries_merge_result|skipped'
+ set_fact:
+ docker_conf: "{{ docker_conf | combine({'insecure_registries': (docker_conf_insecure_registries + insecure_registries_list) | unique }, recursive=True) }}"
+
+ - name: Load variable back to file
+ copy:
+ content: "{{ docker_conf | to_yaml }}"
+ dest: /etc/containers/registries.conf
+
+ - name: Restart registries service
+ service:
+ name: registries
+ state: restarted
+
+ - name: Restart docker
+ service:
+ name: docker
+ state: restarted
diff --git a/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml b/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml
new file mode 100644
index 000000000..d17c1e335
--- /dev/null
+++ b/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml
@@ -0,0 +1,13 @@
+---
+- hosts: cluster_hosts
+ vars:
+ rhn_pools: []
+ tasks:
+ - name: Attach additional RHN pools
+ become: true
+ with_items: "{{ rhn_pools }}"
+ command: "/usr/bin/subscription-manager attach --pool={{ item }}"
+ register: attach_rhn_pools_result
+ until: attach_rhn_pools_result.rc == 0
+ retries: 10
+ delay: 1
diff --git a/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml b/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml
new file mode 100644
index 000000000..ffebcb642
--- /dev/null
+++ b/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml
@@ -0,0 +1,12 @@
+---
+- hosts: cluster_hosts
+ vars:
+ yum_repos: []
+ tasks:
+ # enable additional yum repos
+ - name: Add repository
+ yum_repository:
+ name: "{{ item.name }}"
+ description: "{{ item.description }}"
+ baseurl: "{{ item.baseurl }}"
+ with_items: "{{ yum_repos }}"
diff --git a/playbooks/provisioning/openstack/custom_flavor_check.yaml b/playbooks/provisioning/openstack/custom_flavor_check.yaml
new file mode 100644
index 000000000..e11874c28
--- /dev/null
+++ b/playbooks/provisioning/openstack/custom_flavor_check.yaml
@@ -0,0 +1,9 @@
+---
+- name: Try to get flavor facts
+ os_flavor_facts:
+ name: "{{ flavor }}"
+ register: flavor_result
+- name: Check that custom flavor is available
+ assert:
+ that: "flavor_result.ansible_facts.openstack_flavors"
+ msg: "Flavor {{ flavor }} is not available."
diff --git a/playbooks/provisioning/openstack/custom_image_check.yaml b/playbooks/provisioning/openstack/custom_image_check.yaml
new file mode 100644
index 000000000..452e1e4d8
--- /dev/null
+++ b/playbooks/provisioning/openstack/custom_image_check.yaml
@@ -0,0 +1,9 @@
+---
+- name: Try to get image facts
+ os_image_facts:
+ image: "{{ image }}"
+ register: image_result
+- name: Check that custom image is available
+ assert:
+ that: "image_result.ansible_facts.openstack_image"
+ msg: "Image {{ image }} is not available."
diff --git a/playbooks/provisioning/openstack/galaxy-requirements.yaml b/playbooks/provisioning/openstack/galaxy-requirements.yaml
new file mode 100644
index 000000000..1d745dcc3
--- /dev/null
+++ b/playbooks/provisioning/openstack/galaxy-requirements.yaml
@@ -0,0 +1,10 @@
+---
+# This is the Ansible Galaxy requirements file to pull in the correct roles
+
+# From 'infra-ansible'
+- src: https://github.com/redhat-cop/infra-ansible
+ version: master
+
+# From 'openshift-ansible'
+- src: https://github.com/openshift/openshift-ansible
+ version: master
diff --git a/playbooks/provisioning/openstack/net_vars_check.yaml b/playbooks/provisioning/openstack/net_vars_check.yaml
new file mode 100644
index 000000000..68afde415
--- /dev/null
+++ b/playbooks/provisioning/openstack/net_vars_check.yaml
@@ -0,0 +1,14 @@
+---
+- name: Check the provider network configuration
+ fail:
+ msg: "Flannel SDN requires a dedicated containers data network and can not work over a provider network"
+ when:
+ - openstack_provider_network_name is defined
+ - openstack_private_data_network_name is defined
+
+- name: Check the flannel network configuration
+ fail:
+ msg: "A dedicated containers data network is only supported with Flannel SDN"
+ when:
+ - openstack_private_data_network_name is defined
+ - not openshift_use_flannel|default(False)|bool
diff --git a/playbooks/provisioning/openstack/post-install.yml b/playbooks/provisioning/openstack/post-install.yml
new file mode 100644
index 000000000..417813e2a
--- /dev/null
+++ b/playbooks/provisioning/openstack/post-install.yml
@@ -0,0 +1,57 @@
+---
+- hosts: OSEv3
+ gather_facts: False
+ become: True
+ tasks:
+ - name: Save iptables rules to a backup file
+ when: openshift_use_flannel|default(False)|bool
+ shell: iptables-save > /etc/sysconfig/iptables.orig-$(date +%Y%m%d%H%M%S)
+
+# Enable iptables service on app nodes to persist custom rules (flannel SDN)
+# FIXME(bogdando): workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1490820
+- hosts: app
+ gather_facts: False
+ become: True
+ vars:
+ os_firewall_allow:
+ - service: dnsmasq tcp
+ port: 53/tcp
+ - service: dnsmasq udp
+ port: 53/udp
+ tasks:
+ - when: openshift_use_flannel|default(False)|bool
+ block:
+ - include_role:
+ name: openshift-ansible/roles/os_firewall
+ - include_role:
+ name: openshift-ansible/roles/lib_os_firewall
+ - name: set allow rules for dnsmasq
+ os_firewall_manage_iptables:
+ name: "{{ item.service }}"
+ action: add
+ protocol: "{{ item.port.split('/')[1] }}"
+ port: "{{ item.port.split('/')[0] }}"
+ with_items: "{{ os_firewall_allow }}"
+
+- hosts: OSEv3
+ gather_facts: False
+ become: True
+ tasks:
+  - name: Apply post-install iptables hacks for Flannel SDN (best effort)
+ when: openshift_use_flannel|default(False)|bool
+ block:
+    - name: set allow/masquerade rules for flannel/docker
+ shell: >-
+ (iptables-save | grep -q custom-flannel-docker-1) ||
+ iptables -A DOCKER -w
+ -p all -j ACCEPT
+ -m comment --comment "custom-flannel-docker-1";
+ (iptables-save | grep -q custom-flannel-docker-2) ||
+ iptables -t nat -A POSTROUTING -w
+ -o {{flannel_interface|default('eth1')}}
+ -m comment --comment "custom-flannel-docker-2"
+ -j MASQUERADE
+
+    # NOTE(bogdando) the rules will not be restored when the iptables service unit is disabled and masked
+ - name: Persist in-memory iptables rules (w/o dynamic KUBE rules)
+ shell: iptables-save | grep -v KUBE > /etc/sysconfig/iptables
diff --git a/playbooks/provisioning/openstack/post-provision-openstack.yml b/playbooks/provisioning/openstack/post-provision-openstack.yml
new file mode 100644
index 000000000..e460fbf12
--- /dev/null
+++ b/playbooks/provisioning/openstack/post-provision-openstack.yml
@@ -0,0 +1,118 @@
+---
+- hosts: cluster_hosts
+  name: Wait for the nodes to come up
+ become: False
+ gather_facts: False
+ tasks:
+ - when: not openstack_use_bastion|default(False)|bool
+ wait_for_connection:
+ - when: openstack_use_bastion|default(False)|bool
+ delegate_to: bastion
+ wait_for_connection:
+
+- hosts: cluster_hosts
+ gather_facts: True
+ tasks:
+ - name: Debug hostvar
+ debug:
+ msg: "{{ hostvars[inventory_hostname] }}"
+ verbosity: 2
+
+- name: OpenShift Pre-Requisites (part 1)
+ include: pre-install.yml
+
+- name: Assign hostnames
+ hosts: cluster_hosts
+ gather_facts: False
+ become: true
+ roles:
+ - role: hostnames
+
+- name: Subscribe DNS Host to allow for configuration below
+ hosts: dns
+ gather_facts: False
+ become: true
+ roles:
+ - role: subscription-manager
+ when: hostvars.localhost.rhsm_register|default(False)
+ tags: 'subscription-manager'
+
+- name: Determine which DNS server(s) to use for our generated records
+ hosts: localhost
+ gather_facts: False
+ become: False
+ roles:
+ - dns-server-detect
+
+- name: Build the DNS Server Views and Configure DNS Server(s)
+ hosts: dns
+ gather_facts: False
+ become: true
+ roles:
+ - role: dns-views
+ - role: infra-ansible/roles/dns-server
+
+- name: Build and process DNS Records
+ hosts: localhost
+ gather_facts: True
+ become: False
+ roles:
+ - role: dns-records
+ use_bastion: "{{ openstack_use_bastion|default(False)|bool }}"
+ - role: infra-ansible/roles/dns
+
+- name: Switch the stack subnet to the configured private DNS server
+ hosts: localhost
+ gather_facts: False
+ become: False
+ vars_files:
+ - stack_params.yaml
+ tasks:
+ - include_role:
+ name: openstack-stack
+ tasks_from: subnet_update_dns_servers
+
+- name: OpenShift Pre-Requisites (part 2)
+ hosts: OSEv3
+ gather_facts: true
+ become: true
+ vars:
+ interface: "{{ flannel_interface|default('eth1') }}"
+ interface_file: /etc/sysconfig/network-scripts/ifcfg-{{ interface }}
+ interface_config:
+ DEVICE: "{{ interface }}"
+ TYPE: Ethernet
+ BOOTPROTO: dhcp
+ ONBOOT: 'yes'
+      DEFROUTE: 'no'
+ PEERDNS: 'no'
+ pre_tasks:
+ - name: "Include DNS configuration to ensure proper name resolution"
+ lineinfile:
+ state: present
+ dest: /etc/sysconfig/network
+        regexp: "^IP4_NAMESERVERS="
+ line: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}"
+ - name: "Configure the flannel interface options"
+ when: openshift_use_flannel|default(False)|bool
+ block:
+ - file:
+ dest: "{{ interface_file }}"
+ state: touch
+ mode: 0644
+ owner: root
+ group: root
+ - lineinfile:
+ state: present
+ dest: "{{ interface_file }}"
+          regexp: "^{{ item.key }}="
+ line: "{{ item.key }}={{ item.value }}"
+ with_dict: "{{ interface_config }}"
+ roles:
+ - node-network-manager
+
+- include: prepare-and-format-cinder-volume.yaml
+ when: >
+ prepare_and_format_registry_volume|default(False) or
+ (cinder_registry_volume is defined and
+ cinder_registry_volume.changed|default(False))
diff --git a/playbooks/provisioning/openstack/pre-install.yml b/playbooks/provisioning/openstack/pre-install.yml
new file mode 100644
index 000000000..45e9005cc
--- /dev/null
+++ b/playbooks/provisioning/openstack/pre-install.yml
@@ -0,0 +1,19 @@
+---
+###############################
+# OpenShift Pre-Requisites
+
+# - subscribe hosts
+# - prepare docker
+# - other prep (install additional packages, etc.)
+#
+- hosts: OSEv3
+ become: true
+ roles:
+ - { role: subscription-manager, when: hostvars.localhost.rhsm_register|default(False), tags: 'subscription-manager', ansible_sudo: true }
+ - { role: docker, tags: 'docker' }
+ - { role: openshift-prep, tags: 'openshift-prep' }
+
+- hosts: localhost:cluster_hosts
+ become: False
+ tasks:
+ - include: pre_tasks.yml
diff --git a/playbooks/provisioning/openstack/pre_tasks.yml b/playbooks/provisioning/openstack/pre_tasks.yml
new file mode 100644
index 000000000..11fe2dd84
--- /dev/null
+++ b/playbooks/provisioning/openstack/pre_tasks.yml
@@ -0,0 +1,53 @@
+---
+- name: Generate Environment ID
+ set_fact:
+ env_random_id: "{{ ansible_date_time.epoch }}"
+ run_once: true
+ delegate_to: localhost
+
+- name: Set default Environment ID
+ set_fact:
+ default_env_id: "openshift-{{ lookup('env','OS_USERNAME') }}-{{ env_random_id }}"
+ delegate_to: localhost
+
+- name: Setting Common Facts
+ set_fact:
+ env_id: "{{ env_id | default(default_env_id) }}"
+ delegate_to: localhost
+
+- name: Updating DNS domain to include env_id (if not empty)
+ set_fact:
+ full_dns_domain: "{{ (env_id|trim == '') | ternary(public_dns_domain, env_id + '.' + public_dns_domain) }}"
+ delegate_to: localhost
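+# For example, env_id "openshift" and public_dns_domain "example.com"
+# yield a full_dns_domain of "openshift.example.com", while an empty
+# env_id leaves it as "example.com".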
+
+- name: Set the APP domain for OpenShift use
+ set_fact:
+ openshift_app_domain: "{{ openshift_app_domain | default('apps') }}"
+ delegate_to: localhost
+
+- name: Set the default app domain for routing purposes
+ set_fact:
+ openshift_master_default_subdomain: "{{ openshift_app_domain }}.{{ full_dns_domain }}"
+ delegate_to: localhost
+ when:
+ - openshift_master_default_subdomain is undefined
+
+# Check that openshift_cluster_node_labels has regions defined for all groups
+# NOTE(kpilatov): if node labels are to be enabled for more groups,
+# this check needs to be modified as well
+- name: Set openshift_cluster_node_labels if undefined (should not happen)
+ set_fact:
+ openshift_cluster_node_labels: {'app': {'region': 'primary'}, 'infra': {'region': 'infra'}}
+ when: openshift_cluster_node_labels is not defined
+
+- name: Set openshift_cluster_node_labels for the infra group
+ set_fact:
+ openshift_cluster_node_labels: "{{ openshift_cluster_node_labels | combine({'infra': {'region': 'infra'}}, recursive=True) }}"
+
+- name: Set openshift_cluster_node_labels for the app group
+ set_fact:
+ openshift_cluster_node_labels: "{{ openshift_cluster_node_labels | combine({'app': {'region': 'primary'}}, recursive=True) }}"
+
+- name: Set openshift_cluster_node_labels for auto-scaling app nodes
+ set_fact:
+ openshift_cluster_node_labels: "{{ openshift_cluster_node_labels | combine({'app': {'autoscaling': 'app'}}, recursive=True) }}"
diff --git a/playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml b/playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml
new file mode 100644
index 000000000..30e094459
--- /dev/null
+++ b/playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml
@@ -0,0 +1,67 @@
+---
+- hosts: localhost
+ gather_facts: False
+ become: False
+ tasks:
+ - set_fact:
+ cinder_volume: "{{ hostvars[groups.masters[0]].openshift_hosted_registry_storage_openstack_volumeID }}"
+ cinder_fs: "{{ hostvars[groups.masters[0]].openshift_hosted_registry_storage_openstack_filesystem }}"
+
+ - name: Attach the volume to the VM
+ os_server_volume:
+ state: present
+ server: "{{ groups['masters'][0] }}"
+ volume: "{{ cinder_volume }}"
+ register: volume_attachment
+
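+  # NOTE: json_query needs the jmespath Python module (checked in
+  # prerequisites.yml); it extracts the device path Nova reported for
+  # the attached volume, e.g. /dev/vdb.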
+ - set_fact:
+ attached_device: >-
+ {{ volume_attachment['attachments']|json_query("[?volume_id=='" + cinder_volume + "'].device | [0]") }}
+
+ - delegate_to: "{{ groups['masters'][0] }}"
+ block:
+ - name: Wait for the device to appear
+ wait_for: path={{ attached_device }}
+
+ - name: Create a temp directory for mounting the volume
+ tempfile:
+ prefix: cinder-volume
+ state: directory
+ register: cinder_mount_dir
+
+ - name: Format the device
+ filesystem:
+ fstype: "{{ cinder_fs }}"
+ dev: "{{ attached_device }}"
+
+ - name: Mount the device
+ mount:
+ name: "{{ cinder_mount_dir.path }}"
+ src: "{{ attached_device }}"
+ state: mounted
+ fstype: "{{ cinder_fs }}"
+
+ - name: Change mode on the filesystem
+ file:
+ path: "{{ cinder_mount_dir.path }}"
+ state: directory
+ recurse: true
+ mode: 0777
+
+ - name: Unmount the device
+ mount:
+ name: "{{ cinder_mount_dir.path }}"
+ src: "{{ attached_device }}"
+ state: absent
+ fstype: "{{ cinder_fs }}"
+
+ - name: Delete the temp directory
+ file:
+ name: "{{ cinder_mount_dir.path }}"
+ state: absent
+
+ - name: Detach the volume from the VM
+ os_server_volume:
+ state: absent
+ server: "{{ groups['masters'][0] }}"
+ volume: "{{ cinder_volume }}"
diff --git a/playbooks/provisioning/openstack/prerequisites.yml b/playbooks/provisioning/openstack/prerequisites.yml
new file mode 100644
index 000000000..11a31411e
--- /dev/null
+++ b/playbooks/provisioning/openstack/prerequisites.yml
@@ -0,0 +1,123 @@
+---
+- hosts: localhost
+ tasks:
+
+ # Sanity check of inventory variables
+ - include: net_vars_check.yaml
+
+ # Check ansible
+ - name: Check Ansible version
+ assert:
+ that: >
+ (ansible_version.major == 2 and ansible_version.minor >= 3) or
+ (ansible_version.major > 2)
+ msg: "Ansible version must be at least 2.3"
+
+ # Check shade
+ - name: Try to import python module shade
+ command: python -c "import shade"
+ ignore_errors: yes
+ register: shade_result
+ - name: Check if shade is installed
+ assert:
+ that: 'shade_result.rc == 0'
+ msg: "Python module shade is not installed"
+
+ # Check jmespath
+  - name: Try to import python module jmespath
+ command: python -c "import jmespath"
+ ignore_errors: yes
+ register: jmespath_result
+ - name: Check if jmespath is installed
+ assert:
+ that: 'jmespath_result.rc == 0'
+ msg: "Python module jmespath is not installed"
+
+ # Check python-dns
+ - name: Try to import python DNS module
+ command: python -c "import dns"
+ ignore_errors: yes
+ register: pythondns_result
+ - name: Check if python-dns is installed
+ assert:
+ that: 'pythondns_result.rc == 0'
+ msg: "Python module python-dns is not installed"
+
+ # Check jinja2
+ - name: Try to import jinja2 module
+ command: python -c "import jinja2"
+ ignore_errors: yes
+ register: jinja_result
+ - name: Check if jinja2 is installed
+ assert:
+ that: 'jinja_result.rc == 0'
+ msg: "Python module jinja2 is not installed"
+
+ # Check Glance image
+ - name: Try to get image facts
+ os_image_facts:
+ image: "{{ openstack_default_image_name }}"
+ register: image_result
+ - name: Check that image is available
+ assert:
+ that: "image_result.ansible_facts.openstack_image"
+ msg: "Image {{ openstack_default_image_name }} is not available"
+
+ # Check network name
+ - name: Try to get network facts
+ os_networks_facts:
+ name: "{{ openstack_external_network_name }}"
+ register: network_result
+ when: not openstack_provider_network_name|default(None)
+ - name: Check that network is available
+ assert:
+ that: "network_result.ansible_facts.openstack_networks"
+ msg: "Network {{ openstack_external_network_name }} is not available"
+ when: not openstack_provider_network_name|default(None)
+
+ # Check keypair
+ # TODO kpilatov: there is no Ansible module for getting OS keypairs
+ # (os_keypair is not suitable for this)
+ # this method does not force python-openstackclient dependency
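+  # NOTE: the one-liner below exits non-zero when get_keypair() returns
+  # None, i.e. when the named keypair does not exist in the tenant.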
+ - name: Try to show keypair
+ command: >
+ python -c 'import shade; cloud = shade.openstack_cloud();
+ exit(cloud.get_keypair("{{ openstack_ssh_public_key }}") is None)'
+ ignore_errors: yes
+ register: key_result
+ - name: Check that keypair is available
+ assert:
+ that: 'key_result.rc == 0'
+ msg: "Keypair {{ openstack_ssh_public_key }} is not available"
+
+# Check that custom images and flavors exist
+- hosts: localhost
+
+ # Include variables that will be used by heat
+ vars_files:
+ - stack_params.yaml
+
+ tasks:
+ # Check that custom images are available
+ - include: custom_image_check.yaml
+ with_items:
+ - "{{ openstack_master_image }}"
+ - "{{ openstack_infra_image }}"
+ - "{{ openstack_node_image }}"
+ - "{{ openstack_lb_image }}"
+ - "{{ openstack_etcd_image }}"
+ - "{{ openstack_dns_image }}"
+ loop_control:
+ loop_var: image
+
+ # Check that custom flavors are available
+ - include: custom_flavor_check.yaml
+ with_items:
+ - "{{ master_flavor }}"
+ - "{{ infra_flavor }}"
+ - "{{ node_flavor }}"
+ - "{{ lb_flavor }}"
+ - "{{ etcd_flavor }}"
+ - "{{ dns_flavor }}"
+ loop_control:
+ loop_var: flavor
diff --git a/playbooks/provisioning/openstack/provision-openstack.yml b/playbooks/provisioning/openstack/provision-openstack.yml
new file mode 100644
index 000000000..bf424676d
--- /dev/null
+++ b/playbooks/provisioning/openstack/provision-openstack.yml
@@ -0,0 +1,35 @@
+---
+- hosts: localhost
+ gather_facts: True
+ become: False
+ vars_files:
+ - stack_params.yaml
+ pre_tasks:
+ - include: pre_tasks.yml
+ roles:
+ - role: openstack-stack
+ - role: openstack-create-cinder-registry
+ when:
+ - cinder_hosted_registry_name is defined
+ - cinder_hosted_registry_size_gb is defined
+ - role: static_inventory
+ when: openstack_inventory|default('static') == 'static'
+ inventory_path: "{{ openstack_inventory_path|default(inventory_dir) }}"
+ private_ssh_key: "{{ openstack_private_ssh_key|default('') }}"
+ ssh_config_path: "{{ openstack_ssh_config_path|default('/tmp/ssh.config.openshift.ansible' + '.' + stack_name) }}"
+ ssh_user: "{{ ansible_user }}"
+
+- name: Refresh Server inventory or exit to apply SSH config
+ hosts: localhost
+ connection: local
+ become: False
+ gather_facts: False
+ tasks:
+ - name: Exit to apply SSH config for a bastion
+ meta: end_play
+ when: openstack_use_bastion|default(False)|bool
+ - name: Refresh Server inventory
+ meta: refresh_inventory
+
+- include: post-provision-openstack.yml
+ when: not openstack_use_bastion|default(False)|bool
diff --git a/playbooks/provisioning/openstack/provision.yaml b/playbooks/provisioning/openstack/provision.yaml
new file mode 100644
index 000000000..474c9c803
--- /dev/null
+++ b/playbooks/provisioning/openstack/provision.yaml
@@ -0,0 +1,4 @@
+---
+- include: "prerequisites.yml"
+
+- include: "provision-openstack.yml"
diff --git a/playbooks/provisioning/openstack/roles b/playbooks/provisioning/openstack/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/provisioning/openstack/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml
new file mode 100644
index 000000000..949a323a7
--- /dev/null
+++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml
@@ -0,0 +1,59 @@
+---
+openshift_deployment_type: origin
+#openshift_deployment_type: openshift-enterprise
+#openshift_release: v3.5
+openshift_master_default_subdomain: "apps.{{ env_id }}.{{ public_dns_domain }}"
+
+openshift_master_cluster_method: native
+openshift_master_cluster_hostname: "{{ groups.lb.0|default(groups.masters.0) }}"
+openshift_master_cluster_public_hostname: "{{ groups.lb.0|default(groups.masters.0) }}"
+
+osm_default_node_selector: 'region=primary'
+
+openshift_hosted_router_wait: True
+openshift_hosted_registry_wait: True
+
+## Openstack credentials
+#openshift_cloudprovider_kind: openstack
+#openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
+#openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}"
+#openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
+#openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}"
+#openshift_cloudprovider_openstack_region: "{{ lookup('env', 'OS_REGION_NAME') }}"
+
+
+## Use Cinder volume for Openshift registry:
+#openshift_hosted_registry_storage_kind: openstack
+#openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce']
+#openshift_hosted_registry_storage_openstack_filesystem: xfs
+
+## NOTE(shadower): This won't work until the openshift-ansible issue #5657 is fixed:
+## https://github.com/openshift/openshift-ansible/issues/5657
+## If you're using the `cinder_hosted_registry_name` option from
+## `all.yml`, uncomment these lines:
+#openshift_hosted_registry_storage_openstack_volumeID: "{{ lookup('os_cinder', cinder_hosted_registry_name).id }}"
+#openshift_hosted_registry_storage_volume_size: "{{ cinder_hosted_registry_size_gb }}Gi"
+
+## If you're using a Cinder volume you've set up yourself, uncomment these lines:
+#openshift_hosted_registry_storage_openstack_volumeID: e0ba2d73-d2f9-4514-a3b2-a0ced507fa05
+#openshift_hosted_registry_storage_volume_size: 10Gi
+
+
+# NOTE(shadower): the hostname check seems to always fail because the
+# host's floating IP address doesn't match the address received from
+# inside the host.
+openshift_override_hostname_check: true
+
+# For PoCs or demo environments using instances smaller than the officially
+# recommended RAM and disk values, uncomment the line below.
+#openshift_disable_check: disk_availability,memory_availability
+
+# NOTE(shadower): Always switch to root on the OSEv3 nodes.
+# openshift-ansible requires an explicit `become`.
+ansible_become: true
+
+# # Flannel networking
+#osm_cluster_network_cidr: 10.128.0.0/14
+#openshift_use_openshift_sdn: false
+#openshift_use_flannel: true
+#flannel_interface: eth1
diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml
new file mode 100644
index 000000000..83289307d
--- /dev/null
+++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml
@@ -0,0 +1,166 @@
+---
+env_id: "openshift"
+public_dns_domain: "example.com"
+public_dns_nameservers: []
+
+# # Used Hostnames
+# # - set custom hostnames for roles by uncommenting corresponding lines
+#openstack_master_hostname: "master"
+#openstack_infra_hostname: "infra-node"
+#openstack_node_hostname: "app-node"
+#openstack_lb_hostname: "lb"
+#openstack_etcd_hostname: "etcd"
+#openstack_dns_hostname: "dns"
+
+openstack_ssh_public_key: "openshift"
+openstack_external_network_name: "public"
+#openstack_private_network_name: "openshift-ansible-{{ stack_name }}-net"
+# # A dedicated Neutron network name for containers data network
+# # Configures the data network to be separated from openstack_private_network_name
+# # NOTE: this is currently only supported with Flannel SDN
+#openstack_private_data_network_name: "openshift-ansible-{{ stack_name }}-data-net"
+
+## If you want to use a provider network, set its name here.
+## NOTE: the `openstack_external_network_name` and
+## `openstack_private_network_name` options will be ignored when using a
+## provider network.
+#openstack_provider_network_name: "provider"
+
+# # Used Images
+# # - set specific images for roles by uncommenting corresponding lines
+# # - note: do not remove openstack_default_image_name definition
+#openstack_master_image_name: "centos7"
+#openstack_infra_image_name: "centos7"
+#openstack_node_image_name: "centos7"
+#openstack_lb_image_name: "centos7"
+#openstack_etcd_image_name: "centos7"
+#openstack_dns_image_name: "centos7"
+openstack_default_image_name: "centos7"
+
+openstack_num_masters: 1
+openstack_num_infra: 1
+openstack_num_nodes: 2
+
+# # Used Flavors
+# # - set specific flavors for roles by uncommenting corresponding lines
+# # - note: do not remove openstack_default_flavor definition
+#openstack_master_flavor: "m1.medium"
+#openstack_infra_flavor: "m1.medium"
+#openstack_node_flavor: "m1.medium"
+#openstack_lb_flavor: "m1.medium"
+#openstack_etcd_flavor: "m1.medium"
+#openstack_dns_flavor: "m1.medium"
+openstack_default_flavor: "m1.medium"
+
+# # Numerical index of nodes to remove
+# openstack_nodes_to_remove: []
+
+# # Docker volume size
+# # - set specific volume size for roles by uncommenting corresponding lines
+# # - note: do not remove docker_default_volume_size definition
+#docker_master_volume_size: "15"
+#docker_infra_volume_size: "15"
+#docker_node_volume_size: "15"
+#docker_etcd_volume_size: "2"
+#docker_dns_volume_size: "1"
+#docker_lb_volume_size: "5"
+docker_volume_size: "15"
+
+## Specify server group policies for master and infra nodes. Nova must be configured to
+## enable these policies. 'anti-affinity' will ensure that each VM is launched on a
+## different physical host.
+#openstack_master_server_group_policies: [anti-affinity]
+#openstack_infra_server_group_policies: [anti-affinity]
+
+## Create a Cinder volume and use it for the OpenShift registry.
+## NOTE: the openstack credentials and hosted registry options must be set in OSEv3.yml!
+#cinder_hosted_registry_name: cinder-registry
+#cinder_hosted_registry_size_gb: 10
+
+## Set up a filesystem on the cinder volume specified in `OSEv3.yaml`.
+## You need to specify the file system and volume ID in OSEv3 via
+## `openshift_hosted_registry_storage_openstack_filesystem` and
+## `openshift_hosted_registry_storage_openstack_volumeID`.
+## WARNING: This will delete any data on the volume!
+#prepare_and_format_registry_volume: False
+
+openstack_subnet_prefix: "192.168.99"
+
+## Red Hat subscription defaults to false, which means we will not attempt
+## to subscribe the nodes
+#rhsm_register: False
+
+# # Using Red Hat Satellite:
+#rhsm_register: True
+#rhsm_satellite: 'sat-6.example.com'
+#rhsm_org: 'OPENSHIFT_ORG'
+#rhsm_activationkey: '<activation-key>'
+
+# # Or using RHN username, password and optionally pool:
+#rhsm_register: True
+#rhsm_username: '<username>'
+#rhsm_password: '<password>'
+#rhsm_pool: '<pool id>'
+
+#rhsm_repos:
+# - "rhel-7-server-rpms"
+# - "rhel-7-server-ose-3.5-rpms"
+# - "rhel-7-server-extras-rpms"
+# - "rhel-7-fast-datapath-rpms"
+
+
+# # Roll-your-own DNS
+#openstack_num_dns: 0
+#external_nsupdate_keys:
+# public:
+# key_secret: 'SKqKNdpfk7llKxZ57bbxUnUDobaaJp9t8CjXLJPl+fRI5mPcSBuxTAyvJPa6Y9R7vUg9DwCy/6WTpgLNqnV4Hg=='
+# key_algorithm: 'hmac-md5'
+# server: '192.168.1.1'
+# private:
+# key_secret: 'kVE2bVTgZjrdJipxPhID8BEZmbHD8cExlVPR+zbFpW6la8kL5wpXiwOh8q5AAosXQI5t95UXwq3Inx8QT58duw=='
+# key_algorithm: 'hmac-md5'
+# server: '192.168.1.2'
+
+# # Customize DNS server security options
+#named_public_recursion: 'no'
+#named_private_recursion: 'yes'
+
+
+# NOTE(shadower): Do not change this value. The Ansible user is currently
+# hardcoded to `openshift`.
+ansible_user: openshift
+
+# # Use a single security group for a cluster (default: false)
+#openstack_flat_secgrp: false
+
+# # Openstack inventory type and cluster nodes access pattern.
+# # Defaults to 'static'.
+# # Use 'dynamic' to access cluster nodes directly via floating IPs,
+# # with a dynamic inventory script such as openstack.py.
+#openstack_inventory: static
+# # The path where the static inventory is written out from the in-memory one
+#openstack_inventory_path: ../../../../inventory
+
+# # Use a bastion node to access the cluster nodes (defaults to False).
+# # Requires a static inventory.
+#openstack_use_bastion: False
+#bastion_ingress_cidr: "{{openstack_subnet_prefix}}.0/24"
+#
+# # The Nova key-pair's private SSH key to access inventory nodes
+#openstack_private_ssh_key: ~/.ssh/openshift
+# # The path for the SSH config to access all nodes
+#openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.{{ env_id }}.{{ public_dns_domain }}
+
+
+# If you want to use the VM storage instead of Cinder volumes, set this to `true`.
+# NOTE: this is for testing only! Your data will be gone once the VM disappears!
+# ephemeral_volumes: false
+
+# # OpenShift node labels
+# # - in order to customise node labels for app and/or infra group, set the
+# # openshift_cluster_node_labels variable
+#openshift_cluster_node_labels:
+# app:
+# region: primary
+# infra:
+# region: infra
diff --git a/playbooks/provisioning/openstack/sample-inventory/inventory.py b/playbooks/provisioning/openstack/sample-inventory/inventory.py
new file mode 100755
index 000000000..6a1b74b3d
--- /dev/null
+++ b/playbooks/provisioning/openstack/sample-inventory/inventory.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+
+from __future__ import print_function
+
+import json
+
+import shade
+
+
+if __name__ == '__main__':
+ cloud = shade.openstack_cloud()
+
+ inventory = {}
+
+ # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
+ # environment variable.
+ cluster_hosts = [
+ server for server in cloud.list_servers()
+ if 'metadata' in server and 'clusterid' in server.metadata]
+
+ masters = [server.name for server in cluster_hosts
+ if server.metadata['host-type'] == 'master']
+
+ etcd = [server.name for server in cluster_hosts
+ if server.metadata['host-type'] == 'etcd']
+ if not etcd:
+ etcd = masters
+
+ infra_hosts = [server.name for server in cluster_hosts
+ if server.metadata['host-type'] == 'node' and
+ server.metadata['sub-host-type'] == 'infra']
+
+ app = [server.name for server in cluster_hosts
+ if server.metadata['host-type'] == 'node' and
+ server.metadata['sub-host-type'] == 'app']
+
+ nodes = list(set(masters + infra_hosts + app))
+
+ dns = [server.name for server in cluster_hosts
+ if server.metadata['host-type'] == 'dns']
+
+ lb = [server.name for server in cluster_hosts
+ if server.metadata['host-type'] == 'lb']
+
+ osev3 = list(set(nodes + etcd + lb))
+
+ inventory['cluster_hosts'] = {'hosts': [s.name for s in cluster_hosts]}
+ inventory['OSEv3'] = {'hosts': osev3}
+ inventory['masters'] = {'hosts': masters}
+ inventory['etcd'] = {'hosts': etcd}
+ inventory['nodes'] = {'hosts': nodes}
+ inventory['infra_hosts'] = {'hosts': infra_hosts}
+ inventory['app'] = {'hosts': app}
+ inventory['dns'] = {'hosts': dns}
+ inventory['lb'] = {'hosts': lb}
+
+ for server in cluster_hosts:
+ if 'group' in server.metadata:
+ group = server.metadata.group
+ if group not in inventory:
+ inventory[group] = {'hosts': []}
+ inventory[group]['hosts'].append(server.name)
+
+ inventory['_meta'] = {'hostvars': {}}
+
+ for server in cluster_hosts:
+ ssh_ip_address = server.public_v4 or server.private_v4
+ vars = {
+ 'ansible_host': ssh_ip_address
+ }
+
+        # The SSH address doubles as public_v4: the floating IP when one
+        # exists, otherwise the private address.
+        if ssh_ip_address:
+            vars['public_v4'] = ssh_ip_address
+ # TODO(shadower): what about multiple networks?
+ if server.private_v4:
+ vars['private_v4'] = server.private_v4
+
+ node_labels = server.metadata.get('node_labels')
+ if node_labels:
+ vars['openshift_node_labels'] = node_labels
+
+ inventory['_meta']['hostvars'][server.name] = vars
+
+ print(json.dumps(inventory, indent=4, sort_keys=True))
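+
+# Usage sketch (assumes the OS_* auth environment variables that shade
+# reads are set in the environment):
+#   ansible-playbook -i inventory.py post-provision-openstack.yml
+#   python inventory.py    # dump the generated groups as JSON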
diff --git a/playbooks/provisioning/openstack/scale-up.yaml b/playbooks/provisioning/openstack/scale-up.yaml
new file mode 100644
index 000000000..79fc09050
--- /dev/null
+++ b/playbooks/provisioning/openstack/scale-up.yaml
@@ -0,0 +1,75 @@
+---
+# Get the needed information about the current deployment
+- hosts: masters[0]
+ tasks:
+ - name: Get number of app nodes
+ shell: oc get nodes -l autoscaling=app --no-headers=true | wc -l
+ register: oc_old_num_nodes
+ - name: Get names of app nodes
+ shell: oc get nodes -l autoscaling=app --no-headers=true | cut -f1 -d " "
+ register: oc_old_app_nodes
+
+- hosts: localhost
+ tasks:
+  # The number and names of the app nodes registered on the master are
+  # needed later on localhost, so store them as localhost facts
+ - name: Store old number and names of app nodes locally (if there is an existing deployment)
+ when: '"masters" in groups'
+ register: set_fact_result
+ set_fact:
+ oc_old_num_nodes: "{{ hostvars[groups['masters'][0]]['oc_old_num_nodes'].stdout }}"
+ oc_old_app_nodes: "{{ hostvars[groups['masters'][0]]['oc_old_app_nodes'].stdout_lines }}"
+
+ - name: Set default values for old app nodes (if there is no existing deployment)
+ when: 'set_fact_result | skipped'
+ set_fact:
+ oc_old_num_nodes: 0
+ oc_old_app_nodes: []
+
+ # Set how many nodes are to be added (1 by default)
+ - name: Set how many nodes are to be added
+ set_fact:
+ increment_by: 1
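+  # NOTE: extra vars take precedence over set_fact, so the increment can
+  # be overridden from the command line, e.g.:
+  #   ansible-playbook scale-up.yaml -e increment_by=2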
+ - name: Check that the number corresponds to scaling up (not down)
+ assert:
+ that: 'increment_by | int >= 1'
+ msg: >
+ FAIL: The value of increment_by must be at least 1
+ (but it is {{ increment_by | int }}).
+ - name: Update openstack_num_nodes variable
+ set_fact:
+ openstack_num_nodes: "{{ oc_old_num_nodes | int + increment_by | int }}"
+
+# Run provision.yaml with higher number of nodes to create a new app-node VM
+- include: provision.yaml
+
+# Run config.yml to perform the OpenShift installation.
+# The path to openshift-ansible can be customised:
+# - the value of openshift_ansible_dir has to be an absolute path
+# - the path must not end with a trailing '/'
+
+# Create a new deployment by running the full installation
+- include: "{{ openshift_ansible_dir }}/playbooks/byo/config.yml"
+ vars:
+ openshift_ansible_dir: ../../../../openshift-ansible
+ when: 'not groups["new_nodes"] | list'
+
+# Scale up an existing deployment
+- include: "{{ openshift_ansible_dir }}/playbooks/byo/openshift-node/scaleup.yml"
+ vars:
+ openshift_ansible_dir: ../../../../openshift-ansible
+ when: 'groups["new_nodes"] | list'
+
+# Post-verification: Verify new number of nodes
+- hosts: masters[0]
+ tasks:
+ - name: Get number of nodes
+ shell: oc get nodes -l autoscaling=app --no-headers=true | wc -l
+ register: oc_new_num_nodes
+ - name: Check that the actual result matches the defined value
+ assert:
+ that: 'oc_new_num_nodes.stdout | int == (hostvars["localhost"]["oc_old_num_nodes"] | int + hostvars["localhost"]["increment_by"] | int)'
+ msg: >
+ FAIL: Number of application nodes has not been increased accordingly
+ (it should be {{ hostvars["localhost"]["oc_old_num_nodes"] | int + hostvars["localhost"]["increment_by"] | int }}
+ but it is {{ oc_new_num_nodes.stdout | int }}).
diff --git a/playbooks/provisioning/openstack/stack_params.yaml b/playbooks/provisioning/openstack/stack_params.yaml
new file mode 100644
index 000000000..a4da31bfe
--- /dev/null
+++ b/playbooks/provisioning/openstack/stack_params.yaml
@@ -0,0 +1,49 @@
+---
+stack_name: "{{ env_id }}.{{ public_dns_domain }}"
+dns_domain: "{{ public_dns_domain }}"
+dns_nameservers: "{{ public_dns_nameservers }}"
+subnet_prefix: "{{ openstack_subnet_prefix }}"
+master_hostname: "{{ openstack_master_hostname | default('master') }}"
+infra_hostname: "{{ openstack_infra_hostname | default('infra-node') }}"
+node_hostname: "{{ openstack_node_hostname | default('app-node') }}"
+lb_hostname: "{{ openstack_lb_hostname | default('lb') }}"
+etcd_hostname: "{{ openstack_etcd_hostname | default('etcd') }}"
+dns_hostname: "{{ openstack_dns_hostname | default('dns') }}"
+ssh_public_key: "{{ openstack_ssh_public_key }}"
+openstack_image: "{{ openstack_default_image_name }}"
+lb_flavor: "{{ openstack_lb_flavor | default(openstack_default_flavor) }}"
+etcd_flavor: "{{ openstack_etcd_flavor | default(openstack_default_flavor) }}"
+master_flavor: "{{ openstack_master_flavor | default(openstack_default_flavor) }}"
+node_flavor: "{{ openstack_node_flavor | default(openstack_default_flavor) }}"
+infra_flavor: "{{ openstack_infra_flavor | default(openstack_default_flavor) }}"
+dns_flavor: "{{ openstack_dns_flavor | default(openstack_default_flavor) }}"
+openstack_master_image: "{{ openstack_master_image_name | default(openstack_default_image_name) }}"
+openstack_infra_image: "{{ openstack_infra_image_name | default(openstack_default_image_name) }}"
+openstack_node_image: "{{ openstack_node_image_name | default(openstack_default_image_name) }}"
+openstack_lb_image: "{{ openstack_lb_image_name | default(openstack_default_image_name) }}"
+openstack_etcd_image: "{{ openstack_etcd_image_name | default(openstack_default_image_name) }}"
+openstack_dns_image: "{{ openstack_dns_image_name | default(openstack_default_image_name) }}"
+openstack_private_network: >-
+ {% if openstack_provider_network_name | default(None) -%}
+ {{ openstack_provider_network_name }}
+ {%- else -%}
+ {{ openstack_private_network_name | default ('openshift-ansible-' + stack_name + '-net') }}
+ {%- endif -%}
+provider_network: "{{ openstack_provider_network_name | default(None) }}"
+external_network: "{{ openstack_external_network_name | default(None) }}"
+num_etcd: "{{ openstack_num_etcd | default(0) }}"
+num_masters: "{{ openstack_num_masters }}"
+num_nodes: "{{ openstack_num_nodes }}"
+num_infra: "{{ openstack_num_infra }}"
+num_dns: "{{ openstack_num_dns | default(1) }}"
+master_server_group_policies: "{{ openstack_master_server_group_policies | default([]) | to_yaml }}"
+infra_server_group_policies: "{{ openstack_infra_server_group_policies | default([]) | to_yaml }}"
+master_volume_size: "{{ docker_master_volume_size | default(docker_volume_size) }}"
+infra_volume_size: "{{ docker_infra_volume_size | default(docker_volume_size) }}"
+node_volume_size: "{{ docker_node_volume_size | default(docker_volume_size) }}"
+etcd_volume_size: "{{ docker_etcd_volume_size | default('2') }}"
+dns_volume_size: "{{ docker_dns_volume_size | default('1') }}"
+lb_volume_size: "{{ docker_lb_volume_size | default('5') }}"
+nodes_to_remove: "{{ openstack_nodes_to_remove | default([]) | to_yaml }}"
+use_bastion: "{{ openstack_use_bastion|default(False) }}"
+ui_ssh_tunnel: "{{ openshift_ui_ssh_tunnel|default(False) }}"
diff --git a/roles/common/defaults/main.yml b/roles/common/defaults/main.yml
new file mode 100644
index 000000000..8db591374
--- /dev/null
+++ b/roles/common/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+openshift_cluster_node_labels:
+ app:
+ region: primary
+ infra:
+ region: infra
diff --git a/roles/dns-records/defaults/main.yml b/roles/dns-records/defaults/main.yml
new file mode 100644
index 000000000..3f7fa783f
--- /dev/null
+++ b/roles/dns-records/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+use_bastion: False
diff --git a/roles/dns-records/tasks/main.yml b/roles/dns-records/tasks/main.yml
new file mode 100644
index 000000000..7148b016a
--- /dev/null
+++ b/roles/dns-records/tasks/main.yml
@@ -0,0 +1,121 @@
+---
+- name: "Generate list of private A records"
+ set_fact:
+ private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}"
+ with_items: "{{ groups['cluster_hosts'] }}"
+
+- name: "Add wildcard records to the private A records for infra hosts"
+ set_fact:
+ private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['private_v4'] } ] }}"
+ with_items: "{{ groups['infra_hosts'] }}"
+
+- name: "Add public master cluster hostname records to the private A records (single master)"
+ set_fact:
+ private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].private_v4 } ] }}"
+ when:
+ - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined
+ - openstack_num_masters == 1
+
+- name: "Add public master cluster hostname records to the private A records (multi-master)"
+ set_fact:
+ private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].private_v4 } ] }}"
+ when:
+ - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined
+ - openstack_num_masters > 1
+
+- name: "Set the private DNS server to use the external value (if provided)"
+ set_fact:
+ nsupdate_server_private: "{{ external_nsupdate_keys['private']['server'] }}"
+ nsupdate_key_secret_private: "{{ external_nsupdate_keys['private']['key_secret'] }}"
+ nsupdate_key_algorithm_private: "{{ external_nsupdate_keys['private']['key_algorithm'] }}"
+ nsupdate_private_key_name: "{{ external_nsupdate_keys['private']['key_name']|default('private-' + full_dns_domain) }}"
+ when:
+ - external_nsupdate_keys is defined
+ - external_nsupdate_keys['private'] is defined
+
+- name: "Set the private DNS server to use the provisioned value"
+ set_fact:
+ nsupdate_server_private: "{{ hostvars[groups['dns'][0]].public_v4 }}"
+ nsupdate_key_secret_private: "{{ hostvars[groups['dns'][0]].nsupdate_keys['private-' + full_dns_domain].key_secret }}"
+ nsupdate_key_algorithm_private: "{{ hostvars[groups['dns'][0]].nsupdate_keys['private-' + full_dns_domain].key_algorithm }}"
+ when:
+ - nsupdate_server_private is undefined
+
+- name: "Generate the private Add section for DNS"
+ set_fact:
+ private_named_records:
+ - view: "private"
+ zone: "{{ full_dns_domain }}"
+ server: "{{ nsupdate_server_private }}"
+ key_name: "{{ nsupdate_private_key_name|default('private-' + full_dns_domain) }}"
+ key_secret: "{{ nsupdate_key_secret_private }}"
+ key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}"
+ entries: "{{ private_records }}"
+
+- name: "Generate list of public A records"
+ set_fact:
+ public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['public_v4'] } ] }}"
+ with_items: "{{ groups['cluster_hosts'] }}"
+ when: hostvars[item]['public_v4'] is defined
+
+- name: "Add wildcard records to the public A records"
+ set_fact:
+ public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['public_v4'] } ] }}"
+ with_items: "{{ groups['infra_hosts'] }}"
+ when: hostvars[item]['public_v4'] is defined
+
+- name: "Add public master cluster hostname records to the public A records (single master)"
+ set_fact:
+ public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].public_v4 } ] }}"
+ when:
+ - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined
+ - openstack_num_masters == 1
+ - not use_bastion|bool
+
+- name: "Add public master cluster hostname records to the public A records (single master behind a bastion)"
+ set_fact:
+ public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}"
+ when:
+ - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined
+ - openstack_num_masters == 1
+ - use_bastion|bool
+
+- name: "Add public master cluster hostname records to the public A records (multi-master)"
+ set_fact:
+ public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].public_v4 } ] }}"
+ when:
+ - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined
+ - openstack_num_masters > 1
+
+- name: "Set the public DNS server details to use the external value (if provided)"
+ set_fact:
+ nsupdate_server_public: "{{ external_nsupdate_keys['public']['server'] }}"
+ nsupdate_key_secret_public: "{{ external_nsupdate_keys['public']['key_secret'] }}"
+ nsupdate_key_algorithm_public: "{{ external_nsupdate_keys['public']['key_algorithm'] }}"
+ nsupdate_public_key_name: "{{ external_nsupdate_keys['public']['key_name']|default('public-' + full_dns_domain) }}"
+ when:
+ - external_nsupdate_keys is defined
+ - external_nsupdate_keys['public'] is defined
+
+- name: "Set the public DNS server details to use the provisioned value"
+ set_fact:
+ nsupdate_server_public: "{{ hostvars[groups['dns'][0]].public_v4 }}"
+ nsupdate_key_secret_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_secret }}"
+ nsupdate_key_algorithm_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_algorithm }}"
+ when:
+ - nsupdate_server_public is undefined
+
+- name: "Generate the public Add section for DNS"
+ set_fact:
+ public_named_records:
+ - view: "public"
+ zone: "{{ full_dns_domain }}"
+ server: "{{ nsupdate_server_public }}"
+ key_name: "{{ nsupdate_public_key_name|default('public-' + full_dns_domain) }}"
+ key_secret: "{{ nsupdate_key_secret_public }}"
+ key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}"
+ entries: "{{ public_records }}"
+
+- name: "Generate the final dns_records_add"
+ set_fact:
+ dns_records_add: "{{ private_named_records + public_named_records }}"
diff --git a/roles/dns-server-detect/defaults/main.yml b/roles/dns-server-detect/defaults/main.yml
new file mode 100644
index 000000000..58bd861cd
--- /dev/null
+++ b/roles/dns-server-detect/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+
+external_nsupdate_keys: {}
diff --git a/roles/dns-server-detect/tasks/main.yml b/roles/dns-server-detect/tasks/main.yml
new file mode 100644
index 000000000..cd775814f
--- /dev/null
+++ b/roles/dns-server-detect/tasks/main.yml
@@ -0,0 +1,36 @@
+---
+- fail:
+ msg: 'Missing required private DNS server(s)'
+ when:
+ - external_nsupdate_keys['private'] is undefined
+ - hostvars[groups['dns'][0]] is undefined
+
+- fail:
+ msg: 'Missing required public DNS server(s)'
+ when:
+ - external_nsupdate_keys['public'] is undefined
+ - hostvars[groups['dns'][0]] is undefined
+
+- name: "Set the private DNS server to use the external value (if provided)"
+ set_fact:
+ private_dns_server: "{{ external_nsupdate_keys['private']['server'] }}"
+ when:
+ - external_nsupdate_keys['private'] is defined
+
+- name: "Set the private DNS server to use the provisioned value"
+ set_fact:
+ private_dns_server: "{{ hostvars[groups['dns'][0]].private_v4 }}"
+ when:
+ - private_dns_server is undefined
+
+- name: "Set the public DNS server to use the external value (if provided)"
+ set_fact:
+ public_dns_server: "{{ external_nsupdate_keys['public']['server'] }}"
+ when:
+ - external_nsupdate_keys['public'] is defined
+
+- name: "Set the public DNS server to use the provisioned value"
+ set_fact:
+ public_dns_server: "{{ hostvars[groups['dns'][0]].public_v4 }}"
+ when:
+ - public_dns_server is undefined
diff --git a/roles/dns-views/defaults/main.yml b/roles/dns-views/defaults/main.yml
new file mode 100644
index 000000000..c9f8248af
--- /dev/null
+++ b/roles/dns-views/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+external_nsupdate_keys: {}
+named_private_recursion: 'yes'
+named_public_recursion: 'no'
diff --git a/roles/dns-views/tasks/main.yml b/roles/dns-views/tasks/main.yml
new file mode 100644
index 000000000..ffbad2e3f
--- /dev/null
+++ b/roles/dns-views/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+- name: "Generate ACL list for DNS server"
+ set_fact:
+ acl_list: "{{ acl_list | default([]) + [ (hostvars[item]['private_v4'] + '/32') ] }}"
+ with_items: "{{ groups['cluster_hosts'] }}"
+
+- name: "Generate the private view"
+ set_fact:
+ private_named_view:
+ - name: "private"
+ recursion: "{{ named_private_recursion }}"
+ acl_entry: "{{ acl_list }}"
+ zone:
+ - dns_domain: "{{ full_dns_domain }}"
+ forwarder: "{{ public_dns_nameservers }}"
+ when: external_nsupdate_keys['private'] is undefined
+
+- name: "Generate the public view"
+ set_fact:
+ public_named_view:
+ - name: "public"
+ recursion: "{{ named_public_recursion }}"
+ zone:
+ - dns_domain: "{{ full_dns_domain }}"
+ forwarder: "{{ public_dns_nameservers }}"
+ when: external_nsupdate_keys['public'] is undefined
+
+- name: "Generate the final named_config_views"
+ set_fact:
+ named_config_views: "{{ private_named_view|default([]) + public_named_view|default([]) }}"
diff --git a/roles/docker-storage-setup/defaults/main.yaml b/roles/docker-storage-setup/defaults/main.yaml
new file mode 100644
index 000000000..062f543ad
--- /dev/null
+++ b/roles/docker-storage-setup/defaults/main.yaml
@@ -0,0 +1,7 @@
+---
+docker_dev: "/dev/sdb"
+docker_vg: "docker-vol"
+docker_data_size: "95%VG"
+docker_dm_basesize: "3G"
+container_root_lv_name: "dockerlv"
+container_root_lv_mount_path: "/var/lib/docker"
diff --git a/roles/docker-storage-setup/tasks/main.yaml b/roles/docker-storage-setup/tasks/main.yaml
new file mode 100644
index 000000000..8606eeba4
--- /dev/null
+++ b/roles/docker-storage-setup/tasks/main.yaml
@@ -0,0 +1,46 @@
+---
+- name: stop docker
+ service: name=docker state=stopped
+
+- block:
+ - name: create the docker-storage config file
+ template:
+ src: "{{ role_path }}/templates/docker-storage-setup-overlayfs.j2"
+ dest: /etc/sysconfig/docker-storage-setup
+ owner: root
+ group: root
+ mode: 0644
+ when:
+ - ansible_distribution_version | version_compare('7.4', '>=')
+ - ansible_distribution == "RedHat"
+
+- block:
+ - name: create the docker-storage-setup config file
+ template:
+ src: "{{ role_path }}/templates/docker-storage-setup-dm.j2"
+ dest: /etc/sysconfig/docker-storage-setup
+ owner: root
+ group: root
+ mode: 0644
+ when:
+ - ansible_distribution_version | version_compare('7.4', '<')
+ - ansible_distribution == "RedHat"
+
+- block:
+ - name: create the docker-storage-setup config file for CentOS
+ template:
+ src: "{{ role_path }}/templates/docker-storage-setup-dm.j2"
+ dest: /etc/sysconfig/docker-storage-setup
+ owner: root
+ group: root
+ mode: 0644
+
+  # TODO(shadower): Find out which CentOS version supports overlay2
+ when:
+ - ansible_distribution == "CentOS"
+
+- name: Install Docker
+ package: name=docker state=present
+
+- name: start docker
+ service: name=docker state=restarted enabled=true
diff --git a/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 b/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2
new file mode 100644
index 000000000..b5869feff
--- /dev/null
+++ b/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2
@@ -0,0 +1,4 @@
+DEVS="{{ docker_dev }}"
+VG="{{ docker_vg }}"
+DATA_SIZE="{{ docker_data_size }}"
+EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}"
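+# With the role defaults this renders as, for example:
+#   DEVS="/dev/sdb"
+#   VG="docker-vol"
+#   DATA_SIZE="95%VG"
+#   EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize=3G"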
diff --git a/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 b/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2
new file mode 100644
index 000000000..d8b4a0276
--- /dev/null
+++ b/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2
@@ -0,0 +1,7 @@
+DEVS="{{ docker_dev }}"
+VG="{{ docker_vg }}"
+DATA_SIZE="{{ docker_data_size }}"
+STORAGE_DRIVER=overlay2
+CONTAINER_ROOT_LV_NAME="{{ container_root_lv_name }}"
+CONTAINER_ROOT_LV_MOUNT_PATH="{{ container_root_lv_mount_path }}"
+CONTAINER_ROOT_LV_SIZE=100%FREE
diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml
new file mode 100644
index 000000000..c49852210
--- /dev/null
+++ b/roles/hostnames/tasks/main.yaml
@@ -0,0 +1,26 @@
+---
+- name: Setting Hostname Fact
+ set_fact:
+ new_hostname: "{{ custom_hostname | default(inventory_hostname_short) }}"
+
+- name: Setting FQDN Fact
+ set_fact:
+ new_fqdn: "{{ new_hostname }}.{{ full_dns_domain }}"
+
+- name: Setting hostname and DNS domain
+ hostname: name="{{ new_fqdn }}"
+
+- name: Check for cloud.cfg
+ stat: path=/etc/cloud/cloud.cfg
+ register: cloud_cfg
+
+- name: Prevent cloud-init updates of hostname/fqdn (if applicable)
+ lineinfile:
+ dest: /etc/cloud/cloud.cfg
+ state: present
+ regexp: "{{ item.regexp }}"
+ line: "{{ item.line }}"
+ with_items:
+ - { regexp: '^ - set_hostname', line: '# - set_hostname' }
+ - { regexp: '^ - update_hostname', line: '# - update_hostname' }
+ when: cloud_cfg.stat.exists == True
diff --git a/roles/hostnames/test/inv b/roles/hostnames/test/inv
new file mode 100644
index 000000000..ffbe6e03d
--- /dev/null
+++ b/roles/hostnames/test/inv
@@ -0,0 +1,12 @@
+[all:vars]
+dns_domain=example.com
+
+[openshift_masters]
+192.168.124.41 dns_private_ip=1.1.1.41 dns_public_ip=192.168.124.41
+192.168.124.117 dns_private_ip=1.1.1.117 dns_public_ip=192.168.124.117
+
+[openshift_nodes]
+192.168.124.40 dns_private_ip=1.1.1.40 dns_public_ip=192.168.124.40
+
+#[dns]
+#192.168.124.117 dns_private_ip=1.1.1.117
diff --git a/roles/hostnames/test/roles b/roles/hostnames/test/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/roles/hostnames/test/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/roles/hostnames/test/test.retry b/roles/hostnames/test/test.retry
new file mode 100644
index 000000000..63fc08e4c
--- /dev/null
+++ b/roles/hostnames/test/test.retry
@@ -0,0 +1,3 @@
+192.168.124.117
+192.168.124.40
+192.168.124.41
diff --git a/roles/hostnames/test/test.yaml b/roles/hostnames/test/test.yaml
new file mode 100644
index 000000000..0c56aea51
--- /dev/null
+++ b/roles/hostnames/test/test.yaml
@@ -0,0 +1,4 @@
+---
+- hosts: all
+ roles:
+ - role: hostnames
diff --git a/roles/hostnames/vars/main.yaml b/roles/hostnames/vars/main.yaml
new file mode 100644
index 000000000..3eecb8dc4
--- /dev/null
+++ b/roles/hostnames/vars/main.yaml
@@ -0,0 +1,2 @@
+---
+counter: 1
diff --git a/roles/hostnames/vars/records.yaml b/roles/hostnames/vars/records.yaml
new file mode 100644
index 000000000..0cadc8181
--- /dev/null
+++ b/roles/hostnames/vars/records.yaml
@@ -0,0 +1,28 @@
+---
+- name: "Building Records"
+ set_fact:
+ dns_records_add:
+ - view: private
+ zone: example.com
+ entries:
+ - type: A
+ hostname: master1.example.com
+ ip: 172.16.15.94
+ - type: A
+ hostname: node1.example.com
+ ip: 172.16.15.86
+ - type: A
+ hostname: node2.example.com
+ ip: 172.16.15.87
+ - view: public
+ zone: example.com
+ entries:
+ - type: A
+ hostname: master1.example.com
+ ip: 10.3.10.116
+ - type: A
+ hostname: node1.example.com
+ ip: 10.3.11.46
+ - type: A
+ hostname: node2.example.com
+ ip: 10.3.12.6
diff --git a/roles/node-network-manager/tasks/main.yml b/roles/node-network-manager/tasks/main.yml
new file mode 100644
index 000000000..6a17855e7
--- /dev/null
+++ b/roles/node-network-manager/tasks/main.yml
@@ -0,0 +1,22 @@
+---
+- name: install NetworkManager
+ package:
+ name: NetworkManager
+ state: present
+
+- name: configure NetworkManager
+ lineinfile:
+ dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}"
+ regexp: '^{{ item }}='
+ line: '{{ item }}=yes'
+ state: present
+ create: yes
+ with_items:
+ - 'USE_PEERDNS'
+ - 'NM_CONTROLLED'
+
+- name: enable and start NetworkManager
+ service:
+ name: NetworkManager
+ state: restarted
+ enabled: yes
diff --git a/roles/openshift-prep/defaults/main.yml b/roles/openshift-prep/defaults/main.yml
new file mode 100644
index 000000000..c8c9a00c0
--- /dev/null
+++ b/roles/openshift-prep/defaults/main.yml
@@ -0,0 +1,13 @@
+---
+# Defines whether to install the required packages and update them all
+manage_packages: true
+install_debug_packages: false
+required_packages:
+ - wget
+ - git
+ - net-tools
+ - bind-utils
+ - bridge-utils
+debug_packages:
+ - bash-completion
+ - vim-enhanced
diff --git a/roles/openshift-prep/tasks/main.yml b/roles/openshift-prep/tasks/main.yml
new file mode 100644
index 000000000..5e484e75f
--- /dev/null
+++ b/roles/openshift-prep/tasks/main.yml
@@ -0,0 +1,4 @@
+---
+# Starting Point for OpenShift Installation and Configuration
+- include: prerequisites.yml
+ tags: [prerequisites]
diff --git a/roles/openshift-prep/tasks/prerequisites.yml b/roles/openshift-prep/tasks/prerequisites.yml
new file mode 100644
index 000000000..b7601aa48
--- /dev/null
+++ b/roles/openshift-prep/tasks/prerequisites.yml
@@ -0,0 +1,37 @@
+---
+- name: "Cleaning yum repositories"
+ command: "yum clean all"
+
+- name: "Install required packages"
+ yum:
+ name: "{{ item }}"
+ state: latest
+ with_items: "{{ required_packages }}"
+ when: manage_packages|bool
+
+- name: "Install debug packages (optional)"
+ yum:
+ name: "{{ item }}"
+ state: latest
+ with_items: "{{ debug_packages }}"
+ when: install_debug_packages|bool
+
+- name: "Update all packages (this can take a very long time)"
+ yum:
+ name: '*'
+ state: latest
+ when: manage_packages|bool
+
+- name: "Verify hostname"
+  shell: hostnamectl status | awk '/Static hostname/ { print $3 }'
+ register: hostname_fqdn
+
+- name: "Set hostname if required"
+ hostname:
+ name: "{{ ansible_fqdn }}"
+ when: hostname_fqdn.stdout != ansible_fqdn
+
+- name: "Verify SELinux is enforcing"
+ fail:
+ msg: "SELinux is required for OpenShift and has been detected as '{{ ansible_selinux.config_mode }}'"
+ when: ansible_selinux.config_mode != "enforcing"
diff --git a/roles/openstack-create-cinder-registry/tasks/main.yaml b/roles/openstack-create-cinder-registry/tasks/main.yaml
new file mode 100644
index 000000000..6e9d1c2e7
--- /dev/null
+++ b/roles/openstack-create-cinder-registry/tasks/main.yaml
@@ -0,0 +1,5 @@
+---
+- os_volume:
+ display_name: "{{ cinder_hosted_registry_name }}"
+ size: "{{ cinder_hosted_registry_size_gb }}"
+ register: cinder_registry_volume
diff --git a/roles/openstack-stack/README.md b/roles/openstack-stack/README.md
new file mode 100644
index 000000000..32a2b49f1
--- /dev/null
+++ b/roles/openstack-stack/README.md
@@ -0,0 +1,9 @@
+# Role openstack-stack
+
+Role for spinning up instances using OpenStack Heat.
+
+## To Test
+
+```
+ansible-playbook openshift-ansible-contrib/roles/openstack-stack/test/stack-create-test.yml
+```
diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml
new file mode 100644
index 000000000..a24e684cc
--- /dev/null
+++ b/roles/openstack-stack/defaults/main.yml
@@ -0,0 +1,21 @@
+---
+
+stack_state: 'present'
+
+ssh_ingress_cidr: 0.0.0.0/0
+node_ingress_cidr: 0.0.0.0/0
+master_ingress_cidr: 0.0.0.0/0
+lb_ingress_cidr: 0.0.0.0/0
+bastion_ingress_cidr: 0.0.0.0/0
+num_etcd: 0
+num_masters: 1
+num_nodes: 1
+num_dns: 1
+num_infra: 1
+nodes_to_remove: []
+etcd_volume_size: 2
+dns_volume_size: 1
+lb_volume_size: 5
+use_bastion: False
+ui_ssh_tunnel: False
+provider_network: False
diff --git a/roles/openstack-stack/meta/main.yml b/roles/openstack-stack/meta/main.yml
new file mode 100644
index 000000000..fdda41bb3
--- /dev/null
+++ b/roles/openstack-stack/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - role: common
diff --git a/roles/openstack-stack/tasks/cleanup.yml b/roles/openstack-stack/tasks/cleanup.yml
new file mode 100644
index 000000000..258334a6b
--- /dev/null
+++ b/roles/openstack-stack/tasks/cleanup.yml
@@ -0,0 +1,6 @@
+---
+
+- name: cleanup temp files
+ file:
+ path: "{{ stack_template_pre.path }}"
+ state: absent
diff --git a/roles/openstack-stack/tasks/generate-templates.yml b/roles/openstack-stack/tasks/generate-templates.yml
new file mode 100644
index 000000000..0ff50a095
--- /dev/null
+++ b/roles/openstack-stack/tasks/generate-templates.yml
@@ -0,0 +1,26 @@
+---
+- name: create HOT stack template prefix
+ register: stack_template_pre
+ tempfile:
+ state: directory
+ prefix: openshift-ansible
+
+- name: set template paths
+ set_fact:
+ stack_template_path: "{{ stack_template_pre.path }}/stack.yaml"
+ user_data_template_path: "{{ stack_template_pre.path }}/user-data"
+
+- name: generate HOT stack template from jinja2 template
+ template:
+ src: heat_stack.yaml.j2
+ dest: "{{ stack_template_path }}"
+
+- name: generate HOT server template from jinja2 template
+ template:
+ src: heat_stack_server.yaml.j2
+ dest: "{{ stack_template_pre.path }}/server.yaml"
+
+- name: generate user_data from jinja2 template
+ template:
+ src: user_data.j2
+ dest: "{{ user_data_template_path }}"
diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml
new file mode 100644
index 000000000..983567026
--- /dev/null
+++ b/roles/openstack-stack/tasks/main.yml
@@ -0,0 +1,27 @@
+---
+
+- name: Generate the templates
+ include: generate-templates.yml
+ when:
+ - stack_state == 'present'
+
+- name: Handle the Stack (create/delete)
+ ignore_errors: False
+ register: stack_create
+ os_stack:
+ name: "{{ stack_name }}"
+ state: "{{ stack_state }}"
+ template: "{{ stack_template_path | default(omit) }}"
+ wait: yes
+
+# NOTE(bogdando) OS::Neutron::Subnet doesn't support live updates for
+# dns_nameservers, so we can't do that for the "create stack" task.
+- include: subnet_update_dns_servers.yaml
+ when:
+ - private_dns_server is defined
+ - stack_state == 'present'
+
+- name: CleanUp
+ include: cleanup.yml
+ when:
+ - stack_state == 'present'
diff --git a/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml b/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml
new file mode 100644
index 000000000..af28fc98f
--- /dev/null
+++ b/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml
@@ -0,0 +1,9 @@
+---
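+# Build the nameserver list: the private DNS server comes first (falling
+# back to the first public nameserver when undefined), followed by the
+# public nameservers, de-duplicated.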
+- name: Live update the subnet's DNS servers
+ os_subnet:
+ name: openshift-ansible-{{ stack_name }}-subnet
+ network_name: openshift-ansible-{{ stack_name }}-net
+ state: present
+ use_default_subnetpool: yes
+ dns_nameservers: "{{ [private_dns_server|default(public_dns_nameservers[0])]|union(public_dns_nameservers)|unique }}"
+ when: not provider_network
diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2
new file mode 100644
index 000000000..2359842a5
--- /dev/null
+++ b/roles/openstack-stack/templates/heat_stack.yaml.j2
@@ -0,0 +1,888 @@
+heat_template_version: 2016-10-14
+
+description: OpenShift cluster
+
+parameters:
+
+outputs:
+
+ etcd_names:
+ description: Name of the etcds
+ value: { get_attr: [ etcd, name ] }
+
+ etcd_ips:
+ description: IPs of the etcds
+ value: { get_attr: [ etcd, private_ip ] }
+
+ etcd_floating_ips:
+ description: Floating IPs of the etcds
+ value: { get_attr: [ etcd, floating_ip ] }
+
+ master_names:
+ description: Name of the masters
+ value: { get_attr: [ masters, name ] }
+
+ master_ips:
+ description: IPs of the masters
+ value: { get_attr: [ masters, private_ip ] }
+
+ master_floating_ips:
+ description: Floating IPs of the masters
+ value: { get_attr: [ masters, floating_ip ] }
+
+ node_names:
+ description: Name of the nodes
+ value: { get_attr: [ compute_nodes, name ] }
+
+ node_ips:
+ description: IPs of the nodes
+ value: { get_attr: [ compute_nodes, private_ip ] }
+
+ node_floating_ips:
+ description: Floating IPs of the nodes
+ value: { get_attr: [ compute_nodes, floating_ip ] }
+
+  infra_names:
+    description: Name of the infra nodes
+    value: { get_attr: [ infra_nodes, name ] }
+
+  infra_ips:
+    description: IPs of the infra nodes
+    value: { get_attr: [ infra_nodes, private_ip ] }
+
+  infra_floating_ips:
+    description: Floating IPs of the infra nodes
+    value: { get_attr: [ infra_nodes, floating_ip ] }
+
+{% if num_dns|int > 0 %}
+ dns_name:
+ description: Name of the DNS
+ value:
+ get_attr:
+ - dns
+ - name
+
+ dns_floating_ips:
+ description: Floating IPs of the DNS
+ value: { get_attr: [ dns, floating_ip ] }
+
+ dns_private_ips:
+ description: Private IPs of the DNS
+ value: { get_attr: [ dns, private_ip ] }
+{% endif %}
+
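+# Skip floating IPs entirely when the nodes sit on a provider network or
+# are reached via a bastion host.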
+conditions:
+ no_floating: {% if provider_network or use_bastion|bool %}true{% else %}false{% endif %}
+
+resources:
+
+{% if not provider_network %}
+ net:
+ type: OS::Neutron::Net
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: {{ stack_name }}
+
+ subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-subnet
+ params:
+ cluster_id: {{ stack_name }}
+ network: { get_resource: net }
+ cidr:
+ str_replace:
+ template: subnet_24_prefix.0/24
+ params:
+ subnet_24_prefix: {{ subnet_prefix }}
+ allocation_pools:
+ - start:
+ str_replace:
+ template: subnet_24_prefix.3
+ params:
+ subnet_24_prefix: {{ subnet_prefix }}
+ end:
+ str_replace:
+ template: subnet_24_prefix.254
+ params:
+ subnet_24_prefix: {{ subnet_prefix }}
+ dns_nameservers:
+{% for nameserver in dns_nameservers %}
+ - {{ nameserver }}
+{% endfor %}
+
+{% if openshift_use_flannel|default(False)|bool %}
+ data_net:
+ type: OS::Neutron::Net
+ properties:
+ name: openshift-ansible-{{ stack_name }}-data-net
+ port_security_enabled: false
+
+ data_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ name: openshift-ansible-{{ stack_name }}-data-subnet
+ network: { get_resource: data_net }
+ cidr: {{ osm_cluster_network_cidr|default('10.128.0.0/14') }}
+ gateway_ip: null
+{% endif %}
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-router
+ params:
+ cluster_id: {{ stack_name }}
+ external_gateway_info:
+ network: {{ external_network }}
+
+ interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: subnet }
+
+{% endif %}
+
+# keypair:
+# type: OS::Nova::KeyPair
+# properties:
+# name:
+# str_replace:
+# template: openshift-ansible-cluster_id-keypair
+# params:
+# cluster_id: {{ stack_name }}
+# public_key: {{ ssh_public_key }}
+
+ common-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-common-secgrp
+ params:
+ cluster_id: {{ stack_name }}
+ description:
+ str_replace:
+ template: Basic ssh/icmp security group for cluster_id OpenShift cluster
+ params:
+ cluster_id: {{ stack_name }}
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: {{ ssh_ingress_cidr }}
+{% if use_bastion|bool %}
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: {{ bastion_ingress_cidr }}
+{% endif %}
+ - direction: ingress
+ protocol: icmp
+ remote_ip_prefix: {{ ssh_ingress_cidr }}
+
+{% if openstack_flat_secgrp|default(False)|bool %}
+ flat-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-flat-secgrp
+ params:
+ cluster_id: {{ stack_name }}
+ description:
+ str_replace:
+ template: Security group for cluster_id OpenShift cluster
+ params:
+ cluster_id: {{ stack_name }}
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 4001
+ port_range_max: 4001
+ - direction: ingress
+ protocol: tcp
+ port_range_min: {{ openshift_master_api_port|default(8443) }}
+ port_range_max: {{ openshift_master_api_port|default(8443) }}
+ - direction: ingress
+ protocol: tcp
+ port_range_min: {{ openshift_master_console_port|default(8443) }}
+ port_range_max: {{ openshift_master_console_port|default(8443) }}
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 8053
+ port_range_max: 8053
+ - direction: ingress
+ protocol: udp
+ port_range_min: 8053
+ port_range_max: 8053
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 24224
+ port_range_max: 24224
+ - direction: ingress
+ protocol: udp
+ port_range_min: 24224
+ port_range_max: 24224
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 2224
+ port_range_max: 2224
+ - direction: ingress
+ protocol: udp
+ port_range_min: 5404
+ port_range_max: 5405
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 9090
+ port_range_max: 9090
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 2379
+ port_range_max: 2380
+ remote_mode: remote_group_id
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 10250
+ port_range_max: 10250
+ remote_mode: remote_group_id
+ - direction: ingress
+ protocol: udp
+ port_range_min: 10250
+ port_range_max: 10250
+ remote_mode: remote_group_id
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 10255
+ port_range_max: 10255
+ remote_mode: remote_group_id
+ - direction: ingress
+ protocol: udp
+ port_range_min: 10255
+ port_range_max: 10255
+ remote_mode: remote_group_id
+ - direction: ingress
+ protocol: udp
+ port_range_min: 4789
+ port_range_max: 4789
+ remote_mode: remote_group_id
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 30000
+ port_range_max: 32767
+ remote_ip_prefix: {{ node_ingress_cidr }}
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 30000
+ port_range_max: 32767
+ remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24"
+{% else %}
+ master-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-master-secgrp
+ params:
+ cluster_id: {{ stack_name }}
+ description:
+ str_replace:
+ template: Security group for cluster_id OpenShift cluster master
+ params:
+ cluster_id: {{ stack_name }}
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 4001
+ port_range_max: 4001
+ - direction: ingress
+ protocol: tcp
+ port_range_min: {{ openshift_master_api_port|default(8443) }}
+ port_range_max: {{ openshift_master_api_port|default(8443) }}
+ - direction: ingress
+ protocol: tcp
+ port_range_min: {{ openshift_master_console_port|default(8443) }}
+ port_range_max: {{ openshift_master_console_port|default(8443) }}
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 8053
+ port_range_max: 8053
+ - direction: ingress
+ protocol: udp
+ port_range_min: 8053
+ port_range_max: 8053
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 24224
+ port_range_max: 24224
+ - direction: ingress
+ protocol: udp
+ port_range_min: 24224
+ port_range_max: 24224
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 2224
+ port_range_max: 2224
+ - direction: ingress
+ protocol: udp
+ port_range_min: 5404
+ port_range_max: 5405
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 9090
+ port_range_max: 9090
+{% if openshift_use_flannel|default(False)|bool %}
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 2379
+ port_range_max: 2379
+{% endif %}
+
+ etcd-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-etcd-secgrp
+ params:
+ cluster_id: {{ stack_name }}
+ description:
+ str_replace:
+ template: Security group for cluster_id etcd cluster
+ params:
+ cluster_id: {{ stack_name }}
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 2379
+ port_range_max: 2379
+ remote_mode: remote_group_id
+ remote_group_id: { get_resource: master-secgrp }
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 2380
+ port_range_max: 2380
+ remote_mode: remote_group_id
+
+ node-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-node-secgrp
+ params:
+ cluster_id: {{ stack_name }}
+ description:
+ str_replace:
+ template: Security group for cluster_id OpenShift cluster nodes
+ params:
+ cluster_id: {{ stack_name }}
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 10250
+ port_range_max: 10250
+ remote_mode: remote_group_id
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 10255
+ port_range_max: 10255
+ remote_mode: remote_group_id
+ - direction: ingress
+ protocol: udp
+ port_range_min: 10255
+ port_range_max: 10255
+ remote_mode: remote_group_id
+ - direction: ingress
+ protocol: udp
+ port_range_min: 4789
+ port_range_max: 4789
+ remote_mode: remote_group_id
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 30000
+ port_range_max: 32767
+ remote_ip_prefix: {{ node_ingress_cidr }}
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 30000
+ port_range_max: 32767
+ remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24"
+{% endif %}
+
+ infra-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-infra-secgrp
+ params:
+ cluster_id: {{ stack_name }}
+ description:
+ str_replace:
+ template: Security group for cluster_id OpenShift infrastructure cluster nodes
+ params:
+ cluster_id: {{ stack_name }}
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 80
+ port_range_max: 80
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 443
+ port_range_max: 443
+
+{% if num_dns|int > 0 %}
+ dns-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-dns-secgrp
+ params:
+ cluster_id: {{ stack_name }}
+ description:
+ str_replace:
+ template: Security group for cluster_id cluster DNS
+ params:
+ cluster_id: {{ stack_name }}
+ rules:
+ - direction: ingress
+ protocol: udp
+ port_range_min: 53
+ port_range_max: 53
+ remote_ip_prefix: {{ node_ingress_cidr }}
+ - direction: ingress
+ protocol: udp
+ port_range_min: 53
+ port_range_max: 53
+ remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24"
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 53
+ port_range_max: 53
+ remote_ip_prefix: {{ node_ingress_cidr }}
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 53
+ port_range_max: 53
+ remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24"
+{% endif %}
+
+{% if num_masters|int > 1 or ui_ssh_tunnel|bool %}
+ lb-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name: openshift-ansible-{{ stack_name }}-lb-secgrp
+ description: Security group for {{ stack_name }} cluster Load Balancer
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: {{ openshift_master_api_port | default(8443) }}
+ port_range_max: {{ openshift_master_api_port | default(8443) }}
+ remote_ip_prefix: {{ lb_ingress_cidr | default(bastion_ingress_cidr) }}
+{% if ui_ssh_tunnel|bool %}
+ - direction: ingress
+ protocol: tcp
+ port_range_min: {{ openshift_master_api_port | default(8443) }}
+ port_range_max: {{ openshift_master_api_port | default(8443) }}
+ remote_ip_prefix: {{ ssh_ingress_cidr }}
+{% endif %}
+{% if openshift_master_console_port is defined and openshift_master_console_port != openshift_master_api_port %}
+ - direction: ingress
+ protocol: tcp
+ port_range_min: {{ openshift_master_console_port | default(8443) }}
+ port_range_max: {{ openshift_master_console_port | default(8443) }}
+ remote_ip_prefix: {{ lb_ingress_cidr | default(bastion_ingress_cidr) }}
+{% endif %}
+{% endif %}
+
+ etcd:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: {{ num_etcd }}
+ resource_def:
+ type: server.yaml
+ properties:
+ name:
+ str_replace:
+ template: k8s_type-%index%.cluster_id
+ params:
+ cluster_id: {{ stack_name }}
+ k8s_type: {{ etcd_hostname | default('etcd') }}
+ cluster_env: {{ public_dns_domain }}
+ cluster_id: {{ stack_name }}
+ group:
+ str_replace:
+ template: k8s_type.cluster_id
+ params:
+ k8s_type: etcds
+ cluster_id: {{ stack_name }}
+ type: etcd
+ image: {{ openstack_etcd_image | default(openstack_image) }}
+ flavor: {{ etcd_flavor }}
+ key_name: {{ ssh_public_key }}
+{% if provider_network %}
+ net: {{ provider_network }}
+ net_name: {{ provider_network }}
+{% else %}
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
+ net_name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: {{ stack_name }}
+{% endif %}
+ secgrp:
+ - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} }
+ - { get_resource: common-secgrp }
+ floating_network:
+ if:
+ - no_floating
+ - null
+ - {{ external_network }}
+{% if use_bastion|bool or provider_network %}
+ attach_float_net: false
+{% endif %}
+ volume_size: {{ etcd_volume_size }}
+{% if not provider_network %}
+ depends_on:
+ - interface
+{% endif %}
+
+{% if master_server_group_policies|length > 0 %}
+ master_server_group:
+ type: OS::Nova::ServerGroup
+ properties:
+ name: master_server_group
+ policies: {{ master_server_group_policies }}
+{% endif %}
+{% if infra_server_group_policies|length > 0 %}
+ infra_server_group:
+ type: OS::Nova::ServerGroup
+ properties:
+ name: infra_server_group
+ policies: {{ infra_server_group_policies }}
+{% endif %}
+{% if num_masters|int > 1 %}
+ loadbalancer:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: 1
+ resource_def:
+ type: server.yaml
+ properties:
+ name:
+ str_replace:
+ template: k8s_type-%index%.cluster_id
+ params:
+ cluster_id: {{ stack_name }}
+ k8s_type: {{ lb_hostname | default('lb') }}
+ cluster_env: {{ public_dns_domain }}
+ cluster_id: {{ stack_name }}
+ group:
+ str_replace:
+ template: k8s_type.cluster_id
+ params:
+ k8s_type: lb
+ cluster_id: {{ stack_name }}
+ type: lb
+ image: {{ openstack_lb_image | default(openstack_image) }}
+ flavor: {{ lb_flavor }}
+ key_name: {{ ssh_public_key }}
+{% if provider_network %}
+ net: {{ provider_network }}
+ net_name: {{ provider_network }}
+{% else %}
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
+ net_name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: {{ stack_name }}
+{% endif %}
+ secgrp:
+ - { get_resource: lb-secgrp }
+ - { get_resource: common-secgrp }
+{% if not provider_network %}
+ floating_network: {{ external_network }}
+{% endif %}
+ volume_size: {{ lb_volume_size }}
+{% if not provider_network %}
+ depends_on:
+ - interface
+{% endif %}
+{% endif %}
+
+ masters:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: {{ num_masters }}
+ resource_def:
+ type: server.yaml
+ properties:
+ name:
+ str_replace:
+ template: k8s_type-%index%.cluster_id
+ params:
+ cluster_id: {{ stack_name }}
+ k8s_type: {{ master_hostname | default('master')}}
+ cluster_env: {{ public_dns_domain }}
+ cluster_id: {{ stack_name }}
+ group:
+ str_replace:
+ template: k8s_type.cluster_id
+ params:
+ k8s_type: masters
+ cluster_id: {{ stack_name }}
+ type: master
+ image: {{ openstack_master_image | default(openstack_image) }}
+ flavor: {{ master_flavor }}
+ key_name: {{ ssh_public_key }}
+{% if provider_network %}
+ net: {{ provider_network }}
+ net_name: {{ provider_network }}
+{% else %}
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
+ net_name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: {{ stack_name }}
+{% if openshift_use_flannel|default(False)|bool %}
+ attach_data_net: true
+ data_net: { get_resource: data_net }
+ data_subnet: { get_resource: data_subnet }
+{% endif %}
+{% endif %}
+ secgrp:
+{% if openstack_flat_secgrp|default(False)|bool %}
+ - { get_resource: flat-secgrp }
+{% else %}
+ - { get_resource: master-secgrp }
+ - { get_resource: node-secgrp }
+{% if num_etcd|int == 0 %}
+ - { get_resource: etcd-secgrp }
+{% endif %}
+{% endif %}
+ - { get_resource: common-secgrp }
+ floating_network:
+ if:
+ - no_floating
+ - null
+ - {{ external_network }}
+{% if use_bastion|bool or provider_network %}
+ attach_float_net: false
+{% endif %}
+ volume_size: {{ master_volume_size }}
+{% if master_server_group_policies|length > 0 %}
+ scheduler_hints:
+ group: { get_resource: master_server_group }
+{% endif %}
+{% if not provider_network %}
+ depends_on:
+ - interface
+{% endif %}
+
+ compute_nodes:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: {{ num_nodes }}
+ removal_policies:
+ - resource_list: {{ nodes_to_remove }}
+ resource_def:
+ type: server.yaml
+ properties:
+ name:
+ str_replace:
+ template: sub_type_k8s_type-%index%.cluster_id
+ params:
+ cluster_id: {{ stack_name }}
+ sub_type_k8s_type: {{ node_hostname | default('app-node') }}
+ cluster_env: {{ public_dns_domain }}
+ cluster_id: {{ stack_name }}
+ group:
+ str_replace:
+ template: k8s_type.cluster_id
+ params:
+ k8s_type: nodes
+ cluster_id: {{ stack_name }}
+ type: node
+ subtype: app
+ node_labels:
+{% for k, v in openshift_cluster_node_labels.app.items() %}
+ {{ k|e }}: {{ v|e }}
+{% endfor %}
+ image: {{ openstack_node_image | default(openstack_image) }}
+ flavor: {{ node_flavor }}
+ key_name: {{ ssh_public_key }}
+{% if provider_network %}
+ net: {{ provider_network }}
+ net_name: {{ provider_network }}
+{% else %}
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
+ net_name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: {{ stack_name }}
+{% if openshift_use_flannel|default(False)|bool %}
+ attach_data_net: true
+ data_net: { get_resource: data_net }
+ data_subnet: { get_resource: data_subnet }
+{% endif %}
+{% endif %}
+ secgrp:
+ - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} }
+ - { get_resource: common-secgrp }
+ floating_network:
+ if:
+ - no_floating
+ - null
+ - {{ external_network }}
+{% if use_bastion|bool or provider_network %}
+ attach_float_net: false
+{% endif %}
+ volume_size: {{ node_volume_size }}
+{% if not provider_network %}
+ depends_on:
+ - interface
+{% endif %}
+
+ infra_nodes:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: {{ num_infra }}
+ resource_def:
+ type: server.yaml
+ properties:
+ name:
+ str_replace:
+ template: sub_type_k8s_type-%index%.cluster_id
+ params:
+ cluster_id: {{ stack_name }}
+ sub_type_k8s_type: {{ infra_hostname | default('infranode') }}
+ cluster_env: {{ public_dns_domain }}
+ cluster_id: {{ stack_name }}
+ group:
+ str_replace:
+ template: k8s_type.cluster_id
+ params:
+ k8s_type: infra
+ cluster_id: {{ stack_name }}
+ type: node
+ subtype: infra
+ node_labels:
+{% for k, v in openshift_cluster_node_labels.infra.items() %}
+ {{ k|e }}: {{ v|e }}
+{% endfor %}
+ image: {{ openstack_infra_image | default(openstack_image) }}
+ flavor: {{ infra_flavor }}
+ key_name: {{ ssh_public_key }}
+{% if provider_network %}
+ net: {{ provider_network }}
+ net_name: {{ provider_network }}
+{% else %}
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
+ net_name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: {{ stack_name }}
+{% if openshift_use_flannel|default(False)|bool %}
+ attach_data_net: true
+ data_net: { get_resource: data_net }
+ data_subnet: { get_resource: data_subnet }
+{% endif %}
+{% endif %}
+ secgrp:
+# TODO(bogdando) filter only required node rules into infra-secgrp
+{% if openstack_flat_secgrp|default(False)|bool %}
+ - { get_resource: flat-secgrp }
+{% else %}
+ - { get_resource: node-secgrp }
+{% endif %}
+{% if ui_ssh_tunnel|bool and num_masters|int < 2 %}
+ - { get_resource: lb-secgrp }
+{% endif %}
+ - { get_resource: infra-secgrp }
+ - { get_resource: common-secgrp }
+{% if not provider_network %}
+ floating_network: {{ external_network }}
+{% endif %}
+ volume_size: {{ infra_volume_size }}
+{% if infra_server_group_policies|length > 0 %}
+ scheduler_hints:
+ group: { get_resource: infra_server_group }
+{% endif %}
+{% if not provider_network %}
+ depends_on:
+ - interface
+{% endif %}
+
+{% if num_dns|int > 0 %}
+ dns:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: {{ num_dns }}
+ resource_def:
+ type: server.yaml
+ properties:
+ name:
+ str_replace:
+ template: k8s_type-%index%.cluster_id
+ params:
+ cluster_id: {{ stack_name }}
+ k8s_type: {{ dns_hostname | default('dns') }}
+ cluster_env: {{ public_dns_domain }}
+ cluster_id: {{ stack_name }}
+ group:
+ str_replace:
+ template: k8s_type.cluster_id
+ params:
+ k8s_type: dns
+ cluster_id: {{ stack_name }}
+ type: dns
+ image: {{ openstack_dns_image | default(openstack_image) }}
+ flavor: {{ dns_flavor }}
+ key_name: {{ ssh_public_key }}
+{% if provider_network %}
+ net: {{ provider_network }}
+ net_name: {{ provider_network }}
+{% else %}
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
+ net_name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: {{ stack_name }}
+{% endif %}
+ secgrp:
+ - { get_resource: dns-secgrp }
+ - { get_resource: common-secgrp }
+{% if not provider_network %}
+ floating_network: {{ external_network }}
+{% endif %}
+ volume_size: {{ dns_volume_size }}
+{% if not provider_network %}
+ depends_on:
+ - interface
+{% endif %}
+{% endif %}
diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2
new file mode 100644
index 000000000..9ffe721a5
--- /dev/null
+++ b/roles/openstack-stack/templates/heat_stack_server.yaml.j2
@@ -0,0 +1,270 @@
+heat_template_version: 2016-10-14
+
+description: OpenShift cluster server
+
+parameters:
+
+ name:
+ type: string
+ label: Name
+ description: Name
+
+ group:
+ type: string
+ label: Host Group
+ description: The Primary Ansible Host Group
+ default: host
+
+ cluster_env:
+ type: string
+ label: Cluster environment
+ description: Environment of the cluster
+
+ cluster_id:
+ type: string
+ label: Cluster ID
+ description: Identifier of the cluster
+
+ type:
+ type: string
+ label: Type
+    description: Type of the server (master or node)
+
+ subtype:
+ type: string
+ label: Sub-type
+    description: Sub-type (app or infra) for nodes, 'default' otherwise
+ default: default
+
+ key_name:
+ type: string
+ label: Key name
+ description: Key name of keypair
+
+ image:
+ type: string
+ label: Image
+ description: Name of the image
+
+ flavor:
+ type: string
+ label: Flavor
+ description: Name of the flavor
+
+ net:
+ type: string
+ label: Net ID
+ description: Net resource
+
+ net_name:
+ type: string
+ label: Net name
+ description: Net name
+
+{% if not provider_network %}
+ subnet:
+ type: string
+ label: Subnet ID
+ description: Subnet resource
+{% endif %}
+
+{% if openshift_use_flannel|default(False)|bool %}
+ attach_data_net:
+ type: boolean
+ default: false
+ label: Attach-data-net
+ description: A switch for data port connection
+
+ data_net:
+ type: string
+ default: ''
+ label: Net ID
+ description: Net resource
+
+{% if not provider_network %}
+ data_subnet:
+ type: string
+ default: ''
+ label: Subnet ID
+ description: Subnet resource
+{% endif %}
+{% endif %}
+
+ secgrp:
+ type: comma_delimited_list
+ label: Security groups
+ description: Security group resources
+
+  attach_float_net:
+    type: boolean
+    default: true
+    label: Attach-float-net
+    description: A switch for floating network port connection
+
+{% if not provider_network %}
+ floating_network:
+ type: string
+ default: ''
+ label: Floating network
+ description: Network to allocate floating IP from
+{% endif %}
+
+ availability_zone:
+ type: string
+    description: The availability zone in which to launch the instance.
+ default: nova
+
+ volume_size:
+ type: number
+ description: Size of the volume to be created.
+ default: 1
+ constraints:
+ - range: { min: 1, max: 1024 }
+      description: must be between 1 and 1024 GB.
+
+ node_labels:
+ type: json
+ description: OpenShift Node Labels
+ default: {"region": "default" }
+
+ scheduler_hints:
+ type: json
+ description: Server scheduler hints.
+ default: {}
+
+outputs:
+
+ name:
+ description: Name of the server
+ value: { get_attr: [ server, name ] }
+
+ private_ip:
+ description: Private IP of the server
+ value:
+ get_attr:
+ - server
+ - addresses
+ - { get_param: net_name }
+ - 0
+ - addr
+
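+  # With a provider network the port has a single address entry (index 0);
+  # otherwise the floating IP shows up as the second entry (index 1).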
+ floating_ip:
+ description: Floating IP of the server
+ value:
+ get_attr:
+ - server
+ - addresses
+ - { get_param: net_name }
+{% if provider_network %}
+ - 0
+{% else %}
+ - 1
+{% endif %}
+ - addr
+
+conditions:
+ no_floating: {not: { get_param: attach_float_net} }
+{% if openshift_use_flannel|default(False)|bool %}
+ no_data_subnet: {not: { get_param: attach_data_net} }
+{% endif %}
+
+resources:
+
+ server:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: name }
+ key_name: { get_param: key_name }
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ networks:
+{% if openshift_use_flannel|default(False)|bool %}
+ if:
+ - no_data_subnet
+{% if use_trunk_ports|default(false)|bool %}
+ - - port: { get_attr: [trunk-port, port_id] }
+{% else %}
+ - - port: { get_resource: port }
+{% endif %}
+{% if use_trunk_ports|default(false)|bool %}
+ - - port: { get_attr: [trunk-port, port_id] }
+{% else %}
+ - - port: { get_resource: port }
+ - port: { get_resource: data_port }
+{% endif %}
+
+{% else %}
+{% if use_trunk_ports|default(false)|bool %}
+ - port: { get_attr: [trunk-port, port_id] }
+{% else %}
+ - port: { get_resource: port }
+{% endif %}
+{% endif %}
+ user_data:
+ get_file: user-data
+ user_data_format: RAW
+ user_data_update_policy: IGNORE
+ metadata:
+ group: { get_param: group }
+ environment: { get_param: cluster_env }
+ clusterid: { get_param: cluster_id }
+ host-type: { get_param: type }
+ sub-host-type: { get_param: subtype }
+ node_labels: { get_param: node_labels }
+ scheduler_hints: { get_param: scheduler_hints }
+
+{% if use_trunk_ports|default(false)|bool %}
+ trunk-port:
+ type: OS::Neutron::Trunk
+ properties:
+ name: { get_param: name }
+ port: { get_resource: port }
+{% endif %}
+
+ port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: net }
+{% if not provider_network %}
+ fixed_ips:
+ - subnet: { get_param: subnet }
+{% endif %}
+ security_groups: { get_param: secgrp }
+
+{% if openshift_use_flannel|default(False)|bool %}
+ data_port:
+ type: OS::Neutron::Port
+ condition: { not: no_data_subnet }
+ properties:
+ network: { get_param: data_net }
+ port_security_enabled: false
+{% if not provider_network %}
+ fixed_ips:
+ - subnet: { get_param: data_subnet }
+{% endif %}
+{% endif %}
+
+{% if not provider_network %}
+ floating-ip:
+ condition: { not: no_floating }
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network: { get_param: floating_network }
+ port_id: { get_resource: port }
+{% endif %}
+
+{% if not ephemeral_volumes|default(false)|bool %}
+ cinder_volume:
+ type: OS::Cinder::Volume
+ properties:
+ size: { get_param: volume_size }
+ availability_zone: { get_param: availability_zone }
+
+ volume_attachment:
+ type: OS::Cinder::VolumeAttachment
+ properties:
+ volume_id: { get_resource: cinder_volume }
+ instance_uuid: { get_resource: server }
+ mountpoint: /dev/sdb
+{% endif %}
diff --git a/roles/openstack-stack/templates/user_data.j2 b/roles/openstack-stack/templates/user_data.j2
new file mode 100644
index 000000000..eb65f7cec
--- /dev/null
+++ b/roles/openstack-stack/templates/user_data.j2
@@ -0,0 +1,13 @@
+#cloud-config
+disable_root: true
+
+system_info:
+ default_user:
+ name: openshift
+ sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
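+# Allow the openshift user to sudo without a tty so that Ansible can run
+# tasks over plain SSH sessions.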
+write_files:
+ - path: /etc/sudoers.d/00-openshift-no-requiretty
+    permissions: '0440'
+ content: |
+ Defaults:openshift !requiretty
diff --git a/roles/openstack-stack/test/roles b/roles/openstack-stack/test/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/roles/openstack-stack/test/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/roles/openstack-stack/test/stack-create-test.yml b/roles/openstack-stack/test/stack-create-test.yml
new file mode 100644
index 000000000..d80472193
--- /dev/null
+++ b/roles/openstack-stack/test/stack-create-test.yml
@@ -0,0 +1,18 @@
+---
+- hosts: localhost
+ gather_facts: True
+ become: False
+ roles:
+ - role: openstack-stack
+ stack_name: test-stack
+ dns_domain: "{{ public_dns_domain }}"
+ dns_nameservers: "{{ public_dns_nameservers }}"
+ subnet_prefix: "{{ openstack_subnet_prefix }}"
+ ssh_public_key: "{{ openstack_ssh_public_key }}"
+ openstack_image: "{{ openstack_default_image_name }}"
+ etcd_flavor: "{{ openstack_default_flavor }}"
+ master_flavor: "{{ openstack_default_flavor }}"
+ node_flavor: "{{ openstack_default_flavor }}"
+ infra_flavor: "{{ openstack_default_flavor }}"
+ dns_flavor: "{{ openstack_default_flavor }}"
+ external_network: "{{ openstack_external_network_name }}"
diff --git a/roles/static_inventory/defaults/main.yml b/roles/static_inventory/defaults/main.yml
new file mode 100644
index 000000000..871700f8c
--- /dev/null
+++ b/roles/static_inventory/defaults/main.yml
@@ -0,0 +1,29 @@
+---
+# Whether to checkpoint the dynamic inventory into a static one
+refresh_inventory: True
+inventory: static
+inventory_path: ~/openstack-inventory
+
+# Whether to configure a bastion
+use_bastion: true
+
+# SSH user/key/options to access hosts via bastion
+ssh_user: openshift
+ssh_options: >-
+ -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
+ -o ConnectTimeout=90 -o ControlMaster=auto -o ControlPersist=270s
+ -o ServerAliveInterval=30 -o GSSAPIAuthentication=no
+
+# SSH key to access nodes
+private_ssh_key: ~/.ssh/openshift
+
+# The path to store the generated SSH config for accessing bastion/hosts
+ssh_config_path: /tmp/ssh.config.ansible
+
+# The IP:port for an SSH tunnel to reach the UI on the first master
+# via the bastion node (requires sudo on the Ansible control node)
+ui_ssh_tunnel: False
+ui_port: "{{ openshift_master_api_port | default(8443) }}"
+target_ip: "{{ hostvars[groups['masters.' + stack_name|quote][0]].private_v4 }}"
+
+openstack_private_network: private
diff --git a/roles/static_inventory/meta/main.yml b/roles/static_inventory/meta/main.yml
new file mode 100644
index 000000000..fdda41bb3
--- /dev/null
+++ b/roles/static_inventory/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - role: common
diff --git a/roles/static_inventory/tasks/checkpoint.yml b/roles/static_inventory/tasks/checkpoint.yml
new file mode 100644
index 000000000..c0365bd3d
--- /dev/null
+++ b/roles/static_inventory/tasks/checkpoint.yml
@@ -0,0 +1,17 @@
+---
+- name: check for static inventory dir
+ stat:
+ path: "{{ inventory_path }}"
+ register: stat_inventory_path
+
+- name: create static inventory dir
+ file:
+ path: "{{ inventory_path }}"
+ state: directory
+ mode: 0750
+ when: not stat_inventory_path.stat.exists
+
+- name: create inventory from template
+ template:
+ src: inventory.j2
+ dest: "{{ inventory_path }}/hosts"
diff --git a/roles/static_inventory/tasks/filter_out_new_app_nodes.yaml b/roles/static_inventory/tasks/filter_out_new_app_nodes.yaml
new file mode 100644
index 000000000..826efe78d
--- /dev/null
+++ b/roles/static_inventory/tasks/filter_out_new_app_nodes.yaml
@@ -0,0 +1,15 @@
+---
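+# Called once per node (loop_var: node). The set_fact below only runs when
+# all conditions hold, so its registered "skipped" state is used afterwards
+# to tell whether the node was classified as a new app node.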
+- name: Add all new app nodes to new_app_nodes
+ when:
+ - 'oc_old_app_nodes is defined'
+ - 'oc_old_app_nodes | list'
+ - 'node.name not in oc_old_app_nodes'
+ - 'node["metadata"]["sub-host-type"] == "app"'
+ register: result
+ set_fact:
+ new_app_nodes: '{{ new_app_nodes }} + [ {{ node }} ]'
+
+- name: If the node was added to new_nodes, remove it from registered nodes
+ set_fact:
+ registered_nodes: '{{ registered_nodes | difference([ node ]) }}'
+ when: 'not result | skipped'
diff --git a/roles/static_inventory/tasks/main.yml b/roles/static_inventory/tasks/main.yml
new file mode 100644
index 000000000..3dab62df2
--- /dev/null
+++ b/roles/static_inventory/tasks/main.yml
@@ -0,0 +1,25 @@
+---
+- name: Remove any existing inventory
+ file:
+ path: "{{ inventory_path }}/hosts"
+ state: absent
+
+- name: Refresh the inventory
+ meta: refresh_inventory
+
+- name: Generate in-memory inventory
+ include: openstack.yml
+
+- name: Checkpoint in-memory data into a static inventory
+ include: checkpoint.yml
+
+- name: Generate SSH config for accessing hosts via bastion
+ include: sshconfig.yml
+ when: use_bastion|bool
+
+- name: Configure SSH tunneling to access UI
+ include: sshtun.yml
+ become: true
+ when:
+ - use_bastion|bool
+ - ui_ssh_tunnel|bool
diff --git a/roles/static_inventory/tasks/openstack.yml b/roles/static_inventory/tasks/openstack.yml
new file mode 100644
index 000000000..adf78c966
--- /dev/null
+++ b/roles/static_inventory/tasks/openstack.yml
@@ -0,0 +1,120 @@
+---
+- no_log: true
+ block:
+ - name: fetch all nodes from openstack shade dynamic inventory
+ command: shade-inventory --list
+ register: registered_nodes_output
+ when: refresh_inventory|bool
+
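+  # The json_query filters below select hosts from the raw shade inventory
+  # by the instance metadata set at provisioning time (clusterid, group).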
+ - name: set fact for openstack inventory cluster nodes
+ set_fact:
+ registered_nodes: "{{ (registered_nodes_output.stdout | from_json) | json_query(q) }}"
+ vars:
+ q: "[] | [?metadata.clusterid=='{{stack_name}}']"
+ when:
+ - refresh_inventory|bool
+
+ - name: set_fact for openstack inventory nodes
+ set_fact:
+ registered_bastion_nodes: "{{ (registered_nodes_output.stdout | from_json) | json_query(q) }}"
+ registered_nodes_floating: "{{ (registered_nodes_output.stdout | from_json) | json_query(q2) }}"
+ vars:
+ q: "[] | [?metadata.group=='infra.{{stack_name}}']"
+ q2: "[] | [?metadata.clusterid=='{{stack_name}}'] | [?public_v4!='']"
+ when:
+ - refresh_inventory|bool
+
+ - name: set_fact for openstack inventory nodes with provider network
+ set_fact:
+ registered_nodes_floating: "{{ (registered_nodes_output.stdout | from_json) | json_query(q) }}"
+ vars:
+ q: "[] | [?metadata.clusterid=='{{stack_name}}'] | [?public_v4=='']"
+ when:
+ - refresh_inventory|bool
+ - openstack_provider_network_name|default(None)
+
+ - name: Add cluster nodes w/o floating IPs to inventory
+ with_items: "{{ registered_nodes|difference(registered_nodes_floating) }}"
+ add_host:
+ name: '{{ item.name }}'
+ ansible_host: >-
+ {% if use_bastion|bool -%}
+ {{ item.name }}
+ {%- else -%}
+ {%- set node = registered_nodes | json_query("[?name=='" + item.name + "']") -%}
+ {{ node[0].addresses[openstack_private_network|quote][0].addr }}
+ {%- endif %}
+ ansible_fqdn: '{{ item.name }}'
+ ansible_user: '{{ ssh_user }}'
+ ansible_private_key_file: '{{ private_ssh_key }}'
+ ansible_ssh_extra_args: '-F {{ ssh_config_path }}'
+ private_v4: >-
+ {% set node = registered_nodes | json_query("[?name=='" + item.name + "']") -%}
+ {{ node[0].addresses[openstack_private_network|quote][0].addr }}
+
+ - name: Add cluster nodes with floating IPs to inventory
+ with_items: "{{ registered_nodes_floating }}"
+ add_host:
+ name: '{{ item.name }}'
+ ansible_host: >-
+ {% if use_bastion|bool -%}
+ {{ item.name }}
+ {%- elif openstack_provider_network_name|default(None) -%}
+ {{ item.private_v4 }}
+ {%- else -%}
+ {{ item.public_v4 }}
+ {%- endif %}
+ ansible_fqdn: '{{ item.name }}'
+ ansible_user: '{{ ssh_user }}'
+ ansible_private_key_file: '{{ private_ssh_key }}'
+ ansible_ssh_extra_args: '-F {{ ssh_config_path }}'
+ private_v4: >-
+ {% set node = registered_nodes | json_query("[?name=='" + item.name + "']") -%}
+ {{ node[0].addresses[openstack_private_network|quote][0].addr }}
+ public_v4: >-
+ {% if openstack_provider_network_name|default(None) -%}
+ {{ item.private_v4 }}
+ {%- else -%}
+ {{ item.public_v4 }}
+ {%- endif %}
+
+ # Split registered_nodes into old nodes and new app nodes
+ # Add new app nodes to new_nodes host group for upscaling
+ - name: Create new_app_nodes variable
+ set_fact:
+ new_app_nodes: []
+
+ - name: Filter new app nodes out of registered_nodes
+ include: filter_out_new_app_nodes.yaml
+ with_items: "{{ registered_nodes }}"
+ loop_control:
+ loop_var: node
+
+ - name: Add new app nodes to the new_nodes section (if a deployment already exists)
+ with_items: "{{ new_app_nodes }}"
+ add_host:
+ name: "{{ item.name }}"
+ groups: new_nodes, app
+
+  - name: Add the rest of the cluster nodes to their corresponding groups
+ with_items: "{{ registered_nodes }}"
+ add_host:
+ name: '{{ item.name }}'
+ groups: '{{ item.metadata.group }}'
+
+ - name: Add bastion node to inventory
+ add_host:
+ name: bastion
+ groups: bastions
+ ansible_host: '{{ registered_bastion_nodes[0].public_v4 }}'
+ ansible_fqdn: '{{ registered_bastion_nodes[0].name }}'
+ ansible_user: '{{ ssh_user }}'
+ ansible_private_key_file: '{{ private_ssh_key }}'
+ ansible_ssh_extra_args: '-F {{ ssh_config_path }}'
+ private_v4: >-
+ {% set node = registered_nodes | json_query("[?name=='" + registered_bastion_nodes[0].name + "']") -%}
+ {{ node[0].addresses[openstack_private_network|quote][0].addr }}
+ public_v4: '{{ registered_bastion_nodes[0].public_v4 }}'
+ when:
+ - registered_bastion_nodes is defined
+ - use_bastion|bool
diff --git a/roles/static_inventory/tasks/sshconfig.yml b/roles/static_inventory/tasks/sshconfig.yml
new file mode 100644
index 000000000..7119fe6ff
--- /dev/null
+++ b/roles/static_inventory/tasks/sshconfig.yml
@@ -0,0 +1,13 @@
+---
+- name: set ssh proxy command prefix for accessing nodes via bastion
+ set_fact:
+ ssh_proxy_command: >-
+ ssh {{ ssh_options }}
+ -i {{ private_ssh_key }}
+ {{ ssh_user }}@{{ hostvars['bastion'].ansible_host }}
+
+- name: regenerate ssh config
+ template:
+ src: openstack_ssh_config.j2
+ dest: "{{ ssh_config_path }}"
+ mode: 0644
diff --git a/roles/static_inventory/tasks/sshtun.yml b/roles/static_inventory/tasks/sshtun.yml
new file mode 100644
index 000000000..b0e4c832c
--- /dev/null
+++ b/roles/static_inventory/tasks/sshtun.yml
@@ -0,0 +1,15 @@
+---
+- name: Create ssh tunnel systemd service
+ template:
+ src: ssh-tunnel.service.j2
+ dest: /etc/systemd/system/ssh-tunnel.service
+ mode: 0644
+
+- name: reload the systemctl daemon after file update
+ command: systemctl daemon-reload
+
+- name: Enable ssh tunnel service
+ service:
+ name: ssh-tunnel
+ enabled: true
+ state: restarted
diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2
new file mode 100644
index 000000000..9dfbe3a5b
--- /dev/null
+++ b/roles/static_inventory/templates/inventory.j2
@@ -0,0 +1,104 @@
+# BEGIN Autogenerated hosts
+{% for host in groups['all'] %}
+{% if hostvars[host].get('ansible_connection', '') == 'local' %}
+{{ host }} ansible_connection=local
+{% else %}
+
+{{ host }}{% if 'ansible_host' in hostvars[host]
+%} ansible_host={{ hostvars[host]['ansible_host'] }}{% endif %}
+{% if 'private_v4' in hostvars[host]
+%} private_v4={{ hostvars[host]['private_v4'] }}{% endif %}
+{% if 'public_v4' in hostvars[host]
+%} public_v4={{ hostvars[host]['public_v4'] }}{% endif %}
+{% if 'ansible_user' in hostvars[host]
+%} ansible_user={{ hostvars[host]['ansible_user'] }}{% endif %}
+{% if 'ansible_private_key_file' in hostvars[host] and hostvars[host]['ansible_private_key_file']
+%} ansible_private_key_file={{ hostvars[host]['ansible_private_key_file'] }}{% endif %}
+{% if use_bastion|bool and 'ansible_ssh_extra_args' in hostvars[host]
+%} ansible_ssh_extra_args={{ hostvars[host]['ansible_ssh_extra_args']|quote }}{% endif %} openshift_hostname={{ host }}
+
+{% endif %}
+{% endfor %}
+# END autogenerated hosts
+
+#[all:vars]
+# For all group_vars, see ./group_vars/all.yml
+[infra_hosts:vars]
+openshift_node_labels={{ openshift_cluster_node_labels.infra | to_json | quote }}
+
+[app:vars]
+openshift_node_labels={{ openshift_cluster_node_labels.app | to_json | quote }}
+
+# Create an OSEv3 group that contains the master, nodes, etcd, and lb groups.
+# The lb group lets Ansible configure HAProxy as the load balancing solution.
+# Comment lb out if your load balancer is pre-configured.
+[cluster_hosts:children]
+OSEv3
+dns
+
+[OSEv3:children]
+nodes
+etcd
+lb
+new_nodes
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+
+# For OSEv3 normal group vars, see ./group_vars/OSEv3.yml
+
+{% if cinder_registry_volume is defined and 'volume' in cinder_registry_volume %}
+openshift_hosted_registry_storage_openstack_volumeID="{{ cinder_registry_volume.id }}"
+openshift_hosted_registry_storage_volume_size="{{ cinder_registry_volume.volume.size }}Gi"
+{% endif %}
+
+
+# Host Groups
+
+[masters:children]
+masters.{{ stack_name }}
+
+[etcd:children]
+etcd.{{ stack_name }}
+{% if 'etcd' not in groups or groups['etcd']|length == 0 %}masters.{{ stack_name }}{% endif %}
+
+[nodes:children]
+masters
+infra.{{ stack_name }}
+nodes.{{ stack_name }}
+
+[infra_hosts:children]
+infra.{{ stack_name }}
+
+[app:children]
+nodes.{{ stack_name }}
+
+[dns:children]
+dns.{{ stack_name }}
+
+[lb:children]
+lb.{{ stack_name }}
+
+[new_nodes:children]
+
+# Empty placeholders for all groups of the cluster nodes
+[masters.{{ stack_name }}]
+[etcd.{{ stack_name }}]
+[infra.{{ stack_name }}]
+[nodes.{{ stack_name }}]
+[app.{{ stack_name }}]
+[dns.{{ stack_name }}]
+[lb.{{ stack_name }}]
+[new_nodes.{{ stack_name }}]
+
+# BEGIN Autogenerated groups
+{% for group in groups %}
+{% if group not in ['ungrouped', 'all'] %}
+[{{ group }}]
+{% for host in groups[group] %}
+{{ host }}
+{% endfor %}
+
+{% endif %}
+{% endfor %}
+# END Autogenerated groups
diff --git a/roles/static_inventory/templates/openstack_ssh_config.j2 b/roles/static_inventory/templates/openstack_ssh_config.j2
new file mode 100644
index 000000000..ad5d1253a
--- /dev/null
+++ b/roles/static_inventory/templates/openstack_ssh_config.j2
@@ -0,0 +1,21 @@
+Host *
+ IdentitiesOnly yes
+
+Host bastion
+ Hostname {{ hostvars['bastion'].ansible_host }}
+ IdentityFile {{ hostvars['bastion'].ansible_private_key_file }}
+ User {{ ssh_user }}
+ StrictHostKeyChecking no
+ UserKnownHostsFile=/dev/null
+
+{% for host in groups['all'] | difference(groups['bastions']) %}
+
+Host {{ host }}
+ Hostname {{ hostvars[host].ansible_host }}
+ ProxyCommand {{ ssh_proxy_command }} -W {{ hostvars[host].private_v4 }}:22
+ IdentityFile {{ hostvars[host].ansible_private_key_file }}
+ User {{ ssh_user }}
+ StrictHostKeyChecking no
+ UserKnownHostsFile=/dev/null
+
+{% endfor %}
diff --git a/roles/static_inventory/templates/ssh-tunnel.service.j2 b/roles/static_inventory/templates/ssh-tunnel.service.j2
new file mode 100644
index 000000000..0d1cf8f79
--- /dev/null
+++ b/roles/static_inventory/templates/ssh-tunnel.service.j2
@@ -0,0 +1,20 @@
+[Unit]
+Description=Set up ssh tunneling for OpenShift cluster UI
+After=network.target
+
+[Service]
+ExecStart=/usr/bin/ssh -NT -o \
+ ServerAliveInterval=60 -o \
+ UserKnownHostsFile=/dev/null -o \
+ StrictHostKeyChecking=no -o \
+ ExitOnForwardFailure=no -i \
+ {{ private_ssh_key }} {{ ssh_user }}@{{ hostvars['bastion'].ansible_host }} \
+ -L 0.0.0.0:{{ ui_port }}:{{ target_ip }}:{{ ui_port }}
+
+# Restart no more often than every 5 seconds to avoid tripping the
+# StartLimitInterval rate limit
+RestartSec=5
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/subscription-manager/README.md b/roles/subscription-manager/README.md
new file mode 100644
index 000000000..748de282c
--- /dev/null
+++ b/roles/subscription-manager/README.md
@@ -0,0 +1,156 @@
+# Red Hat Subscription Manager Ansible Role
+
+## Parameters
+
+This role depends on user-specified variables. These can be set in the inventory file, in group_vars, or passed to the playbook from the CLI. No values are set by default, which disables this role. The variables are:
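+
+For example, a group_vars file might set them as follows (hypothetical
+values):
+
+```
+rhsm_satellite: satellite.example.com
+rhsm_org: example_org
+rhsm_activationkey: rhel-7-ose-3-1
+rhsm_repos:
+  - rhel-7-server-rpms
+  - rhel-7-server-extras-rpms
+```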
+
+### rhsm_satellite
+
+Subscription Manager server hostname. If using a Satellite server, set the FQDN here. If using RHSM Hosted, this value must be left blank, none, or false.
+
+Default: none
+
+### rhsm_username
+
+Subscription Manager username. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this.
+
+Default: none
+
+### rhsm_password
+
+Subscription Manager password. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this.
+
+NOTE: If this variable is specified on the command line or set in a variable file, it may leave your password exposed. For this reason you may prefer to use an Activation Key if using Satellite. For RHSM Hosted, your password must be specified. There are two ways to provide the password to the Ansible playbook without exposing it to prying eyes.
+
+1. The first method is to use a **vars_prompt** to collect the password up front, one time, for the playbook. Ansible will not display the password if the prompt is configured as **private**, and the task will not display the password on the CLI. This is a good method, as it supports automating the task across every host with only one password entry. To enable **vars_prompt**, add the following to the very top of your playbook after the **hosts** declaration and before any **pre_tasks** section:
+
+ ```
+ - hosts: localhost
+ # Add the following lines after a -hosts: declaration and before pre_tasks:
+ # Start of vars_prompt code block
+ vars_prompt:
+ - name: "rhsm_password"
+ prompt: "Subscription Manager password"
+ confirm: yes
+ private: yes
+ # End of vars_prompt code block
+ pre_tasks:
+ ```
+
+2. A second method is to use an encrypted file via **ansible-vault**. Unlike the previous method, this does not require modifying any code, but it does require more work to create and encrypt the file. To accomplish this, first create a file containing at least the **rhsm_password** variable (additional variables may optionally be placed there to encrypt them as well):
+ 1. Create a file to contain the variable such as **secrets.yml**:
+
+ ```
+ ---
+ rhsm_password: "my_secret_password"
+ # other variables can optionally be placed here as well
+ ```
+
+ 2. Encrypt the file with **ansible-vault**:
+
+ ```
+ $ ansible-vault encrypt secrets.yml
+ Vault password:
+ Confirm Vault password:
+ Encryption successful
+ ```
+
+ 3. When executing **ansible-playbook** specify **--ask-vault-pass** to be prompted for the decryption password, and also specify the location of the **secrets.yml** as such:
+
+ ```
+ $ ansible-playbook --ask-vault-pass --extra-vars=@secrets.yml --extra-vars="rhsm_username=myusername" <other playbook options>
+ ```
+
+ NOTE: Optionally the file containing the encrypted variables can be decrypted with **ansible-vault** and the **--ask-vault-pass** option omitted to prevent any password prompting (for automated runs) and the file can be encrypted after the run. This can be used if an external system such as Jenkins would handle the decryption/encryption outside of Ansible.
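+
+   For instance, an external job could run a sequence like this
+   (hypothetical):
+
+   ```
+   $ ansible-vault decrypt secrets.yml
+   $ ansible-playbook --extra-vars=@secrets.yml <other playbook options>
+   $ ansible-vault encrypt secrets.yml
+   ```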
+
+Default: none
+
+### rhsm_org
+
+Optional Subscription Manager Satellite Organization. Required for Satellite, ignored if using RHSM Hosted.
+
+Default: none
+
+### rhsm_activationkey
+
+Optional Subscription Manager Satellite Activation Key, use this instead of **rhsm_username** and **rhsm_password** if using Satellite to provide repositories and authentication in a key instead.
+
+Default: none
+
+### rhsm_pool
+
+Optional Subscription Manager pool; determine this by running **subscription-manager list --available** on a registered system. Valid for RHSM Hosted or Satellite. Specifying **rhsm_activationkey** causes this option to be ignored.
+
+Default: none
+
+### rhsm_repos
+
+Optional list of repositories to enable. If left blank, it is expected that the **rhsm_activationkey** will specify repos instead. If populated, a **subscription-manager repos --disable=\*** will be run and each of the specified repos explicitly enabled. Valid for RHSM Hosted or Satellite.
+
+NOTE: If specifying this value in an inventory file as opposed to group_vars, be sure to define it as a proper list as such:
+
+rhsm_repos='["rhel-7-server-rpms", "rhel-7-server-ose-3.1-rpms", "rhel-7-server-extras-rpms"]'
+
+Default: none
+
+## Calling This Role
+Calling this role is done at both **pre_tasks** and **roles** sections of a playbook and optionally a **vars_prompt**.
+
+### vars_prompt
+Unfortunately **vars_prompt** can only be used at the play level before role tasks are executed, so this is the only place it can go. It also cannot be shown conditionally. For this reason it is not included in this role by default. A better method may be using a file containing the password variable encrypted with **ansible-vault**. See the **rhsm_password** section for more details.
+
+To Add a prompt to capture **rhsm_password**:
+
+```
+- hosts: localhost
+ # Add the following lines after a -hosts: declaration and before pre_tasks:
+ # Start of vars_prompt code block
+ vars_prompt:
+ - name: "rhsm_password"
+ prompt: "Subscription Manager password"
+ confirm: yes
+ private: yes
+ # End of vars_prompt code block
+ pre_tasks:
+```
+
+### pre-tasks
+
+A number of variable checks are performed before any tasks to ensure the proper parameters are set. To include these checks call the pre_task yaml before any roles:
+
+```
+pre_tasks:
+- include: roles/subscription-manager/pre_tasks/pre_tasks.yml
+```
+
+### roles
+
+The bulk of the work is performed in the main.yml for this role. The pre-task play will set a variable which can be checked to conditionally include this role, as such:
+
+```
+roles:
+ - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager' }
+```
+
+## Running Playbooks with this Role
+
+- To register to RHSM Hosted or Satellite with a username and plain text password (NOTE: This may retain your password in your CLI history):
+
+ ```
+  $ ansible-playbook --extra-vars="rhsm_username=vvaldez rhsm_password=my_secret_password" <other playbook options>
+ ```
+
+- To register to RHSM Hosted or Satellite with username and an encrypted file containing the password:
+
+ ```
+ $ ansible-playbook --ask-vault-pass --extra-vars=@secrets.yml --extra-vars="rhsm_username=myusername" <other playbook options>
+
+ ```
+
+- To register to a Satellite server with an activation key:
+
+ ```
+ $ ansible-playbook --extra-vars="rhsm_satellite=satellite.example.com rhsm_org=example_org rhsm_activationkey=rhel-7-ose-3-1 <other playbook options>"
+
+ ```
+- To ignore any Subscription Manager activities, simply do not set any parameters.
diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml
new file mode 100644
index 000000000..464670fc0
--- /dev/null
+++ b/roles/subscription-manager/pre_tasks/pre_tasks.yml
@@ -0,0 +1,45 @@
+---
+- name: "Set password fact"
+ set_fact:
+ rhsm_password: "{{ rhsm_password | default(None) }}"
+ no_log: true
+
+- name: "Initialize Subscription Manager fact"
+ set_fact:
+ rhsm_register: true
+
+- name: "Determine if Subscription Manager should be used"
+ set_fact:
+ rhsm_register: false
+ when:
+ - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == ''
+ - rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == ''
+ - rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == ''
+ - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == ''
+ - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == ''
+ - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == ''
+
+- name: "Validate Subscription Manager organization is set"
+ fail: msg="Cannot register to a Satellite server without a value for the Organization via 'rhsm_org'"
+ when:
+ - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == ''
+ - rhsm_satellite is defined
+ - rhsm_satellite is not none
+ - rhsm_satellite|trim != ''
+ - rhsm_register
+
+- name: "Validate Subscription Manager authentication is defined"
+ fail: msg="Cannot register without ('rhsm_username' and 'rhsm_password') or 'rhsm_activationkey' variables set. See the README.md for details on securely prompting for a password"
+ when:
+ - (rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '') or (rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '')
+ - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == ''
+ - rhsm_register
+
+- name: "Validate activation key and Hosted are not requested together"
+ fail: msg="Cannot register to RHSM Hosted with 'rhsm_activationkey'"
+ when:
+ - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == ''
+ - rhsm_activationkey is defined
+ - rhsm_activationkey is not none
+ - rhsm_activationkey|trim != ''
+ - rhsm_register
diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml
new file mode 100644
index 000000000..e4c9fdffb
--- /dev/null
+++ b/roles/subscription-manager/tasks/main.yml
@@ -0,0 +1,150 @@
+---
+- name: "Initialize rhsm_password variable if vars_prompt was used"
+ set_fact:
+ rhsm_password: "{{ hostvars.localhost.rhsm_password }}"
+ when:
+ - rhsm_password is not defined or rhsm_password is none or rhsm_password|trim == ''
+
+- name: "Initializing Subscription Manager authentication method"
+ set_fact:
+ rhsm_authentication: false
+
+# 'rhsm_activationkey' will take precedence even if 'rhsm_username' and 'rhsm_password' are also set
+- name: "Setting Subscription Manager Activation Key Fact"
+ set_fact:
+ rhsm_authentication: "key"
+ when:
+ - rhsm_activationkey is defined
+ - rhsm_activationkey is not none
+ - rhsm_activationkey|trim != ''
+ - not rhsm_authentication
+
+# If 'rhsm_username' and 'rhsm_password' are set but not 'rhsm_activationkey', set 'rhsm_authentication' to password
+- name: "Setting Subscription Manager Username and Password Fact"
+ set_fact:
+ rhsm_authentication: "password"
+ when:
+ - rhsm_username is defined
+ - rhsm_username is not none
+ - rhsm_username|trim != ''
+ - rhsm_password is defined
+ - rhsm_password is not none
+ - rhsm_password|trim != ''
+ - not rhsm_authentication
+
+- name: "Initializing registration status"
+ set_fact:
+ registered: false
+
+- name: "Checking subscription status (a failure means it is not registered and will be)"
+ command: "/usr/bin/subscription-manager status"
+ ignore_errors: yes
+ changed_when: no
+ register: check_if_registered
+
+- name: "Set registration fact if system is already registered"
+ set_fact:
+ registered: true
+ when: check_if_registered.rc == 0
+
+- name: "Cleaning any old subscriptions"
+ command: "/usr/bin/subscription-manager clean"
+ when:
+ - not registered
+ - rhsm_authentication is defined
+ register: cleaningsubs_result
+ until: cleaningsubs_result.rc == 0
+ retries: 10
+ delay: 1
+
+- name: "Install Satellite certificate"
+ command: "rpm -Uvh --force http://{{ rhsm_satellite }}/pub/katello-ca-consumer-latest.noarch.rpm"
+ when:
+ - not registered
+ - rhsm_satellite is defined
+ - rhsm_satellite is not none
+ - rhsm_satellite|trim != ''
+
+- name: "Register to Satellite using activation key"
+ command: "/usr/bin/subscription-manager register --activationkey={{ rhsm_activationkey }} --org='{{ rhsm_org }}'"
+ when:
+ - not registered
+ - rhsm_authentication == 'key'
+ - rhsm_satellite is defined
+ - rhsm_satellite is not none
+ - rhsm_satellite|trim != ''
+ register: register_key_result
+ until: register_key_result.rc == 0
+ retries: 10
+ delay: 1
+
+# This can apply to either Hosted or Satellite
+- name: "Register using username and password"
+ command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }}"
+ no_log: true
+ when:
+ - not registered
+ - rhsm_authentication == "password"
+ - rhsm_org is not defined or rhsm_org is none or rhsm_org|trim == ''
+ register: register_userpw_result
+ until: register_userpw_result.rc == 0
+ retries: 10
+ delay: 1
+
+# This can apply to either Hosted or Satellite
+- name: "Register using username, password and organization"
+ command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }} --org={{ rhsm_org }}"
+ no_log: true
+ when:
+ - not registered
+ - rhsm_authentication == "password"
+ - rhsm_org is defined
+ - rhsm_org is not none
+ - rhsm_org|trim != ''
+ register: register_userpworg_result
+ until: register_userpworg_result.rc == 0
+ retries: 10
+ delay: 1
+
+- name: "Auto-attach to Subscription Manager Pool"
+ command: "/usr/bin/subscription-manager attach --auto"
+ when:
+ - not registered
+ - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == ''
+ register: autoattach_result
+ until: autoattach_result.rc == 0
+ retries: 10
+ delay: 1
+
+- name: "Attach to a specific pool"
+ command: "/usr/bin/subscription-manager attach --pool={{ rhsm_pool }}"
+ when:
+ - rhsm_pool is defined
+ - rhsm_pool is not none
+ - rhsm_pool|trim != ''
+ - not registered
+ register: attachpool_result
+ until: attachpool_result.rc == 0
+ retries: 10
+ delay: 1
+
+- name: "Disable all repositories"
+ command: "/usr/bin/subscription-manager repos --disable=*"
+ when:
+ - not registered
+ - rhsm_repos is defined
+ - rhsm_repos is not none
+ - rhsm_repos|trim != ''
+
+- name: "Enable specified repositories"
+ command: "/usr/bin/subscription-manager repos --enable={{ item }}"
+ with_items: "{{ rhsm_repos }}"
+ when:
+ - not registered
+ - rhsm_repos is defined
+ - rhsm_repos is not none
+ - rhsm_repos|trim != ''
+ register: enablerepos_result
+ until: enablerepos_result.rc == 0
+ retries: 10
+ delay: 1