-rw-r--r--  README_OSE.md | 191
-rw-r--r--  README_libvirt.md | 78
-rwxr-xr-x  bin/cluster | 91
-rw-r--r--  filter_plugins/oo_filters.py | 16
-rw-r--r--  inventory/aws/group_vars/all | 2
-rw-r--r--  inventory/aws/hosts/ec2.ini (renamed from inventory/aws/ec2.ini) | 0
-rwxr-xr-x  inventory/aws/hosts/ec2.py (renamed from inventory/aws/ec2.py) | 0
-rw-r--r--  inventory/aws/hosts/hosts | 1
-rw-r--r--  inventory/byo/group_vars/all | 28
-rw-r--r--  inventory/byo/hosts | 26
-rw-r--r--  inventory/gce/group_vars/all | 2
-rwxr-xr-x  inventory/gce/hosts/gce.py (renamed from inventory/gce/gce.py) | 0
-rw-r--r--  inventory/gce/hosts/hosts | 1
-rw-r--r--  inventory/libvirt/group_vars/all | 2
-rw-r--r--  inventory/libvirt/hosts | 2
-rw-r--r--  inventory/libvirt/hosts/hosts | 1
-rw-r--r--  inventory/libvirt/hosts/libvirt.ini | 20
-rwxr-xr-x  inventory/libvirt/hosts/libvirt_generic.py | 179
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml | 36
-rw-r--r--  playbooks/aws/openshift-cluster/launch.yml | 73
-rw-r--r--  playbooks/aws/openshift-cluster/list.yml | 15
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml (renamed from playbooks/aws/openshift-cluster/launch_instances.yml) | 20
-rw-r--r--  playbooks/aws/openshift-cluster/terminate.yml | 24
-rw-r--r--  playbooks/aws/openshift-cluster/update.yml | 25
-rw-r--r--  playbooks/aws/openshift-cluster/vars.yml | 19
-rw-r--r--  playbooks/aws/openshift-master/config.yml | 27
-rw-r--r--  playbooks/aws/openshift-master/launch.yml | 8
-rw-r--r--  playbooks/aws/openshift-master/terminate.yml | 17
-rw-r--r--  playbooks/aws/openshift-master/vars.yml | 3
-rw-r--r--  playbooks/aws/openshift-node/config.yml | 110
-rw-r--r--  playbooks/aws/openshift-node/launch.yml | 10
-rw-r--r--  playbooks/aws/openshift-node/terminate.yml | 17
-rw-r--r--  playbooks/aws/openshift-node/vars.yml | 3
-rw-r--r--  playbooks/byo/openshift-master/config.yml | 20
-rw-r--r--  playbooks/byo/openshift-node/config.yml | 90
-rw-r--r--  playbooks/byo/openshift_facts.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 4
l---------  playbooks/common/openshift-cluster/filter_plugins (renamed from playbooks/libvirt/openshift-master/filter_plugins) | 0
l---------  playbooks/common/openshift-cluster/roles (renamed from playbooks/libvirt/openshift-master/roles) | 0
-rw-r--r--  playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml | 11
-rw-r--r--  playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml | 11
-rw-r--r--  playbooks/common/openshift-cluster/update_repos_and_packages.yml | 7
-rw-r--r--  playbooks/common/openshift-master/config.yml | 19
l---------  playbooks/common/openshift-master/filter_plugins (renamed from playbooks/libvirt/openshift-node/filter_plugins) | 0
l---------  playbooks/common/openshift-master/roles | 1
-rw-r--r--  playbooks/common/openshift-node/config.yml | 121
l---------  playbooks/common/openshift-node/filter_plugins | 1
l---------  playbooks/common/openshift-node/roles | 1
-rw-r--r--  playbooks/gce/openshift-cluster/config.yml | 37
-rw-r--r--  playbooks/gce/openshift-cluster/launch.yml | 72
-rw-r--r--  playbooks/gce/openshift-cluster/list.yml | 15
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml (renamed from playbooks/gce/openshift-cluster/launch_instances.yml) | 26
-rw-r--r--  playbooks/gce/openshift-cluster/terminate.yml | 22
-rw-r--r--  playbooks/gce/openshift-cluster/update.yml | 25
-rw-r--r--  playbooks/gce/openshift-cluster/vars.yml | 14
-rw-r--r--  playbooks/gce/openshift-master/config.yml | 24
-rw-r--r--  playbooks/gce/openshift-master/launch.yml | 6
-rw-r--r--  playbooks/gce/openshift-master/terminate.yml | 11
-rw-r--r--  playbooks/gce/openshift-master/vars.yml | 3
-rw-r--r--  playbooks/gce/openshift-node/config.yml | 106
-rw-r--r--  playbooks/gce/openshift-node/launch.yml | 6
-rw-r--r--  playbooks/gce/openshift-node/terminate.yml | 11
-rw-r--r--  playbooks/gce/openshift-node/vars.yml | 3
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml | 38
-rw-r--r--  playbooks/libvirt/openshift-cluster/launch.yml | 81
-rw-r--r--  playbooks/libvirt/openshift-cluster/launch_instances.yml | 102
-rw-r--r--  playbooks/libvirt/openshift-cluster/list.yml | 50
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml | 6
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml | 27
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml | 27
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 104
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/domain.xml (renamed from playbooks/libvirt/templates/domain.xml) | 13
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/meta-data | 3
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/network.xml | 23
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/user-data | 23
-rw-r--r--  playbooks/libvirt/openshift-cluster/terminate.yml | 69
-rw-r--r--  playbooks/libvirt/openshift-cluster/update.yml | 18
-rw-r--r--  playbooks/libvirt/openshift-cluster/vars.yml | 38
-rw-r--r--  playbooks/libvirt/openshift-master/config.yml | 21
-rw-r--r--  playbooks/libvirt/openshift-master/vars.yml | 1
-rw-r--r--  playbooks/libvirt/openshift-node/config.yml | 102
l---------  playbooks/libvirt/openshift-node/roles | 1
-rw-r--r--  playbooks/libvirt/openshift-node/vars.yml | 1
-rw-r--r--  playbooks/libvirt/templates/meta-data | 2
-rw-r--r--  playbooks/libvirt/templates/user-data | 10
-rw-r--r--  roles/openshift_common/tasks/main.yml | 4
-rw-r--r--  roles/openshift_common/vars/main.yml | 4
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 92
-rw-r--r--  roles/openshift_master/tasks/main.yml | 64
-rw-r--r--  roles/openshift_master/vars/main.yml | 5
-rw-r--r--  roles/openshift_node/tasks/main.yml | 32
-rw-r--r--  roles/openshift_node/vars/main.yml | 2
-rw-r--r--  roles/openshift_register_nodes/defaults/main.yml | 3
-rwxr-xr-x  roles/openshift_register_nodes/library/kubernetes_register_node.py | 63
-rw-r--r--  roles/openshift_register_nodes/tasks/main.yml | 64
-rw-r--r--  roles/openshift_register_nodes/vars/main.yml | 7
-rw-r--r--  roles/openshift_repos/README.md | 2
-rw-r--r--  roles/openshift_repos/defaults/main.yaml | 5
-rw-r--r--  roles/openshift_repos/files/online/epel7-kubernetes.repo | 6
-rw-r--r--  roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-beta (renamed from roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta) | 0
-rw-r--r--  roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-release (renamed from roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release) | 0
-rw-r--r--  roles/openshift_repos/files/online/repos/epel7-openshift.repo (renamed from roles/openshift_repos/files/online/epel7-openshift.repo) | 0
-rw-r--r--  roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-extras.repo (renamed from roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo) | 0
-rw-r--r--  roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-server.repo (renamed from roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo) | 0
-rw-r--r--  roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo (renamed from roles/openshift_repos/files/online/rhel-7-libra-candidate.repo) | 0
-rw-r--r--  roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo | 7
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 14
-rw-r--r--  roles/openshift_repos/templates/yum_repo.j2 | 1
-rw-r--r--  roles/openshift_sdn_master/tasks/main.yml | 11
-rw-r--r--  roles/openshift_sdn_node/tasks/main.yml | 11
-rwxr-xr-x  roles/os_firewall/library/os_firewall_manage_iptables.py | 3
111 files changed, 1686 insertions(+), 1198 deletions(-)
diff --git a/README_OSE.md b/README_OSE.md
index 6ebdb7f99..6d4a9ba92 100644
--- a/README_OSE.md
+++ b/README_OSE.md
@@ -7,15 +7,17 @@
* [Creating the default variables for the hosts and host groups](#creating-the-default-variables-for-the-hosts-and-host-groups)
* [Running the ansible playbooks](#running-the-ansible-playbooks)
* [Post-ansible steps](#post-ansible-steps)
+* [Overriding detected ip addresses and hostnames](#overriding-detected-ip-addresses-and-hostnames)
## Requirements
* ansible
- * Tested using ansible-1.8.2-1.fc20.noarch, but should work with version 1.8+
+ * Tested using ansible-1.8.4-1.fc20.noarch, but should work with version 1.8+
+ * There is currently a known issue with ansible-1.9.0; you can downgrade to 1.8.4 on Fedora by installing one of the builds from Koji: http://koji.fedoraproject.org/koji/packageinfo?packageID=13842
* Available in Fedora channels
* Available for EL with EPEL and Optional channel
* One or more RHEL 7.1 VMs
-* ssh key based auth for the root user needs to be pre-configured from the host
- running ansible to the remote hosts
+* Either ssh key based auth for the root user or ssh key based auth for a user
+ with sudo access (no password)
* A checkout of openshift-ansible from https://github.com/openshift/openshift-ansible/
```sh
@@ -48,9 +50,6 @@ subscription-manager repos \
```
* Configuration of router is not automated yet
* Configuration of docker-registry is not automated yet
-* End-to-end testing has not been completed yet using this module
-* root user is used for all ansible actions; eventually we will support using
- a non-root user with sudo.
## Configuring the host inventory
[Ansible docs](http://docs.ansible.com/intro_inventory.html)
@@ -64,6 +63,38 @@ option to ansible-playbook.
```ini
# This is an example of a bring your own (byo) host inventory
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user; this user should allow ssh-based auth without requiring a password
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_sudo must be set to true
+#ansible_sudo=true
+
+# To deploy origin, change deployment_type to origin
+deployment_type=enterprise
+
+# Pre-release registry URL
+openshift_registry_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
+
+# Pre-release additional repo
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+
+# Origin copr repo
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+
# host group for masters
[masters]
ose3-master.example.com
@@ -76,51 +107,13 @@ ose3-node[1:2].example.com
The hostnames above should resolve both from the hosts themselves and
the host where ansible is running (if different).
-## Creating the default variables for the hosts and host groups
-[Ansible docs](http://docs.ansible.com/intro_inventory.html#id9)
-
-#### Group vars for all hosts
-/etc/ansible/group_vars/all:
-```yaml
----
-# Assume that we want to use the root as the ssh user for all hosts
-ansible_ssh_user: root
-
-# Default debug level for all OpenShift hosts
-openshift_debug_level: 4
-
-# Set the OpenShift deployment type for all hosts
-openshift_deployment_type: enterprise
-
-# Override the default registry for development
-openshift_registry_url: docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
-
-# To use the latest OpenShift Enterprise Errata puddle:
-#openshift_additional_repos:
-#- id: ose-devel
-# name: ose-devel
-# baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os
-# enabled: 1
-# gpgcheck: 0
-# To use the latest OpenShift Enterprise Whitelist puddle:
-openshift_additional_repos:
-- id: ose-devel
- name: ose-devel
- baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os
- enabled: 1
- gpgcheck: 0
-
-```
-
## Running the ansible playbooks
From the openshift-ansible checkout run:
```sh
ansible-playbook playbooks/byo/config.yml
```
-**Note:** this assumes that the host inventory is /etc/ansible/hosts and the
-group_vars are defined in /etc/ansible/group_vars, if using a different
-inventory file (and a group_vars directory that is in the same directory as
-the directory as the inventory) use the -i option for ansible-playbook.
+**Note:** this assumes that the host inventory is /etc/ansible/hosts; if using a
+different inventory file, use the -i option for ansible-playbook.
## Post-ansible steps
#### Create the default router
@@ -140,3 +133,109 @@ openshift ex registry --create=true \
--images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}' \
--mount-host=/var/lib/openshift/docker-registry
```
+
+## Overriding detected ip addresses and hostnames
+Some deployments will require that the user override the detected hostnames
+and ip addresses for the hosts. To see what the default values will be, you can
+run the openshift_facts playbook:
+```sh
+ansible-playbook playbooks/byo/openshift_facts.yml
+```
+The output will be similar to:
+```
+ok: [10.3.9.45] => {
+ "result": {
+ "ansible_facts": {
+ "openshift": {
+ "common": {
+ "hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com",
+ "ip": "172.16.4.79",
+ "public_hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com",
+ "public_ip": "10.3.9.45",
+ "use_openshift_sdn": true
+ },
+ "provider": {
+ ... <snip> ...
+ }
+ }
+ },
+ "changed": false,
+ "invocation": {
+ "module_args": "",
+ "module_name": "openshift_facts"
+ }
+ }
+}
+ok: [10.3.9.42] => {
+ "result": {
+ "ansible_facts": {
+ "openshift": {
+ "common": {
+ "hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com",
+ "ip": "172.16.4.75",
+ "public_hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com",
+ "public_ip": "10.3.9.42",
+ "use_openshift_sdn": true
+ },
+ "provider": {
+ ...<snip>...
+ }
+ }
+ },
+ "changed": false,
+ "invocation": {
+ "module_args": "",
+ "module_name": "openshift_facts"
+ }
+ }
+}
+ok: [10.3.9.36] => {
+ "result": {
+ "ansible_facts": {
+ "openshift": {
+ "common": {
+ "hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com",
+ "ip": "172.16.4.73",
+ "public_hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com",
+ "public_ip": "10.3.9.36",
+ "use_openshift_sdn": true
+ },
+ "provider": {
+ ...<snip>...
+ }
+ }
+ },
+ "changed": false,
+ "invocation": {
+ "module_args": "",
+ "module_name": "openshift_facts"
+ }
+ }
+}
+```
+Now we want to check the detected common settings and verify that they are
+what we expect (if not, we can override them).
+
+* hostname
+ * Should resolve to the internal ip from the instances themselves.
+ * openshift_hostname will override.
+* ip
+ * Should be the internal ip of the instance.
+ * openshift_ip will override.
+* public_hostname
+ * Should resolve to the external ip from hosts outside of the cloud provider.
+ * openshift_public_hostname will override.
+* public_ip
+ * Should be the externally accessible ip associated with the instance.
+ * openshift_public_ip will override.
+* use_openshift_sdn
+ * Should be true unless the cloud is GCE.
+ * openshift_use_openshift_sdn overrides.
+
+To override the defaults, you can set the variables in your inventory:
+```
+...snip...
+[masters]
+ose3-master.example.com openshift_ip=1.1.1.1 openshift_hostname=ose3-master.example.com openshift_public_ip=2.2.2.2 openshift_public_hostname=ose3-master.public.example.com
+...snip...
+```
diff --git a/README_libvirt.md b/README_libvirt.md
index fd2eb57f6..bcbaf4bd5 100644
--- a/README_libvirt.md
+++ b/README_libvirt.md
@@ -1,4 +1,3 @@
-
LIBVIRT Setup instructions
==========================
@@ -9,19 +8,21 @@ This makes `libvirt` useful to develop, test and debug Openshift and openshift-ansible
Install dependencies
--------------------
-1. Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html)
-2. Install [ebtables](http://ebtables.netfilter.org/)
-3. Install [qemu](http://wiki.qemu.org/Main_Page)
-4. Install [libvirt](http://libvirt.org/)
-5. Enable and start the libvirt daemon, e.g:
- * ``systemctl enable libvirtd``
- * ``systemctl start libvirtd``
-6. [Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
-7. Check that your `$HOME` is accessible to the qemu user²
+1. Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html)
+2. Install [ebtables](http://ebtables.netfilter.org/)
+3. Install [qemu](http://wiki.qemu.org/Main_Page)
+4. Install [libvirt](http://libvirt.org/)
+5. Enable and start the libvirt daemon, e.g:
+ - `systemctl enable libvirtd`
+ - `systemctl start libvirtd`
+6. [Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
+7. Check that your `$HOME` is accessible to the qemu user²
+8. Configure dns resolution on the host³
#### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access.
You can test it with the following command:
+
```
virsh -c qemu:///system pool-list
```
@@ -67,12 +68,7 @@ If your `$HOME` is world readable, everything is fine. If your `$HOME` is privat
error: Cannot access storage file '$HOME/libvirt-storage-pool-openshift/lenaic-master-216d8.qcow2' (as uid:99, gid:78): Permission denied
```
-In order to fix that issue, you have several possibilities:
-* set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` to a directory:
- * backed by a filesystem with a lot of free disk space
- * writable by your user;
- * accessible by the qemu user.
-* Grant the qemu user access to the storage pool.
+In order to fix that issue, you have several possibilities:
+* set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` to a directory:
+ * backed by a filesystem with a lot of free disk space
+ * writable by your user;
+ * accessible by the qemu user.
+* Grant the qemu user access to the storage pool.
On Arch:
@@ -80,13 +76,55 @@ On Arch:
setfacl -m g:kvm:--x ~
```
-Test the setup
+#### ³ Enabling DNS resolution to your guest VMs with NetworkManager
+
+- Verify NetworkManager is configured to use dnsmasq:
+
+```sh
+$ sudo vi /etc/NetworkManager/NetworkManager.conf
+[main]
+dns=dnsmasq
+```
+
+- Configure dnsmasq to use the Virtual Network router for example.com:
+
+```sh
+sudo vi /etc/NetworkManager/dnsmasq.d/libvirt_dnsmasq.conf
+server=/example.com/192.168.55.1
+```
+
+Test the setup
--------------
+1. cd openshift-ansible/
+2. Try to list all instances (Passing an empty string as the cluster_id argument will result in all libvirt instances being listed)
+
+```
+ bin/cluster list libvirt ''
```
-cd openshift-ansible
-bin/cluster create -m 1 -n 3 libvirt lenaic
+Creating a cluster
+------------------
+
+1. To create a cluster with one master and two nodes
-bin/cluster terminate libvirt lenaic
+```
+ bin/cluster create libvirt lenaic
+```
+
+Updating a cluster
+------------------
+
+1. To update the cluster
+
+```
+ bin/cluster update libvirt lenaic
+```
+
+Terminating a cluster
+---------------------
+
+1. To terminate the cluster
+
+```
+ bin/cluster terminate libvirt lenaic
```
diff --git a/bin/cluster b/bin/cluster
index ca227721e..79f1f988f 100755
--- a/bin/cluster
+++ b/bin/cluster
@@ -22,13 +22,28 @@ class Cluster(object):
'-o ControlPersist=600s '
)
+ def get_deployment_type(self, args):
+ """
+ Get the deployment_type based on the environment variables and the
+ command line arguments
+ :param args: command line arguments provided by the user
+ :return: string representing the deployment type
+ """
+ deployment_type = 'origin'
+ if args.deployment_type:
+ deployment_type = args.deployment_type
+ elif 'OS_DEPLOYMENT_TYPE' in os.environ:
+ deployment_type = os.environ['OS_DEPLOYMENT_TYPE']
+ return deployment_type
+
def create(self, args):
"""
Create an OpenShift cluster for given provider
:param args: command line arguments provided by user
:return: exit status from run command
"""
- env = {'cluster_id': args.cluster_id}
+ env = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args)}
playbook = "playbooks/{}/openshift-cluster/launch.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
@@ -43,7 +58,8 @@ class Cluster(object):
:param args: command line arguments provided by user
:return: exit status from run command
"""
- env = {'cluster_id': args.cluster_id}
+ env = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args)}
playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
@@ -55,19 +71,34 @@ class Cluster(object):
:param args: command line arguments provided by user
:return: exit status from run command
"""
- env = {'cluster_id': args.cluster_id}
+ env = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args)}
playbook = "playbooks/{}/openshift-cluster/list.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
return self.action(args, inventory, env, playbook)
+ def config(self, args):
+ """
+ Configure or reconfigure OpenShift across clustered VMs
+ :param args: command line arguments provided by user
+ :return: exit status from run command
+ """
+ env = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args)}
+ playbook = "playbooks/{}/openshift-cluster/config.yml".format(args.provider)
+ inventory = self.setup_provider(args.provider)
+
+ return self.action(args, inventory, env, playbook)
+
def update(self, args):
"""
Update to latest OpenShift across clustered VMs
:param args: command line arguments provided by user
:return: exit status from run command
"""
- env = {'cluster_id': args.cluster_id}
+ env = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args)}
playbook = "playbooks/{}/openshift-cluster/update.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
@@ -81,19 +112,19 @@ class Cluster(object):
"""
config = ConfigParser.ConfigParser()
if 'gce' == provider:
- config.readfp(open('inventory/gce/gce.ini'))
+ config.readfp(open('inventory/gce/hosts/gce.ini'))
for key in config.options('gce'):
os.environ[key] = config.get('gce', key)
- inventory = '-i inventory/gce/gce.py'
+ inventory = '-i inventory/gce/hosts'
elif 'aws' == provider:
- config.readfp(open('inventory/aws/ec2.ini'))
+ config.readfp(open('inventory/aws/hosts/ec2.ini'))
for key in config.options('ec2'):
os.environ[key] = config.get('ec2', key)
- inventory = '-i inventory/aws/ec2.py'
+ inventory = '-i inventory/aws/hosts'
elif 'libvirt' == provider:
inventory = '-i inventory/libvirt/hosts'
else:
@@ -145,29 +176,49 @@ if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Python wrapper to ensure proper environment for OpenShift ansible playbooks',
)
- parser.add_argument('-v', '--verbose', action='count', help='Multiple -v options increase the verbosity')
+ parser.add_argument('-v', '--verbose', action='count',
+ help='Multiple -v options increase the verbosity')
parser.add_argument('--version', action='version', version='%(prog)s 0.2')
meta_parser = argparse.ArgumentParser(add_help=False)
meta_parser.add_argument('provider', choices=providers, help='provider')
meta_parser.add_argument('cluster_id', help='prefix for cluster VM names')
-
- action_parser = parser.add_subparsers(dest='action', title='actions', description='Choose from valid actions')
-
- create_parser = action_parser.add_parser('create', help='Create a cluster', parents=[meta_parser])
- create_parser.add_argument('-m', '--masters', default=1, type=int, help='number of masters to create in cluster')
- create_parser.add_argument('-n', '--nodes', default=2, type=int, help='number of nodes to create in cluster')
+ meta_parser.add_argument('-t', '--deployment-type',
+ choices=['origin', 'online', 'enterprise'],
+ help='Deployment type. (default: origin)')
+
+ action_parser = parser.add_subparsers(dest='action', title='actions',
+ description='Choose from valid actions')
+
+ create_parser = action_parser.add_parser('create', help='Create a cluster',
+ parents=[meta_parser])
+ create_parser.add_argument('-m', '--masters', default=1, type=int,
+ help='number of masters to create in cluster')
+ create_parser.add_argument('-n', '--nodes', default=2, type=int,
+ help='number of nodes to create in cluster')
create_parser.set_defaults(func=cluster.create)
- terminate_parser = action_parser.add_parser('terminate', help='Destroy a cluster', parents=[meta_parser])
- terminate_parser.add_argument('-f', '--force', action='store_true', help='Destroy cluster without confirmation')
+ config_parser = action_parser.add_parser('config',
+ help='Configure or reconfigure a cluster',
+ parents=[meta_parser])
+ config_parser.set_defaults(func=cluster.config)
+
+ terminate_parser = action_parser.add_parser('terminate',
+ help='Destroy a cluster',
+ parents=[meta_parser])
+ terminate_parser.add_argument('-f', '--force', action='store_true',
+ help='Destroy cluster without confirmation')
terminate_parser.set_defaults(func=cluster.terminate)
- update_parser = action_parser.add_parser('update', help='Update OpenShift across cluster', parents=[meta_parser])
- update_parser.add_argument('-f', '--force', action='store_true', help='Update cluster without confirmation')
+ update_parser = action_parser.add_parser('update',
+ help='Update OpenShift across cluster',
+ parents=[meta_parser])
+ update_parser.add_argument('-f', '--force', action='store_true',
+ help='Update cluster without confirmation')
update_parser.set_defaults(func=cluster.update)
- list_parser = action_parser.add_parser('list', help='List VMs in cluster', parents=[meta_parser])
+ list_parser = action_parser.add_parser('list', help='List VMs in cluster',
+ parents=[meta_parser])
list_parser.set_defaults(func=cluster.list)
args = parser.parse_args()
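The net effect of the bin/cluster changes above is that the deployment type is resolved from the `-t/--deployment-type` flag first, then the `OS_DEPLOYMENT_TYPE` environment variable, then the `origin` default. A minimal standalone sketch of that precedence (the helper name here is illustrative, not part of bin/cluster):

```python
import os

def resolve_deployment_type(cli_value=None):
    """Illustrative restatement of bin/cluster's precedence:
    CLI flag first, then OS_DEPLOYMENT_TYPE, then 'origin'."""
    if cli_value:
        return cli_value
    return os.environ.get('OS_DEPLOYMENT_TYPE', 'origin')

assert resolve_deployment_type('enterprise') == 'enterprise'  # -t wins
os.environ['OS_DEPLOYMENT_TYPE'] = 'online'
assert resolve_deployment_type() == 'online'   # env var used when no flag given
del os.environ['OS_DEPLOYMENT_TYPE']
assert resolve_deployment_type() == 'origin'   # built-in default
```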
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 1cf02218c..cf30cde9a 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -5,6 +5,7 @@
from ansible import errors, runner
import json
import pdb
+import re
def oo_pdb(arg):
''' This pops you into a pdb instance where arg is the data passed in from the filter.
@@ -101,6 +102,18 @@ def oo_prepend_strings_in_list(data, prepend):
retval = [prepend + s for s in data]
return retval
+def oo_get_deployment_type_from_groups(data):
+ ''' This takes a list of groups and returns the associated
+ deployment-type
+ '''
+ if not issubclass(type(data), list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+ regexp = re.compile('^tag_deployment-type[-_]')
+ matches = filter(regexp.match, data)
+ if len(matches) > 0:
+ return regexp.sub('', matches[0])
+ return "Unknown"
+
class FilterModule (object):
def filters(self):
return {
@@ -109,5 +122,6 @@ class FilterModule (object):
"oo_flatten": oo_flatten,
"oo_len": oo_len,
"oo_pdb": oo_pdb,
- "oo_prepend_strings_in_list": oo_prepend_strings_in_list
+ "oo_prepend_strings_in_list": oo_prepend_strings_in_list,
+ "oo_get_deployment_type_from_groups": oo_get_deployment_type_from_groups
}
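As a quick illustration, here is how the new filter behaves on group lists like those produced by the dynamic inventories; the `[-_]` in the regex presumably accounts for inventory sources that emit `_` instead of `-` as the tag separator (an assumption, not stated in the commit). A standalone restatement of the same matching logic:

```python
import re

def oo_get_deployment_type_from_groups(data):
    # same matching/stripping logic as the filter above
    if not isinstance(data, list):
        raise ValueError("expects first param is a list")
    regexp = re.compile('^tag_deployment-type[-_]')
    matches = [g for g in data if regexp.match(g)]
    if matches:
        return regexp.sub('', matches[0])
    return "Unknown"

print(oo_get_deployment_type_from_groups(
    ['tag_env_foo', 'tag_deployment-type_enterprise']))  # -> enterprise
print(oo_get_deployment_type_from_groups(
    ['tag_deployment-type-origin']))                     # -> origin
print(oo_get_deployment_type_from_groups(['tag_env_foo']))  # -> Unknown
```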
diff --git a/inventory/aws/group_vars/all b/inventory/aws/group_vars/all
deleted file mode 100644
index b22da00de..000000000
--- a/inventory/aws/group_vars/all
+++ /dev/null
@@ -1,2 +0,0 @@
----
-ansible_ssh_user: root
diff --git a/inventory/aws/ec2.ini b/inventory/aws/hosts/ec2.ini
index eaab0a410..eaab0a410 100644
--- a/inventory/aws/ec2.ini
+++ b/inventory/aws/hosts/ec2.ini
diff --git a/inventory/aws/ec2.py b/inventory/aws/hosts/ec2.py
index f231ff4c2..f231ff4c2 100755
--- a/inventory/aws/ec2.py
+++ b/inventory/aws/hosts/ec2.py
diff --git a/inventory/aws/hosts/hosts b/inventory/aws/hosts/hosts
new file mode 100644
index 000000000..6c590ac93
--- /dev/null
+++ b/inventory/aws/hosts/hosts
@@ -0,0 +1 @@
+localhost ansible_sudo=no ansible_python_interpreter=/usr/bin/python2
diff --git a/inventory/byo/group_vars/all b/inventory/byo/group_vars/all
deleted file mode 100644
index d63e96668..000000000
--- a/inventory/byo/group_vars/all
+++ /dev/null
@@ -1,28 +0,0 @@
----
-# lets assume that we want to use the root as the ssh user for all hosts
-ansible_ssh_user: root
-
-# default debug level for all OpenShift hosts
-openshift_debug_level: 4
-
-# set the OpenShift deployment type for all hosts
-openshift_deployment_type: enterprise
-
-# Override the default registry for development
-openshift_registry_url: docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
-
-# Use latest Errata puddle as an additional repo:
-#openshift_additional_repos:
-#- id: ose-devel
-# name: ose-devel
-# baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os
-# enabled: 1
-# gpgcheck: 0
-
-# Use latest Whitelist puddle as an additional repo:
-openshift_additional_repos:
-- id: ose-devel
- name: ose-devel
- baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os
- enabled: 1
- gpgcheck: 0
diff --git a/inventory/byo/hosts b/inventory/byo/hosts
index 2dd854778..e9af5e571 100644
--- a/inventory/byo/hosts
+++ b/inventory/byo/hosts
@@ -1,5 +1,30 @@
# This is an example of a bring your own (byo) host inventory
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user; this user should allow ssh-based auth without requiring a password
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_sudo must be set to true
+#ansible_sudo=true
+
+# To deploy origin, change deployment_type to origin
+deployment_type=enterprise
+
+# Pre-release registry URL
+openshift_registry_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
+
+# Pre-release additional repo
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+
+# Origin copr repo
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+
# host group for masters
[masters]
ose3-master-ansible.test.example.com
@@ -7,4 +32,3 @@ ose3-master-ansible.test.example.com
# host group for nodes
[nodes]
ose3-node[1:2]-ansible.test.example.com
-
diff --git a/inventory/gce/group_vars/all b/inventory/gce/group_vars/all
deleted file mode 100644
index b22da00de..000000000
--- a/inventory/gce/group_vars/all
+++ /dev/null
@@ -1,2 +0,0 @@
----
-ansible_ssh_user: root
diff --git a/inventory/gce/gce.py b/inventory/gce/hosts/gce.py
index 3403f735e..3403f735e 100755
--- a/inventory/gce/gce.py
+++ b/inventory/gce/hosts/gce.py
diff --git a/inventory/gce/hosts/hosts b/inventory/gce/hosts/hosts
new file mode 100644
index 000000000..6c590ac93
--- /dev/null
+++ b/inventory/gce/hosts/hosts
@@ -0,0 +1 @@
+localhost ansible_sudo=no ansible_python_interpreter=/usr/bin/python2
diff --git a/inventory/libvirt/group_vars/all b/inventory/libvirt/group_vars/all
deleted file mode 100644
index b22da00de..000000000
--- a/inventory/libvirt/group_vars/all
+++ /dev/null
@@ -1,2 +0,0 @@
----
-ansible_ssh_user: root
diff --git a/inventory/libvirt/hosts b/inventory/libvirt/hosts
deleted file mode 100644
index 6a818f268..000000000
--- a/inventory/libvirt/hosts
+++ /dev/null
@@ -1,2 +0,0 @@
-# Eventually we'll add the GCE, AWS, etc dynamic inventories, but for now...
-localhost ansible_python_interpreter=/usr/bin/python2
diff --git a/inventory/libvirt/hosts/hosts b/inventory/libvirt/hosts/hosts
new file mode 100644
index 000000000..9cdc31449
--- /dev/null
+++ b/inventory/libvirt/hosts/hosts
@@ -0,0 +1 @@
+localhost ansible_sudo=no ansible_python_interpreter=/usr/bin/python2 connection=local
diff --git a/inventory/libvirt/hosts/libvirt.ini b/inventory/libvirt/hosts/libvirt.ini
new file mode 100644
index 000000000..62ff204dd
--- /dev/null
+++ b/inventory/libvirt/hosts/libvirt.ini
@@ -0,0 +1,20 @@
+# Ansible libvirt external inventory script settings
+#
+
+[libvirt]
+
+uri = qemu:///system
+
+# API calls to libvirt can be slow. For this reason, we cache the results of an API
+# call. Set this to the path you want cache files to be written to. Two files
+# will be written to this directory:
+# - ansible-libvirt.cache
+# - ansible-libvirt.index
+cache_path = /tmp
+
+# The number of seconds a cache file is considered valid. After this many
+# seconds, a new API call will be made, and the cache file will be updated.
+cache_max_age = 900
+
+
+
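Note that the inventory script below only reads the `uri` setting; `cache_path` and `cache_max_age` are declared here but not yet consumed. For reference, a typical freshness check for this style of time-based inventory cache would look something like the following sketch (not code from the commit):

```python
import os
from time import time

def cache_is_valid(cache_path='/tmp', cache_max_age=900):
    """Return True if the cached inventory is fresh enough to reuse."""
    cache_file = os.path.join(cache_path, 'ansible-libvirt.cache')
    if not os.path.isfile(cache_file):
        return False
    # stale once the file is older than cache_max_age seconds
    return (time() - os.path.getmtime(cache_file)) < cache_max_age
```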
diff --git a/inventory/libvirt/hosts/libvirt_generic.py b/inventory/libvirt/hosts/libvirt_generic.py
new file mode 100755
index 000000000..0a98e2af3
--- /dev/null
+++ b/inventory/libvirt/hosts/libvirt_generic.py
@@ -0,0 +1,179 @@
+#!/usr/bin/env python
+
+"""
+libvirt external inventory script
+=================================
+
+Ansible has a feature where instead of reading from /etc/ansible/hosts
+as a text file, it can query external programs to obtain the list
+of hosts, groups the hosts are in, and even variables to assign to each host.
+
+To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
+This, more or less, allows you to keep one central database containing
+info about all of your managed instances.
+
+"""
+
+# (c) 2015, Jason DeTiberus <jdetiber@redhat.com>
+#
+# This file is part of Ansible,
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+######################################################################
+
+import argparse
+import ConfigParser
+import os
+import re
+import sys
+from time import time
+import libvirt
+import xml.etree.ElementTree as ET
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+
+class LibvirtInventory(object):
+
+ def __init__(self):
+ self.inventory = dict() # A list of groups and the hosts in that group
+ self.cache = dict() # Details about hosts in the inventory
+
+ # Read settings and parse CLI arguments
+ self.read_settings()
+ self.parse_cli_args()
+
+ if self.args.host:
+ print self.json_format_dict(self.get_host_info(), self.args.pretty)
+ elif self.args.list:
+ print self.json_format_dict(self.get_inventory(), self.args.pretty)
+ else: # default action with no options
+ print self.json_format_dict(self.get_inventory(), self.args.pretty)
+
+ def read_settings(self):
+ config = ConfigParser.SafeConfigParser()
+ config.read(
+ os.path.dirname(os.path.realpath(__file__)) + '/libvirt.ini'
+ )
+ self.libvirt_uri = config.get('libvirt', 'uri')
+
+ def parse_cli_args(self):
+ parser = argparse.ArgumentParser(
+ description='Produce an Ansible Inventory file based on libvirt'
+ )
+ parser.add_argument(
+ '--list',
+ action='store_true',
+ default=True,
+ help='List instances (default: True)'
+ )
+ parser.add_argument(
+ '--host',
+ action='store',
+ help='Get all the variables about a specific instance'
+ )
+ parser.add_argument(
+ '--pretty',
+ action='store_true',
+ default=False,
+ help='Pretty format (default: False)'
+ )
+ self.args = parser.parse_args()
+
+ def get_host_info(self):
+ inventory = self.get_inventory()
+ if self.args.host in inventory['_meta']['hostvars']:
+ return inventory['_meta']['hostvars'][self.args.host]
+
+ def get_inventory(self):
+ inventory = dict(_meta=dict(hostvars=dict()))
+
+ conn = libvirt.openReadOnly(self.libvirt_uri)
+ if conn is None:
+ print "Failed to open connection to %s" % libvirt_uri
+ sys.exit(1)
+
+ domains = conn.listAllDomains()
+ if domains is None:
+ print "Failed to list domains for connection %s" % libvirt_uri
+ sys.exit(1)
+
+ arp_entries = self.parse_arp_entries()
+
+ for domain in domains:
+ hostvars = dict(libvirt_name=domain.name(),
+ libvirt_id=domain.ID(),
+ libvirt_uuid=domain.UUIDString())
+ domain_name = domain.name()
+
+ # TODO: add support for guests that are not in a running state
+ state, _ = domain.state()
+ # 2 is the state for a running guest
+ if state != 1:
+ continue
+
+ hostvars['libvirt_status'] = 'running'
+
+ root = ET.fromstring(domain.XMLDesc())
+ ns = {'ansible': 'https://github.com/ansible/ansible'}
+ for tag_elem in root.findall('./metadata/ansible:tag', ns):
+ tag = tag_elem.text
+ self.push(inventory, "tag_%s" % tag, domain_name)
+ self.push(hostvars, 'libvirt_tags', tag)
+
+ # TODO: support more than one network interface, also support
+ # interface types other than 'network'
+ interface = root.find("./devices/interface[@type='network']")
+ if interface is not None:
+ mac_elem = interface.find('mac')
+ if mac_elem is not None:
+ mac = mac_elem.get('address')
+ if mac in arp_entries:
+ ip_address = arp_entries[mac]['ip_address']
+ hostvars['ansible_ssh_host'] = ip_address
+ hostvars['libvirt_ip_address'] = ip_address
+
+ inventory['_meta']['hostvars'][domain_name] = hostvars
+
+ return inventory
+
+ def parse_arp_entries(self):
+ arp_entries = dict()
+ with open('/proc/net/arp', 'r') as f:
+ # throw away the header
+ f.readline()
+
+ for line in f:
+ ip_address, _, _, mac, _, device = line.strip().split()
+ arp_entries[mac] = dict(ip_address=ip_address, device=device)
+
+ return arp_entries
+
+ def push(self, my_dict, key, element):
+ if key in my_dict:
+ my_dict[key].append(element)
+ else:
+ my_dict[key] = [element]
+
+ def json_format_dict(self, data, pretty=False):
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+LibvirtInventory()
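To see what Ansible gets from this script, you can run it by hand from the repository root; a hypothetical check (assumes libvirtd is running and at least one tagged domain exists):

```python
import json
import subprocess

# invoke the inventory script the way ansible does
out = subprocess.check_output(
    ['inventory/libvirt/hosts/libvirt_generic.py', '--list', '--pretty'])
inventory = json.loads(out)

# groups are keyed by tag, e.g. 'tag_deployment-type_origin': [domain, ...]
for group, hosts in sorted(inventory.items()):
    if group != '_meta':
        print('%s: %s' % (group, hosts))

# per-host variables (ansible_ssh_host, libvirt_*) live under _meta.hostvars
for host, hostvars in inventory['_meta']['hostvars'].items():
    print('%s -> %s' % (host, hostvars.get('ansible_ssh_host')))
```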
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
new file mode 100644
index 000000000..b8961704e
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -0,0 +1,36 @@
+---
+- name: Populate oo_masters_to_config host group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
+ groups: oo_first_master
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ when: "'tag_env-host-type_{{ cluster_id }}-openshift-master' in groups"
+
+- include: ../../common/openshift-cluster/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ ec2_private_ip_address }}"
+ openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
index 3561c1803..e7125ea0c 100644
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -4,59 +4,26 @@
connection: local
gather_facts: no
vars_files:
- - vars.yml
+ - vars.yml
tasks:
- - set_fact: k8s_type="master"
-
- - name: Generate master instance names(s)
- set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
- register: master_names_output
- with_sequence: start=1 end={{ num_masters }}
-
- # These set_fact's cannot be combined
- - set_fact:
- master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
-
- - set_fact:
- master_names: "{{ master_names_string.strip().split(' ') }}"
-
- - include: launch_instances.yml
- vars:
- instances: "{{ master_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
-
- - set_fact: k8s_type="node"
-
- - name: Generate node instance names(s)
- set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
- register: node_names_output
- with_sequence: start=1 end={{ num_nodes }}
-
- # These set_fact's cannot be combined
- - set_fact:
- node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
-
- - set_fact:
- node_names: "{{ node_names_string.strip().split(' ') }}"
-
- - include: launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
-
-- hosts: "tag_env_{{ cluster_id }}"
- roles:
- - openshift_repos
- - os_update_latest
-
-- include: ../openshift-master/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]"
-
-- include: ../openshift-node/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]"
+ - fail:
+ msg: Deployment type not supported for aws provider yet
+ when: deployment_type == 'enterprise'
+
+ - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ master_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+ - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+- include: update.yml
- include: list.yml
diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml
index 08e9e2df4..5c04bc320 100644
--- a/playbooks/aws/openshift-cluster/list.yml
+++ b/playbooks/aws/openshift-cluster/list.yml
@@ -2,16 +2,23 @@
- name: Generate oo_list_hosts group
hosts: localhost
gather_facts: no
+ vars_files:
+ - vars.yml
tasks:
- set_fact: scratch_group=tag_env_{{ cluster_id }}
when: cluster_id != ''
- set_fact: scratch_group=all
- when: scratch_group is not defined
- - add_host: name={{ item }} groups=oo_list_hosts
- with_items: groups[scratch_group] | difference(['localhost'])
+ when: cluster_id == ''
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_list_hosts
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost'])
- name: List Hosts
hosts: oo_list_hosts
gather_facts: no
tasks:
- - debug: msg="public:{{hostvars[inventory_hostname].ec2_ip_address}} private:{{hostvars[inventory_hostname].ec2_private_ip_address}}"
+ - debug:
+ msg: "public ip:{{ hostvars[inventory_hostname].ec2_ip_address }} private ip:{{ hostvars[inventory_hostname].ec2_private_ip_address }} deployment-type: {{ hostvars[inventory_hostname].group_names | oo_get_deployment_type_from_groups }}"
diff --git a/playbooks/aws/openshift-cluster/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index 9d645fbe5..58b4082df 100644
--- a/playbooks/aws/openshift-cluster/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -1,8 +1,9 @@
---
+# TODO: modify machine_image based on deployment_type
- set_fact:
- machine_type: "{{ lookup('env', 'ec2_instance_type')|default('m3.large', true) }}"
- machine_image: "{{ lookup('env', 'ec2_ami')|default('ami-307b3658', true) }}"
- machine_region: "{{ lookup('env', 'ec2_region')|default('us-east-1', true) }}"
+ machine_type: "{{ lookup('env', 'ec2_instance_type') | default('m3.large', true) }}"
+ machine_image: "{{ lookup('env', 'ec2_ami') | default(deployment_vars[deployment_type].image, true) }}"
+ machine_region: "{{ lookup('env', 'ec2_region') | default(deployment_vars[deployment_type].region, true) }}"
machine_keypair: "{{ lookup('env', 'ec2_keypair')|default('libra', true) }}"
created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
security_group: "{{ lookup('env', 'ec2_security_group')|default('public', true) }}"
@@ -25,6 +26,7 @@
env: "{{ env }}"
host-type: "{{ host_type }}"
env-host-type: "{{ env_host_type }}"
+ deployment-type: "{{ deployment_type }}"
register: ec2
- name: Add Name tag to instances
@@ -37,12 +39,14 @@
Name: "{{ item.0 }}"
- set_fact:
- instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}
+ instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}, tag_deployment-type_{{ deployment_type }}
- name: Add new instances groups and variables
add_host:
hostname: "{{ item.0 }}"
ansible_ssh_host: "{{ item.1.dns_name }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
groups: "{{ instance_groups }}"
ec2_private_ip_address: "{{ item.1.private_ip }}"
ec2_ip_address: "{{ item.1.public_ip }}"
@@ -54,10 +58,12 @@
wait_for: "port=22 host={{ item.dns_name }}"
with_items: ec2.instances
-- name: Wait for root user setup
- command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup"
+- name: Wait for user setup
+ command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.0].ansible_ssh_user }}@{{ item.1.dns_name }} echo {{ hostvars[item.0].ansible_ssh_user }} user is setup"
register: result
until: result.rc == 0
retries: 20
delay: 10
- with_items: ec2.instances
+ with_together:
+ - instances
+ - ec2.instances
diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml
index 39607633a..1d2b60594 100644
--- a/playbooks/aws/openshift-cluster/terminate.yml
+++ b/playbooks/aws/openshift-cluster/terminate.yml
@@ -1,14 +1,26 @@
---
- name: Terminate instance(s)
hosts: localhost
-
+ gather_facts: no
vars_files:
- - vars.yml
+ - vars.yml
+ tasks:
+ - set_fact: scratch_group=tag_env-host-type_{{ cluster_id }}-openshift-node
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_terminate
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost'])
+
+ - set_fact: scratch_group=tag_env-host-type_{{ cluster_id }}-openshift-master
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_terminate
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost'])
- include: ../openshift-node/terminate.yml
- vars:
- oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]'
- include: ../openshift-master/terminate.yml
- vars:
- oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-master"]'
diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml
index 90ecdc6ab..5e7ab4e58 100644
--- a/playbooks/aws/openshift-cluster/update.yml
+++ b/playbooks/aws/openshift-cluster/update.yml
@@ -1,13 +1,18 @@
---
-- hosts: "tag_env_{{ cluster_id }}"
- roles:
- - openshift_repos
- - os_update_latest
+- name: Populate oo_hosts_to_update group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_hosts_to_update
+ add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_update
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]) | default([])
-- include: ../openshift-master/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]"
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-- include: ../openshift-node/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]"
+- include: config.yml
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
index ed97d539c..f0df3d6f5 100644
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -1 +1,20 @@
---
+deployment_vars:
+ origin:
+ # fedora, since centos requires marketplace
+ image: ami-acd999c4
+ region: us-east-1
+ ssh_user: fedora
+ sudo: yes
+ online:
+ # private ami
+ image: ami-307b3658
+ region: us-east-1
+ ssh_user: root
+ sudo: no
+ enterprise:
+ # rhel-7.1, requires cloud access subscription
+ image: ami-10663b78
+ region: us-east-1
+ ssh_user: ec2-user
+ sudo: yes
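The matching change in `tasks/launch_instances.yml` keeps the `ec2_ami`/`ec2_region` environment variables as overrides and only falls back to these per-deployment defaults. In plain Python the lookup amounts to (a sketch; the dict mirrors the vars above):

```python
import os

DEPLOYMENT_VARS = {
    'origin':     {'image': 'ami-acd999c4', 'region': 'us-east-1',
                   'ssh_user': 'fedora', 'sudo': True},
    'online':     {'image': 'ami-307b3658', 'region': 'us-east-1',
                   'ssh_user': 'root', 'sudo': False},
    'enterprise': {'image': 'ami-10663b78', 'region': 'us-east-1',
                   'ssh_user': 'ec2-user', 'sudo': True},
}

def machine_image(deployment_type):
    # environment variable wins, mirroring
    # lookup('env', 'ec2_ami') | default(deployment_vars[deployment_type].image, true)
    return os.environ.get('ec2_ami') or DEPLOYMENT_VARS[deployment_type]['image']

print(machine_image('origin'))  # ami-acd999c4 unless ec2_ami is exported
```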
diff --git a/playbooks/aws/openshift-master/config.yml b/playbooks/aws/openshift-master/config.yml
index 1c4060eee..37ab4fbe6 100644
--- a/playbooks/aws/openshift-master/config.yml
+++ b/playbooks/aws/openshift-master/config.yml
@@ -1,24 +1,19 @@
---
-- name: Populate oo_masters_to_config host group if needed
+- name: Populate oo_masters_to_config host group
hosts: localhost
gather_facts: no
tasks:
- - name: "Evaluate oo_host_group_exp if it's set"
- add_host: "name={{ item }} groups=oo_masters_to_config"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: root
+ with_items: oo_host_group_exp | default([])
-- name: Configure instances
- hosts: oo_masters_to_config
+- include: ../../common/openshift-master/config.yml
vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
openshift_hostname: "{{ ec2_private_ip_address }}"
openshift_public_hostname: "{{ ec2_ip_address }}"
- # TODO: this should be removed once openshift-sdn packages are available
- openshift_use_openshift_sdn: False
- vars_files:
- - vars.yml
- roles:
- - openshift_master
- #- openshift_sdn_master
- - pods
- - os_env_extras
diff --git a/playbooks/aws/openshift-master/launch.yml b/playbooks/aws/openshift-master/launch.yml
index 3d87879a0..6b3751682 100644
--- a/playbooks/aws/openshift-master/launch.yml
+++ b/playbooks/aws/openshift-master/launch.yml
@@ -4,14 +4,12 @@
connection: local
gather_facts: no
+# TODO: modify atomic_ami based on deployment_type
vars:
inst_region: us-east-1
atomic_ami: ami-86781fee
user_data_file: user_data.txt
- vars_files:
- - vars.yml
-
tasks:
- name: Launch instances
ec2:
@@ -40,7 +38,7 @@
Name: "{{ item.0 }}"
- name: Add other tags to instances
- ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
+ ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present
with_items: ec2.instances
args:
tags: "{{ oo_new_inst_tags }}"
@@ -57,7 +55,7 @@
- ec2.instances
- name: Wait for ssh
- wait_for: "port=22 host={{ item.dns_name }}"
+ wait_for: port=22 host={{ item.dns_name }}
with_items: ec2.instances
- name: Wait for root user setup
diff --git a/playbooks/aws/openshift-master/terminate.yml b/playbooks/aws/openshift-master/terminate.yml
index fd15cf00f..a790336b1 100644
--- a/playbooks/aws/openshift-master/terminate.yml
+++ b/playbooks/aws/openshift-master/terminate.yml
@@ -1,15 +1,15 @@
---
-- name: Populate oo_masters_to_terminate host group if needed
+- name: Populate oo_masters_to_terminate host group
hosts: localhost
gather_facts: no
tasks:
- - name: Evaluate oo_host_group_exp if it's set
- add_host: "name={{ item }} groups=oo_masters_to_terminate"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
+ - name: Evaluate oo_masters_to_terminate
+ add_host: name={{ item }} groups=oo_masters_to_terminate
+ with_items: oo_host_group_exp | default([])
-- name: Gather facts for instances to terminate
+- name: Gather dynamic inventory variables for hosts to terminate
hosts: oo_masters_to_terminate
+ gather_facts: no
- name: Terminate instances
hosts: localhost
@@ -27,11 +27,12 @@
ignore_errors: yes
register: ec2_term
with_items: host_vars
+ when: "'oo_masters_to_terminate' in groups"
# Fail if any of the instances failed to terminate with an error other
# than 403 Forbidden
- fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}
- when: "item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
+ when: "'oo_masters_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
with_items: ec2_term.results
- name: Stop instance if termination failed
@@ -42,6 +43,7 @@
register: ec2_stop
- when: item.failed
+ when: "item.failed and 'oo_masters_to_terminate' in groups"
with_items: ec2_term.results
- name: Rename stopped instances
ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
@@ -49,4 +51,5 @@
tags:
Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
with_items: ec2_stop.results
+ when: "'oo_masters_to_terminate' in groups"
diff --git a/playbooks/aws/openshift-master/vars.yml b/playbooks/aws/openshift-master/vars.yml
deleted file mode 100644
index c196b2fca..000000000
--- a/playbooks/aws/openshift-master/vars.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-openshift_debug_level: 4
-openshift_cluster_id: "{{ cluster_id }}"
diff --git a/playbooks/aws/openshift-node/config.yml b/playbooks/aws/openshift-node/config.yml
index b08ed7571..fc9b397b4 100644
--- a/playbooks/aws/openshift-node/config.yml
+++ b/playbooks/aws/openshift-node/config.yml
@@ -1,107 +1,25 @@
---
-- name: Populate oo_nodes_to_config host group if needed
+- name: Populate oo_nodes_to_config and oo_first_master host groups
hosts: localhost
gather_facts: no
tasks:
- - name: Evaluate oo_host_group_exp
- add_host: "name={{ item }} groups=oo_nodes_to_config"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
- - add_host:
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: root
+ with_items: oo_host_group_exp | default([])
+ - name: Evaluate oo_first_master
+ add_host:
name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
groups: oo_first_master
- when: oo_host_group_exp is defined
+ ansible_ssh_user: root
-- name: Gather and set facts for hosts to configure
- hosts: oo_nodes_to_config
- roles:
- - openshift_facts
- tasks:
- # Since the master is registering the nodes before they are configured, we
- # need to make sure to set the node properties beforehand if we do not want
- # the defaults
- - openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: common
- local_facts:
- hostname: "{{ ec2_private_ip_address }}"
- public_hostname: "{{ ec2_ip_address }}"
- # TODO: this should be removed once openshift-sdn packages are available
- use_openshift_sdn: False
- - role: node
- local_facts:
- external_id: "{{ openshift_node_external_id | default(None) }}"
- resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
- resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
- pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
- labels: "{{ openshfit_node_labels | default(None) }}"
- annotations: "{{ openshfit_node_annotations | default(None) }}"
-
-
-- name: Register nodes
- hosts: oo_first_master
- vars:
- openshift_nodes: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config']) }}"
- roles:
- - openshift_register_nodes
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: mktemp
-
- - name: Sync master certs to localhost
- synchronize:
- mode: pull
- checksum: yes
- src: /var/lib/openshift/openshift.local.certificates
- dest: "{{ mktemp.stdout }}"
-
-
-- name: Configure instances
- hosts: oo_nodes_to_config
- vars_files:
- - vars.yml
+- include: ../../common/openshift-node/config.yml
vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
openshift_hostname: "{{ ec2_private_ip_address }}"
openshift_public_hostname: "{{ ec2_ip_address }}"
- sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}"
- cert_parent_rel_path: openshift.local.certificates
- cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
- cert_base_path: /var/lib/openshift
- cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
- cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
- pre_tasks:
- - name: Ensure certificate directories exists
- file:
- path: "{{ item }}"
- state: directory
- with_items:
- - "{{ cert_path }}"
- - "{{ cert_parent_path }}/ca"
-
- # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
- # possibly test service started time against certificate/config file
- # timestamps in openshift-node or openshift-sdn-node to trigger notify
- - name: Sync certs to nodes
- synchronize:
- checksum: yes
- src: "{{ item.src }}"
- dest: "{{ item.dest }}"
- owner: no
- group: no
- with_items:
- - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
- dest: "{{ cert_parent_path }}"
- - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
- dest: "{{ cert_parent_path }}/ca/cert.crt"
- - local_action: file name={{ sync_tmpdir }} state=absent
- run_once: true
- roles:
- - openshift_node
- #- openshift_sdn_node
- - os_env_extras
- - os_env_extras_node
diff --git a/playbooks/aws/openshift-node/launch.yml b/playbooks/aws/openshift-node/launch.yml
index b7ef593e7..36aee14ff 100644
--- a/playbooks/aws/openshift-node/launch.yml
+++ b/playbooks/aws/openshift-node/launch.yml
@@ -4,14 +4,12 @@
connection: local
gather_facts: no
+# TODO: modify atomic_ami based on deployment_type
vars:
inst_region: us-east-1
atomic_ami: ami-86781fee
user_data_file: user_data.txt
- vars_files:
- - vars.yml
-
tasks:
- name: Launch instances
ec2:
@@ -33,7 +31,7 @@
with_items: ec2.instances
- name: Add Name and environment tags to instances
- ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present"
+ ec2_tag: resource={{ item.1.id }} region={{ inst_region }} state=present
with_together:
- oo_new_inst_names
- ec2.instances
@@ -42,7 +40,7 @@
Name: "{{ item.0 }}"
- name: Add other tags to instances
- ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
+ ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present
with_items: ec2.instances
args:
tags: "{{ oo_new_inst_tags }}"
@@ -59,7 +57,7 @@
- ec2.instances
- name: Wait for ssh
- wait_for: "port=22 host={{ item.dns_name }}"
+ wait_for: port=22 host={{ item.dns_name }}
with_items: ec2.instances
- name: Wait for root user setup
diff --git a/playbooks/aws/openshift-node/terminate.yml b/playbooks/aws/openshift-node/terminate.yml
index 1c0c77eb7..40ae56f99 100644
--- a/playbooks/aws/openshift-node/terminate.yml
+++ b/playbooks/aws/openshift-node/terminate.yml
@@ -1,15 +1,15 @@
---
-- name: Populate oo_nodes_to_terminate host group if needed
+- name: Populate oo_nodes_to_terminate host group
hosts: localhost
gather_facts: no
tasks:
- - name: Evaluate oo_host_group_exp if it's set
- add_host: "name={{ item }} groups=oo_nodes_to_terminate"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
+ - name: Evaluate oo_nodes_to_terminate
+ add_host: name={{ item }} groups=oo_nodes_to_terminate
+ with_items: oo_host_group_exp | default([])
-- name: Gather facts for instances to terminate
+- name: Gather dynamic inventory variables for hosts to terminate
hosts: oo_nodes_to_terminate
+ gather_facts: no
- name: Terminate instances
hosts: localhost
@@ -27,11 +27,12 @@
ignore_errors: yes
register: ec2_term
with_items: host_vars
+ when: "'oo_nodes_to_terminate' in groups"
# Fail if any of the instances failed to terminate with an error other
# than 403 Forbidden
- fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}
- when: "item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
+ when: "'oo_nodes_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
with_items: ec2_term.results
- name: Stop instance if termination failed
@@ -42,6 +43,7 @@
register: ec2_stop
when: item.failed
with_items: ec2_term.results
+ when: "'oo_nodes_to_terminate' in groups"
- name: Rename stopped instances
ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
@@ -49,4 +51,5 @@
tags:
Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
with_items: ec2_stop.results
+ when: "'oo_nodes_to_terminate' in groups"
diff --git a/playbooks/aws/openshift-node/vars.yml b/playbooks/aws/openshift-node/vars.yml
deleted file mode 100644
index c196b2fca..000000000
--- a/playbooks/aws/openshift-node/vars.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-openshift_debug_level: 4
-openshift_cluster_id: "{{ cluster_id }}"
diff --git a/playbooks/byo/openshift-master/config.yml b/playbooks/byo/openshift-master/config.yml
index 706f9285c..f61d277c6 100644
--- a/playbooks/byo/openshift-master/config.yml
+++ b/playbooks/byo/openshift-master/config.yml
@@ -1,9 +1,15 @@
---
-- name: Gather facts for node hosts
- hosts: nodes
+- name: Populate oo_masters_to_config host group
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ with_items: groups['masters']
-- name: Configure master instances
- hosts: masters
- roles:
- - openshift_master
- - openshift_sdn_master
+- include: ../../common/openshift-master/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-node/config.yml b/playbooks/byo/openshift-node/config.yml
index 69ad7a840..d569827b4 100644
--- a/playbooks/byo/openshift-node/config.yml
+++ b/playbooks/byo/openshift-node/config.yml
@@ -1,79 +1,21 @@
---
-- name: Gather facts for node hosts
- hosts: nodes
- roles:
- - openshift_facts
+- name: Populate oo_nodes_to_config and oo_first_master host groups
+ hosts: localhost
+ gather_facts: no
tasks:
- # Since the master is registering the nodes before they are configured, we
- # need to make sure to set the node properties beforehand if we do not want
- # the defaults
- - openshift_facts:
- role: 'node'
- local_facts:
- hostname: "{{ openshift_hostname | default(None) }}"
- external_id: "{{ openshift_node_external_id | default(None) }}"
- resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
- resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
- pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
- labels: "{{ openshfit_node_labels | default(None) }}"
- annotations: "{{ openshfit_node_annotations | default(None) }}"
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ with_items: groups.nodes
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups.masters[0] }}"
+ groups: oo_first_master
-- name: Register nodes
- hosts: masters[0]
+- include: ../../common/openshift-node/config.yml
vars:
- openshift_nodes: "{{ hostvars | oo_select_keys(groups['nodes']) }}"
- roles:
- - openshift_register_nodes
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: mktemp
-
- - name: Sync master certs to localhost
- synchronize:
- mode: pull
- checksum: yes
- src: /var/lib/openshift/openshift.local.certificates
- dest: "{{ mktemp.stdout }}"
-
-
-- name: Configure node instances
- hosts: nodes
- vars:
- sync_tmpdir: "{{ hostvars[groups['masters'][0]].mktemp.stdout }}"
- cert_parent_rel_path: openshift.local.certificates
- cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
- cert_base_path: /var/lib/openshift
- cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
- cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
- openshift_sdn_master_url: http://{{ hostvars[groups['masters'][0]].openshift.common.hostname }}:4001
- pre_tasks:
- - name: Ensure certificate directories exists
- file:
- path: "{{ item }}"
- state: directory
- with_items:
- - "{{ cert_path }}"
- - "{{ cert_parent_path }}/ca"
-
- # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
- # possibly test service started time against certificate/config file
- # timestamps in openshift-node or openshift-sdn-node to trigger notify
- - name: Sync certs to nodes
- synchronize:
- checksum: yes
- src: "{{ item.src }}"
- dest: "{{ item.dest }}"
- owner: no
- group: no
- with_items:
- - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
- dest: "{{ cert_parent_path }}"
- - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
- dest: "{{ cert_parent_path }}/ca/cert.crt"
- - local_action: file name={{ sync_tmpdir }} state=absent
- run_once: true
- roles:
- - openshift_node
- - openshift_sdn_node
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
new file mode 100644
index 000000000..cd282270f
--- /dev/null
+++ b/playbooks/byo/openshift_facts.yml
@@ -0,0 +1,10 @@
+---
+- name: Gather OpenShift facts
+ hosts: all
+ gather_facts: no
+ roles:
+ - openshift_facts
+ tasks:
+ - openshift_facts:
+ register: result
+ - debug: var=result
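[Annotation] This new playbook is mostly a debugging aid: with no local_facts arguments, the bare openshift_facts call gathers whatever facts the role derives for each host, and the debug task prints them. One plausible invocation against the byo inventory:

    ansible-playbook -i inventory/byo/hosts playbooks/byo/openshift_facts.yml
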
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
new file mode 100644
index 000000000..14ffa928f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -0,0 +1,4 @@
+---
+- include: ../openshift-master/config.yml
+
+- include: ../openshift-node/config.yml
diff --git a/playbooks/libvirt/openshift-master/filter_plugins b/playbooks/common/openshift-cluster/filter_plugins
index 99a95e4ca..99a95e4ca 120000
--- a/playbooks/libvirt/openshift-master/filter_plugins
+++ b/playbooks/common/openshift-cluster/filter_plugins
diff --git a/playbooks/libvirt/openshift-master/roles b/playbooks/common/openshift-cluster/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/libvirt/openshift-master/roles
+++ b/playbooks/common/openshift-cluster/roles
diff --git a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml
new file mode 100644
index 000000000..118727273
--- /dev/null
+++ b/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml
@@ -0,0 +1,11 @@
+---
+- set_fact: k8s_type="master"
+
+- name: Generate master instance name(s)
+ set_fact:
+ scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ register: master_names_output
+ with_sequence: start=1 end={{ num_masters }}
+
+- set_fact:
+ master_names: "{{ master_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
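[Annotation] The name generator above is compact enough to miss: 1048576 | random draws an integer below 0x100000, '%05x' renders it as a five-hex-digit suffix, and the final set_fact collects the per-iteration ansible_facts back into a flat list. For example, with cluster_id=mycluster and num_masters=2 it could plausibly produce:

    master_names: ['mycluster-master-04e5a', 'mycluster-master-1b00f']
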
diff --git a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml
new file mode 100644
index 000000000..162315d46
--- /dev/null
+++ b/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml
@@ -0,0 +1,11 @@
+---
+- set_fact: k8s_type="node"
+
+- name: Generate node instance name(s)
+ set_fact:
+ scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ register: node_names_output
+ with_sequence: start=1 end={{ num_nodes }}
+
+- set_fact:
+ node_names: "{{ node_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
new file mode 100644
index 000000000..e92c6f1ee
--- /dev/null
+++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
@@ -0,0 +1,7 @@
+---
+- hosts: oo_hosts_to_update
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ roles:
+ - openshift_repos
+ - os_update_latest
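[Annotation] update_repos_and_packages.yml deliberately carries no host selection of its own; each provider playbook is expected to populate oo_hosts_to_update first and then include it. A sketch of the calling side (the group selection shown is hypothetical):

    - name: Populate oo_hosts_to_update
      hosts: localhost
      gather_facts: no
      tasks:
        - add_host:
            name: "{{ item }}"
            groups: oo_hosts_to_update
          with_items: groups['tag_env-myclusterid'] | default([])

    - include: ../../common/openshift-cluster/update_repos_and_packages.yml
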
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
new file mode 100644
index 000000000..05822d118
--- /dev/null
+++ b/playbooks/common/openshift-master/config.yml
@@ -0,0 +1,19 @@
+---
+- name: Configure master instances
+ hosts: oo_masters_to_config
+ vars:
+ openshift_sdn_master_url: https://{{ openshift.common.hostname }}:4001
+ roles:
+ - openshift_master
+ - { role: openshift_sdn_master, when: openshift.common.use_openshift_sdn | bool }
+ tasks:
+ - name: Create group for deployment type
+ group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
+ changed_when: False
+
+# Additional instance config for online deployments
+- name: Additional instance config
+ hosts: oo_masters_deployment_type_online
+ roles:
+ - pods
+ - os_env_extras
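[Annotation] The group_by step is what lets a single config playbook branch per deployment type: the first play files every master into a dynamically named group, and the second play targets a group name that only exists for online deployments. The same idea in isolation (group key hypothetical):

    - hosts: all
      tasks:
        - group_by: key=dtype_{{ openshift.common.deployment_type }}
          changed_when: False  # grouping is bookkeeping, not a host change

    - hosts: dtype_online  # skipped entirely when no host matched
      tasks:
        - debug: msg="online-only steps run here"
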
diff --git a/playbooks/libvirt/openshift-node/filter_plugins b/playbooks/common/openshift-master/filter_plugins
index 99a95e4ca..99a95e4ca 120000
--- a/playbooks/libvirt/openshift-node/filter_plugins
+++ b/playbooks/common/openshift-master/filter_plugins
diff --git a/playbooks/common/openshift-master/roles b/playbooks/common/openshift-master/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/common/openshift-master/roles
@@ -0,0 +1 @@
+../../../roles/
\ No newline at end of file
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
new file mode 100644
index 000000000..c82d69c28
--- /dev/null
+++ b/playbooks/common/openshift-node/config.yml
@@ -0,0 +1,121 @@
+---
+- name: Gather and set facts for node hosts
+ hosts: oo_nodes_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ # Since the master is registering the nodes before they are configured, we
+ # need to make sure to set the node properties beforehand if we do not want
+ # the defaults
+ - openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ hostname: "{{ openshift_hostname | default(None) }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ - role: node
+ local_facts:
+ external_id: "{{ openshift_node_external_id | default(None) }}"
+ resources_cpu: "{{ openshift_node_resources_cpu | default(None) }}"
+ resources_memory: "{{ openshift_node_resources_memory | default(None) }}"
+ pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}"
+ labels: "{{ openshift_node_labels | default(None) }}"
+ annotations: "{{ openshift_node_annotations | default(None) }}"
+ deployment_type: "{{ openshift_deployment_type }}"
+
+
+- name: Create temp directory for syncing certs
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Create local temp directory for syncing certs
+ local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: mktemp
+ changed_when: False
+
+
+- name: Register nodes
+ hosts: oo_first_master
+ vars:
+ openshift_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}"
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ roles:
+ - openshift_register_nodes
+ tasks:
+ - name: Create the temp directory on the master
+ file:
+ path: "{{ sync_tmpdir }}"
+ owner: "{{ ansible_ssh_user }}"
+ mode: 0700
+ state: directory
+ changed_when: False
+
+ - name: Create a tarball of the node config directories
+ command: tar -czvf {{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz ./
+ args:
+ chdir: "{{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}"
+ with_items: openshift_nodes
+ changed_when: False
+
+ - name: Retrieve the node config tarballs from the master
+ fetch:
+ src: "{{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz"
+ dest: "{{ sync_tmpdir }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ with_items: openshift_nodes
+ changed_when: False
+
+ - name: Remove the temp directory on the master
+ file:
+ path: "{{ sync_tmpdir }}"
+ state: absent
+ changed_when: False
+
+
+- name: Configure node instances
+ hosts: oo_nodes_to_config
+ gather_facts: no
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ openshift_sdn_master_url: "https://{{ hostvars[groups['oo_first_master'][0]].openshift.common.hostname }}:4001"
+ pre_tasks:
+ - name: Ensure certificate directory exists
+ file:
+ path: "{{ openshift_node_cert_dir }}"
+ state: directory
+
+ # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
+ # possibly test service started time against certificate/config file
+ # timestamps in openshift-node or openshift-sdn-node to trigger notify
+ - name: Unarchive the tarball on the node
+ unarchive:
+ src: "{{ sync_tmpdir }}/{{ openshift.common.hostname }}.tgz"
+ dest: "{{ openshift_node_cert_dir }}"
+ roles:
+ - openshift_node
+ - { role: openshift_sdn_node, when: openshift.common.use_openshift_sdn | bool }
+ tasks:
+ - name: Create group for deployment type
+ group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
+ changed_when: False
+
+
+- name: Delete temporary directory
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - file: name={{ mktemp.stdout }} state=absent
+ changed_when: False
+
+
+# Additional config for online type deployments
+- name: Additional instance config
+ hosts: oo_nodes_deployment_type_online
+ gather_facts: no
+ roles:
+ - os_env_extras
+ - os_env_extras_node
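[Annotation] The certificate hand-off in this new file replaces the old rsync-based synchronize flow with plain tar + fetch + unarchive, which needs nothing beyond the ssh connection Ansible already has. Reduced to its essentials for one hypothetical node (paths shortened):

    - hosts: master
      tasks:
        - command: tar -czf /tmp/sync/node1.tgz .
          args:
            chdir: /var/lib/openshift/certs/node-node1
        - fetch: src=/tmp/sync/node1.tgz dest=/tmp/sync/ flat=yes

    - hosts: node1
      tasks:
        - unarchive: src=/tmp/sync/node1.tgz dest=/etc/openshift/node/
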
diff --git a/playbooks/common/openshift-node/filter_plugins b/playbooks/common/openshift-node/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/common/openshift-node/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-node/roles b/playbooks/common/openshift-node/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/common/openshift-node/roles
@@ -0,0 +1 @@
+../../../roles/
\ No newline at end of file
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
new file mode 100644
index 000000000..8b8490246
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -0,0 +1,37 @@
+---
+# TODO: fix firewall related bug with GCE and origin, since GCE is overriding
+# /etc/sysconfig/iptables
+- name: Populate oo_masters_to_config host group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+ groups: oo_first_master
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+
+- include: ../../common/openshift-cluster/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ gce_private_ip }}"
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
index 14cdd2537..34a5a0b94 100644
--- a/playbooks/gce/openshift-cluster/launch.yml
+++ b/playbooks/gce/openshift-cluster/launch.yml
@@ -4,59 +4,25 @@
connection: local
gather_facts: no
vars_files:
- - vars.yml
+ - vars.yml
tasks:
- - set_fact: k8s_type="master"
-
- - name: Generate master instance names(s)
- set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
- register: master_names_output
- with_sequence: start=1 end={{ num_masters }}
-
- # These set_fact's cannot be combined
- - set_fact:
- master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
-
- - set_fact:
- master_names: "{{ master_names_string.strip().split(' ') }}"
-
- - include: launch_instances.yml
- vars:
- instances: "{{ master_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
-
- - set_fact: k8s_type="node"
-
- - name: Generate node instance names(s)
- set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
- register: node_names_output
- with_sequence: start=1 end={{ num_nodes }}
-
- # These set_fact's cannot be combined
- - set_fact:
- node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
-
- - set_fact:
- node_names: "{{ node_names_string.strip().split(' ') }}"
-
- - include: launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
-
-- hosts: "tag_env-{{ cluster_id }}"
- roles:
- - openshift_repos
- - os_update_latest
-
-- include: ../openshift-master/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]"
-
-- include: ../openshift-node/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]"
+ - fail: msg="Deployment type not supported for libvirt provider yet"
+ when: deployment_type == 'enterprise'
+
+ - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ master_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+ - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+- include: update.yml
- include: list.yml
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
index 1124b0ea3..bab2fb9f8 100644
--- a/playbooks/gce/openshift-cluster/list.yml
+++ b/playbooks/gce/openshift-cluster/list.yml
@@ -2,16 +2,23 @@
- name: Generate oo_list_hosts group
hosts: localhost
gather_facts: no
+ vars_files:
+ - vars.yml
tasks:
- set_fact: scratch_group=tag_env-{{ cluster_id }}
when: cluster_id != ''
- set_fact: scratch_group=all
- when: scratch_group is not defined
- - add_host: name={{ item }} groups=oo_list_hosts
- with_items: groups[scratch_group] | difference(['localhost']) | difference(groups.status_terminated)
+ when: cluster_id == ''
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_list_hosts
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
- name: List Hosts
hosts: oo_list_hosts
gather_facts: no
tasks:
- - debug: msg="public:{{hostvars[inventory_hostname].gce_public_ip}} private:{{hostvars[inventory_hostname].gce_private_ip}}"
+ - debug:
+ msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }} deployment-type: {{ hostvars[inventory_hostname].group_names | oo_get_deployment_type_from_groups }}"
diff --git a/playbooks/gce/openshift-cluster/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index b4f33bd87..a68edefae 100644
--- a/playbooks/gce/openshift-cluster/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -2,41 +2,39 @@
# TODO: when we are ready to go to ansible 1.9+ support only, we can update to
# the gce task to use the disk_auto_delete parameter to avoid having to delete
# the disk as a separate step on termination
-
-- set_fact:
- machine_type: "{{ lookup('env', 'gce_machine_type') |default('n1-standard-1', true) }}"
- machine_image: "{{ lookup('env', 'gce_machine_image') |default('libra-rhel7', true) }}"
-
- name: Launch instance(s)
gce:
instance_names: "{{ instances }}"
- machine_type: "{{ machine_type }}"
- image: "{{ machine_image }}"
+ machine_type: "{{ lookup('env', 'gce_machine_type') | default('n1-standard-1', true) }}"
+ image: "{{ lookup('env', 'gce_machine_image') | default(deployment_vars[deployment_type].image, true) }}"
service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
project_id: "{{ lookup('env', 'gce_project_id') }}"
tags:
- - "created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}"
- - "env-{{ cluster }}"
- - "host-type-{{ type }}"
- - "env-host-type-{{ cluster }}-openshift-{{ type }}"
+ - created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}
+ - env-{{ cluster }}
+ - host-type-{{ type }}
+ - env-host-type-{{ cluster }}-openshift-{{ type }}
+ - deployment-type-{{ deployment_type }}
register: gce
- name: Add new instances to groups and set variables needed
add_host:
hostname: "{{ item.name }}"
ansible_ssh_host: "{{ item.public_ip }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
gce_public_ip: "{{ item.public_ip }}"
gce_private_ip: "{{ item.private_ip }}"
with_items: gce.instance_data
- name: Wait for ssh
- wait_for: "port=22 host={{ item.public_ip }}"
+ wait_for: port=22 host={{ item.public_ip }}
with_items: gce.instance_data
-- name: Wait for root user setup
- command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
+- name: Wait for user setup
+ command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup"
register: result
until: result.rc == 0
retries: 20
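[Annotation] The renamed wait-for-user-setup task is a readiness probe rather than real work: it retries a no-op ssh command until cloud-init has finished creating the login user on the new instance. Stripped to the pattern, with a hypothetical user, address, and retry budget:

    - command: >
        ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no
        -o ConnectTimeout=10 cloud-user@203.0.113.10 true
      register: probe
      until: probe.rc == 0
      retries: 20
      delay: 10
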
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
index 0281ae953..abe6a4c95 100644
--- a/playbooks/gce/openshift-cluster/terminate.yml
+++ b/playbooks/gce/openshift-cluster/terminate.yml
@@ -1,20 +1,34 @@
---
- name: Terminate instance(s)
hosts: localhost
-
+ gather_facts: no
vars_files:
- - vars.yml
+ - vars.yml
+ tasks:
+ - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_terminate
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+
+ - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_terminate
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
- include: ../openshift-node/terminate.yml
vars:
- oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]'
gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
- include: ../openshift-master/terminate.yml
vars:
- oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]'
gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml
index 973e4c3ef..9ebf39a13 100644
--- a/playbooks/gce/openshift-cluster/update.yml
+++ b/playbooks/gce/openshift-cluster/update.yml
@@ -1,13 +1,18 @@
---
-- hosts: "tag_env-{{ cluster_id }}"
- roles:
- - openshift_repos
- - os_update_latest
+- name: Populate oo_hosts_to_update group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_hosts_to_update
+ add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_update
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([])
-- include: ../openshift-master/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]"
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-- include: ../openshift-node/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]"
+- include: config.yml
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
index ed97d539c..ae33083b9 100644
--- a/playbooks/gce/openshift-cluster/vars.yml
+++ b/playbooks/gce/openshift-cluster/vars.yml
@@ -1 +1,15 @@
---
+deployment_vars:
+ origin:
+ image: centos-7
+ ssh_user:
+ sudo: yes
+ online:
+ image: libra-rhel7
+ ssh_user: root
+ sudo: no
+ enterprise:
+ image: rhel-7
+ ssh_user:
+ sudo: yes
+
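[Annotation] One subtlety in how deployment_vars is consumed throughout this commit: the lookups use default(ansible_ssh_user, true), and that second argument makes the default apply to empty values as well as undefined ones, which is why origin and enterprise can leave ssh_user blank here and still fall back to the connecting user:

    ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
    ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
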
diff --git a/playbooks/gce/openshift-master/config.yml b/playbooks/gce/openshift-master/config.yml
index 857da0763..af6000bc8 100644
--- a/playbooks/gce/openshift-master/config.yml
+++ b/playbooks/gce/openshift-master/config.yml
@@ -1,20 +1,18 @@
---
-- name: master/config.yml, populate oo_masters_to_config host group if needed
+- name: Populate oo_masters_to_config host group
hosts: localhost
gather_facts: no
tasks:
- - name: "Evaluate oo_host_group_exp if it's set"
- add_host: "name={{ item }} groups=oo_masters_to_config"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: root
+ with_items: oo_host_group_exp | default([])
-- name: "Configure instances"
- hosts: oo_masters_to_config
+- include: ../../common/openshift-master/config.yml
vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
openshift_hostname: "{{ gce_private_ip }}"
- vars_files:
- - vars.yml
- roles:
- - openshift_master
- - pods
- - os_env_extras
diff --git a/playbooks/gce/openshift-master/launch.yml b/playbooks/gce/openshift-master/launch.yml
index 287596002..ef10b6cf0 100644
--- a/playbooks/gce/openshift-master/launch.yml
+++ b/playbooks/gce/openshift-master/launch.yml
@@ -8,14 +8,12 @@
connection: local
gather_facts: no
+# TODO: modify image based on deployment_type
vars:
inst_names: "{{ oo_new_inst_names }}"
machine_type: n1-standard-1
image: libra-rhel7
- vars_files:
- - vars.yml
-
tasks:
- name: Launch instances
gce:
@@ -37,7 +35,7 @@
with_items: gce.instance_data
- name: Wait for ssh
- wait_for: "port=22 host={{ item.public_ip }}"
+ wait_for: port=22 host={{ item.public_ip }}
with_items: gce.instance_data
- name: Wait for root user setup
diff --git a/playbooks/gce/openshift-master/terminate.yml b/playbooks/gce/openshift-master/terminate.yml
index 8319774f8..452ac5199 100644
--- a/playbooks/gce/openshift-master/terminate.yml
+++ b/playbooks/gce/openshift-master/terminate.yml
@@ -3,10 +3,9 @@
hosts: localhost
gather_facts: no
tasks:
- - name: Evaluate oo_host_group_exp if it's set
- add_host: "name={{ item }} groups=oo_masters_to_terminate"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
+ - name: Evaluate oo_masters_to_terminate
+ add_host: name={{ item }} groups=oo_masters_to_terminate
+ with_items: oo_host_group_exp | default([])
- name: Terminate master instances
hosts: localhost
@@ -22,6 +21,7 @@
instance_names: "{{ groups['oo_masters_to_terminate'] }}"
disks: "{{ groups['oo_masters_to_terminate'] }}"
register: gce
+ when: "'oo_masters_to_terminate' in groups"
- name: Remove disks of instances
gce_pd:
@@ -32,5 +32,4 @@
zone: "{{ gce.zone }}"
state: absent
with_items: gce.instance_names
-
-
+ when: "'oo_masters_to_terminate' in groups"
diff --git a/playbooks/gce/openshift-master/vars.yml b/playbooks/gce/openshift-master/vars.yml
deleted file mode 100644
index c196b2fca..000000000
--- a/playbooks/gce/openshift-master/vars.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-openshift_debug_level: 4
-openshift_cluster_id: "{{ cluster_id }}"
diff --git a/playbooks/gce/openshift-node/config.yml b/playbooks/gce/openshift-node/config.yml
index 771cc3a94..5b1601176 100644
--- a/playbooks/gce/openshift-node/config.yml
+++ b/playbooks/gce/openshift-node/config.yml
@@ -1,100 +1,24 @@
---
-- name: node/config.yml, populate oo_nodes_to_config host group if needed
+- name: Populate oo_nodes_to_config and oo_first_master host groups
hosts: localhost
gather_facts: no
tasks:
- - name: Evaluate oo_host_group_exp
- add_host: "name={{ item }} groups=oo_nodes_to_config"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
- - add_host:
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: root
+ with_items: oo_host_group_exp | default([])
+ - name: Evaluate oo_first_master
+ add_host:
name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
groups: oo_first_master
- when: oo_host_group_exp is defined
+ ansible_ssh_user: root
-- name: Gather and set facts for hosts to configure
- hosts: oo_nodes_to_config
- roles:
- - openshift_facts
- tasks:
- # Since the master is registering the nodes before they are configured, we
- # need to make sure to set the node properties beforehand if we do not want
- # the defaults
- - openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: common
- local_facts:
- hostname: "{{ gce_private_ip }}"
- - role: node
- local_facts:
- external_id: "{{ openshift_node_external_id | default(None) }}"
- resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
- resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
- pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
- labels: "{{ openshfit_node_labels | default(None) }}"
- annotations: "{{ openshfit_node_annotations | default(None) }}"
-
-
-- name: Register nodes
- hosts: oo_first_master
- vars:
- openshift_nodes: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config']) }}"
- roles:
- - openshift_register_nodes
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: mktemp
-
- - name: Sync master certs to localhost
- synchronize:
- mode: pull
- checksum: yes
- src: /var/lib/openshift/openshift.local.certificates
- dest: "{{ mktemp.stdout }}"
-
-- name: Configure instances
- hosts: oo_nodes_to_config
- vars_files:
- - vars.yml
+- include: ../../common/openshift-node/config.yml
vars:
- sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}"
- cert_parent_rel_path: openshift.local.certificates
- cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
- cert_base_path: /var/lib/openshift
- cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
- cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
- pre_tasks:
- - name: Ensure certificate directories exists
- file:
- path: "{{ item }}"
- state: directory
- with_items:
- - "{{ cert_path }}"
- - "{{ cert_parent_path }}/ca"
-
- # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
- # possibly test service started time against certificate/config file
- # timestamps in openshift-node or openshift-sdn-node to trigger notify
- - name: Sync certs to nodes
- synchronize:
- checksum: yes
- src: "{{ item.src }}"
- dest: "{{ item.dest }}"
- owner: no
- group: no
- with_items:
- - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
- dest: "{{ cert_parent_path }}"
- - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
- dest: "{{ cert_parent_path }}/ca/cert.crt"
- - local_action: file name={{ sync_tmpdir }} state=absent
- run_once: true
- roles:
- - openshift_node
- - os_env_extras
- - os_env_extras_node
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ gce_private_ip }}"
diff --git a/playbooks/gce/openshift-node/launch.yml b/playbooks/gce/openshift-node/launch.yml
index 73d0478ab..086ba58bc 100644
--- a/playbooks/gce/openshift-node/launch.yml
+++ b/playbooks/gce/openshift-node/launch.yml
@@ -8,14 +8,12 @@
connection: local
gather_facts: no
+# TODO: modify image based on deployment_type
vars:
inst_names: "{{ oo_new_inst_names }}"
machine_type: n1-standard-1
image: libra-rhel7
- vars_files:
- - vars.yml
-
tasks:
- name: Launch instances
gce:
@@ -37,7 +35,7 @@
with_items: gce.instance_data
- name: Wait for ssh
- wait_for: "port=22 host={{ item.public_ip }}"
+ wait_for: port=22 host={{ item.public_ip }}
with_items: gce.instance_data
- name: Wait for root user setup
diff --git a/playbooks/gce/openshift-node/terminate.yml b/playbooks/gce/openshift-node/terminate.yml
index 7d71dfcab..357e0c295 100644
--- a/playbooks/gce/openshift-node/terminate.yml
+++ b/playbooks/gce/openshift-node/terminate.yml
@@ -3,10 +3,9 @@
hosts: localhost
gather_facts: no
tasks:
- - name: Evaluate oo_host_group_exp if it's set
- add_host: "name={{ item }} groups=oo_nodes_to_terminate"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
+ - name: Evaluate oo_nodes_to_terminate
+ add_host: name={{ item }} groups=oo_nodes_to_terminate
+ with_items: oo_host_group_exp | default([])
- name: Terminate node instances
hosts: localhost
@@ -22,6 +21,7 @@
instance_names: "{{ groups['oo_nodes_to_terminate'] }}"
disks: "{{ groups['oo_nodes_to_terminate'] }}"
register: gce
+ when: "'oo_nodes_to_terminate' in groups"
- name: Remove disks of instances
gce_pd:
@@ -32,5 +32,4 @@
zone: "{{ gce.zone }}"
state: absent
with_items: gce.instance_names
-
-
+ when: "'oo_nodes_to_terminate' in groups"
diff --git a/playbooks/gce/openshift-node/vars.yml b/playbooks/gce/openshift-node/vars.yml
deleted file mode 100644
index c196b2fca..000000000
--- a/playbooks/gce/openshift-node/vars.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-openshift_debug_level: 4
-openshift_cluster_id: "{{ cluster_id }}"
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
new file mode 100644
index 000000000..faf278b10
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -0,0 +1,38 @@
+---
+# TODO: need to figure out a plan for setting hostname, currently the default
+# is localhost, so no hostname (or public_hostname) value is getting
+# assigned
+
+- name: Populate oo_masters_to_config host group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: oo_masters_to_config
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: oo_nodes_to_config
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: oo_first_master
+ when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+
+- include: ../../common/openshift-cluster/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
index 6f2df33af..a7ddc1e7e 100644
--- a/playbooks/libvirt/openshift-cluster/launch.yml
+++ b/playbooks/libvirt/openshift-cluster/launch.yml
@@ -1,65 +1,36 @@
+---
- name: Launch instance(s)
hosts: localhost
- connection: local
gather_facts: no
-
- vars:
- libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift"
- libvirt_storage_pool: 'openshift'
- libvirt_uri: 'qemu:///system'
-
vars_files:
- - vars.yml
-
+ - vars.yml
+ vars:
+ os_libvirt_storage_pool: "{{ libvirt_storage_pool | default('images') }}"
+ os_libvirt_storage_pool_path: "{{ libvirt_storage_pool_path | default('/var/lib/libvirt/images') }}"
+ os_libvirt_network: "{{ libvirt_network | default('default') }}"
+ image_url: "{{ deployment_vars[deployment_type].image.url }}"
+ image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}"
+ image_name: "{{ deployment_vars[deployment_type].image.name }}"
tasks:
- - set_fact:
- k8s_type: master
-
- - name: Generate master instance name(s)
- set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format( 1048576 | random ) }}"
- register: master_names_output
- with_sequence: start=1 end='{{ num_masters }}'
+ - fail: msg="Deployment type not supported for libvirt provider yet"
+ when: deployment_type in ['online', 'enterprise']
- - set_fact:
- master_names: "{{ master_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
+ - include: tasks/configure_libvirt.yml
- - include: launch_instances.yml
- vars:
- instances: '{{ master_names }}'
- cluster: '{{ cluster_id }}'
- type: '{{ k8s_type }}'
- group_name: 'tag_env-host-type-{{ cluster_id }}-openshift-master'
+ - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ master_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
- - set_fact:
- k8s_type: node
+ - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
- - name: Generate node instance name(s)
- set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format( 1048576 | random ) }}"
- register: node_names_output
- with_sequence: start=1 end='{{ num_nodes }}'
+- include: update.yml
- - set_fact:
- node_names: "{{ node_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
-
- - include: launch_instances.yml
- vars:
- instances: '{{ node_names }}'
- cluster: '{{ cluster_id }}'
- type: '{{ k8s_type }}'
-
-- hosts: 'tag_env-{{ cluster_id }}'
- roles:
- - openshift_repos
- - os_update_latest
-
-- include: ../openshift-master/config.yml
- vars:
- oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]'
- oo_env: '{{ cluster_id }}'
-
-- include: ../openshift-node/config.yml
- vars:
- oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]'
- oo_env: '{{ cluster_id }}'
+- include: list.yml
diff --git a/playbooks/libvirt/openshift-cluster/launch_instances.yml b/playbooks/libvirt/openshift-cluster/launch_instances.yml
deleted file mode 100644
index 3bbcae981..000000000
--- a/playbooks/libvirt/openshift-cluster/launch_instances.yml
+++ /dev/null
@@ -1,102 +0,0 @@
-- name: Create the libvirt storage directory for openshift
- file:
- dest: '{{ libvirt_storage_pool_path }}'
- state: directory
-
-- name: Download Base Cloud image
- get_url:
- url: '{{ base_image_url }}'
- sha256sum: '{{ base_image_sha256 }}'
- dest: '{{ libvirt_storage_pool_path }}/{{ base_image_name }}'
-
-- name: Create the cloud-init config drive path
- file:
- dest: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack/latest'
- state: directory
- with_items: '{{ instances }}'
-
-- name: Create the cloud-init config drive files
- template:
- src: '{{ item[1] }}'
- dest: '{{ libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/openstack/latest/{{ item[1] }}'
- with_nested:
- - '{{ instances }}'
- - [ user-data, meta-data ]
-
-- name: Create the cloud-init config drive
- command: 'genisoimage -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
- args:
- chdir: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack/latest'
- creates: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
- with_items: '{{ instances }}'
-
-- name: Create the libvirt storage pool for openshift
- command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}'
- ignore_errors: yes
-
-- name: Refresh the libvirt storage pool for openshift
- command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'
-
-- name: Create VMs drives
- command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ base_image_name }} --backing-vol-format qcow2'
- with_items: '{{ instances }}'
-
-- name: Create VMs
- virt:
- name: '{{ item }}'
- command: define
- xml: "{{ lookup('template', '../templates/domain.xml') }}"
- uri: '{{ libvirt_uri }}'
- with_items: '{{ instances }}'
-
-- name: Start VMs
- virt:
- name: '{{ item }}'
- state: running
- uri: '{{ libvirt_uri }}'
- with_items: '{{ instances }}'
-
-- name: Collect MAC addresses of the VMs
- shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -'
- register: scratch_mac
- with_items: '{{ instances }}'
-
-- name: Wait for the VMs to get an IP
- command: "egrep -c '{{ scratch_mac.results | oo_collect('stdout') | join('|') }}' /proc/net/arp"
- ignore_errors: yes
- register: nb_allocated_ips
- until: nb_allocated_ips.stdout == '{{ instances | length }}'
- retries: 30
- delay: 1
-
-- name: Collect IP addresses of the VMs
- shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp"
- register: scratch_ip
- with_items: '{{ scratch_mac.results }}'
-
-- set_fact:
- ips: "{{ scratch_ip.results | oo_collect('stdout') }}"
-
-- name: Add new instances
- add_host:
- hostname: '{{ item.0 }}'
- ansible_ssh_host: '{{ item.1 }}'
- ansible_ssh_user: root
- groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}'
- with_together:
- - instances
- - ips
-
-- name: Wait for ssh
- wait_for:
- host: '{{ item }}'
- port: 22
- with_items: ips
-
-- name: Wait for root user setup
- command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item }} echo root user is setup'
- register: result
- until: result.rc == 0
- retries: 30
- delay: 1
- with_items: ips
diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
index 6bf07e3c6..25a25f791 100644
--- a/playbooks/libvirt/openshift-cluster/list.yml
+++ b/playbooks/libvirt/openshift-cluster/list.yml
@@ -1,43 +1,23 @@
+---
- name: Generate oo_list_hosts group
hosts: localhost
- connection: local
gather_facts: no
-
- vars:
- libvirt_uri: 'qemu:///system'
-
+ vars_files:
+ - vars.yml
tasks:
- - name: List VMs
- virt:
- command: list_vms
- register: list_vms
-
- - name: Collect MAC addresses of the VMs
- shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -'
- register: scratch_mac
- with_items: '{{ list_vms.list_vms }}'
- when: item|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
-
- - name: Collect IP addresses of the VMs
- shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp"
- register: scratch_ip
- with_items: '{{ scratch_mac.results }}'
- when: item.skipped is not defined
-
- - name: Add hosts
- add_host:
- hostname: '{{ item[0] }}'
- ansible_ssh_host: '{{ item[1].stdout }}'
- ansible_ssh_user: root
- groups: oo_list_hosts
- with_together:
- - '{{ list_vms.list_vms }}'
- - '{{ scratch_ip.results }}'
- when: item[1].skipped is not defined
+ - set_fact: scratch_group=tag_env-{{ cluster_id }}
+ when: cluster_id != ''
+ - set_fact: scratch_group=all
+ when: cluster_id == ''
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_list_hosts
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost'])
- name: List Hosts
hosts: oo_list_hosts
-
tasks:
- - debug:
- msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}}'
+ - debug:
+ msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}} deployment-type: {{ hostvars[inventory_hostname].group_names | oo_get_deployment_type_from_groups }}'
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml
new file mode 100644
index 000000000..f237c1a60
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml
@@ -0,0 +1,6 @@
+---
+- include: configure_libvirt_storage_pool.yml
+ when: libvirt_storage_pool is defined and libvirt_storage_pool_path is defined
+
+- include: configure_libvirt_network.yml
+ when: libvirt_network is defined
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
new file mode 100644
index 000000000..1cd83f7be
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
@@ -0,0 +1,27 @@
+---
+- name: Test if libvirt network for openshift already exists
+ command: "virsh -c {{ libvirt_uri }} net-info {{ libvirt_network }}"
+ register: net_info_result
+ changed_when: False
+ failed_when: "net_info_result.rc != 0 and 'error: Network not found:' not in net_info_result.stderr"
+
+- name: Create a temp directory for the template xml file
+ command: "/usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX"
+ register: mktemp
+ when: net_info_result.rc == 1
+
+- name: Create network xml file
+ template:
+ src: templates/network.xml
+ dest: "{{ mktemp.stdout }}/network.xml"
+ when: net_info_result.rc == 1
+
+- name: Create libvirt network for openshift
+ command: "virsh -c {{ libvirt_uri }} net-create {{ mktemp.stdout }}/network.xml"
+ when: net_info_result.rc == 1
+
+- name: Remove the temp directory
+ file:
+ path: "{{ mktemp.stdout }}"
+ state: absent
+ when: net_info_result.rc == 1
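[Annotation] This file and the storage-pool tasks that follow use the same probe-then-create idempotency idiom: run an informational virsh command, reclassify its "not found" error as a plain rc=1 via failed_when, and gate the creation tasks on that rc. Generalized (resource names hypothetical):

    - command: virsh -c {{ libvirt_uri }} net-info my-network
      register: probe
      changed_when: False  # probing never changes state
      failed_when: probe.rc != 0 and 'Network not found' not in probe.stderr

    - command: virsh -c {{ libvirt_uri }} net-create /tmp/my-network.xml
      when: probe.rc == 1  # create only when the probe said 'not found'
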
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
new file mode 100644
index 000000000..817acb250
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
@@ -0,0 +1,27 @@
+---
+- name: Create libvirt storage directory for openshift
+ file:
+ dest: "{{ libvirt_storage_pool_path }}"
+ state: directory
+
+- acl:
+ default: yes
+ entity: kvm
+ etype: group
+ name: "{{ libvirt_storage_pool_path }}"
+ permissions: rwx
+ state: present
+
+- name: Test if libvirt storage pool for openshift already exists
+ command: "virsh -c {{ libvirt_uri }} pool-info {{ libvirt_storage_pool }}"
+ register: pool_info_result
+ changed_when: False
+ failed_when: "pool_info_result.rc != 0 and 'error: Storage pool not found:' not in pool_info_result.stderr"
+
+- name: Create the libvirt storage pool for openshift
+ command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}'
+ when: pool_info_result.rc == 1
+
+- name: Refresh the libvirt storage pool for openshift
+ command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'
+ when: pool_info_result.rc == 1
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
new file mode 100644
index 000000000..96d440096
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -0,0 +1,104 @@
+---
+# TODO: Add support for choosing the base image based on deployment_type and
+# desired os (desired os needs support added in bin/cluster with sane defaults:
+# fedora/centos for origin, rhel for online/enterprise)
+
+# TODO: create a role to encapsulate some of this complexity, possibly also
+# create a module to manage the storage tasks, network tasks, and possibly
+# even handle the libvirt tasks to set metadata in the domain xml and be able
+# to create/query data about vms without having to use xml; the python
+# libvirt bindings look like a good candidate for this
+
+- name: Download Base Cloud image
+ get_url:
+ url: '{{ image_url }}'
+ sha256sum: '{{ image_sha256 }}'
+ dest: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}'
+
+- name: Create the cloud-init config drive path
+ file:
+ dest: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/'
+ state: directory
+ with_items: instances
+
+- name: Create the cloud-init config drive files
+ template:
+ src: '{{ item[1] }}'
+ dest: '{{ os_libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}'
+ with_nested:
+ - instances
+ - [ user-data, meta-data ]
+
+- name: Create the cloud-init config drive
+ command: 'genisoimage -output {{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
+ args:
+ chdir: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/'
+ creates: '{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
+ with_items: instances
+
+- name: Create VMs drives
+ command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ os_libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2'
+ with_items: instances
+
+- name: Create VMs
+ virt:
+ name: '{{ item }}'
+ command: define
+ xml: "{{ lookup('template', '../templates/domain.xml') }}"
+ uri: '{{ libvirt_uri }}'
+ with_items: instances
+
+- name: Start VMs
+ virt:
+ name: '{{ item }}'
+ state: running
+ uri: '{{ libvirt_uri }}'
+ with_items: instances
+
+- name: Collect MAC addresses of the VMs
+ shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -'
+ register: scratch_mac
+ with_items: instances
+
+- name: Wait for the VMs to get an IP
+ command: "egrep -c '{{ scratch_mac.results | oo_collect('stdout') | join('|') }}' /proc/net/arp"
+ ignore_errors: yes
+ register: nb_allocated_ips
+ until: nb_allocated_ips.stdout == '{{ instances | length }}'
+ retries: 30
+ delay: 1
+
+- name: Collect IP addresses of the VMs
+ shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp"
+ register: scratch_ip
+ with_items: scratch_mac.results
+
+- set_fact:
+ ips: "{{ scratch_ip.results | oo_collect('stdout') }}"
+
+- name: Add new instances
+ add_host:
+ hostname: '{{ item.0 }}'
+ ansible_ssh_host: '{{ item.1 }}'
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}'
+ with_together:
+ - instances
+ - ips
+
+- name: Wait for ssh
+ wait_for:
+ host: '{{ item }}'
+ port: 22
+ with_items: ips
+
+- name: Wait for openshift user setup
+ command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null openshift@{{ item.1 }} echo openshift user is setup'
+ register: result
+ until: result.rc == 0
+ retries: 30
+ delay: 1
+ with_together:
+ - instances
+ - ips
diff --git a/playbooks/libvirt/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml
index da037d138..8cb017367 100644
--- a/playbooks/libvirt/templates/domain.xml
+++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml
@@ -1,6 +1,12 @@
<domain type='kvm' id='8'>
<name>{{ item }}</name>
<memory unit='GiB'>1</memory>
+ <metadata xmlns:ansible="https://github.com/ansible/ansible">
+ <ansible:tag>deployment-type-{{ deployment_type }}</ansible:tag>
+ <ansible:tag>env-{{ cluster }}</ansible:tag>
+ <ansible:tag>env-host-type-{{ cluster }}-openshift-{{ type }}</ansible:tag>
+ <ansible:tag>host-type-{{ type }}</ansible:tag>
+ </metadata>
<currentMemory unit='GiB'>1</currentMemory>
<vcpu placement='static'>2</vcpu>
<os>
@@ -24,18 +30,18 @@
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
- <source file='{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'/>
+ <source file='{{ os_libvirt_storage_pool_path }}/{{ item }}.qcow2'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
- <source file='{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/>
+ <source file='{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/>
<target dev='vdb' bus='virtio'/>
<readonly/>
</disk>
<controller type='usb' index='0' />
<interface type='network'>
- <source network='default'/>
+ <source network='{{ os_libvirt_network }}'/>
<model type='virtio'/>
</interface>
<serial type='pty'>
@@ -49,7 +55,6 @@
</channel>
<input type='tablet' bus='usb' />
<input type='mouse' bus='ps2'/>
- <input type='keyboard' bus='ps2'/>
<graphics type='spice' autoport='yes' />
<video>
<model type='qxl' ram='65536' vram='65536' vgamem='16384' heads='1'/>
diff --git a/playbooks/libvirt/openshift-cluster/templates/meta-data b/playbooks/libvirt/openshift-cluster/templates/meta-data
new file mode 100644
index 000000000..6b421770d
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/templates/meta-data
@@ -0,0 +1,3 @@
+instance-id: {{ item[0] }}
+hostname: {{ item[0] }}
+local-hostname: {{ item[0] }}.example.com
diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml
new file mode 100644
index 000000000..86dcd62bb
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/templates/network.xml
@@ -0,0 +1,23 @@
+<network>
+ <name>openshift-ansible</name>
+ <forward mode='nat'>
+ <nat>
+ <port start='1024' end='65535'/>
+ </nat>
+ </forward>
+ <!-- TODO: query for the first available virbr interface -->
+ <bridge name='virbr3' stp='on' delay='0'/>
+ <!-- TODO: make overridable -->
+ <domain name='example.com'/>
+ <dns>
+ <!-- TODO: automatically add host entries -->
+ </dns>
+ <!-- TODO: query for available address space -->
+ <ip address='192.168.55.1' netmask='255.255.255.0'>
+ <dhcp>
+ <range start='192.168.55.2' end='192.168.55.254'/>
+ <!-- TODO: add static address entries for the hosts to be created -->
+ </dhcp>
+ </ip>
+</network>
+
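This template describes a transient NAT network for the cluster; a minimal sketch of starting it through the python bindings, assuming the template has been rendered out to network.xml (the playbook itself presumably drives virsh the same way it does for the storage pool):

    import libvirt

    def create_network(uri='qemu:///system', xml_path='network.xml'):
        """Start a transient network, the equivalent of `virsh net-create`."""
        conn = libvirt.open(uri)
        with open(xml_path) as f:
            return conn.networkCreateXML(f.read())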
diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data
new file mode 100644
index 000000000..77b788109
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/templates/user-data
@@ -0,0 +1,23 @@
+#cloud-config
+disable_root: true
+
+hostname: {{ item[0] }}
+fqdn: {{ item[0] }}.example.com
+manage_etc_hosts: true
+
+users:
+ - default
+ - name: root
+ ssh_authorized_keys:
+ - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
+
+system_info:
+ default_user:
+ name: openshift
+ sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
+ssh_authorized_keys:
+ - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
+
+bootcmd:
+ - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
index c609169d3..b173a09dd 100644
--- a/playbooks/libvirt/openshift-cluster/terminate.yml
+++ b/playbooks/libvirt/openshift-cluster/terminate.yml
@@ -1,41 +1,44 @@
+---
+# TODO: does not handle a non-existent cluster gracefully
+
- name: Terminate instance(s)
hosts: localhost
- connection: local
gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - set_fact: cluster_group=tag_env-{{ cluster_id }}
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_terminate
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[cluster_group] | default([])
- vars:
- libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift"
- libvirt_storage_pool: 'openshift'
- libvirt_uri: 'qemu:///system'
+ - name: Destroy VMs
+ virt:
+ name: '{{ item[0] }}'
+ command: '{{ item[1] }}'
+ uri: '{{ libvirt_uri }}'
+ with_nested:
+ - groups['oo_hosts_to_terminate']
+ - [ destroy, undefine ]
- tasks:
- - name: List VMs
- virt:
- command: list_vms
- register: list_vms
+ - name: Delete VMs drives
+ command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}.qcow2'
+ args:
+ removes: '{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'
+ with_items: groups['oo_hosts_to_terminate']
- - name: Destroy VMs
- virt:
- name: '{{ item[0] }}'
- command: '{{ item[1] }}'
- uri: '{{ libvirt_uri }}'
- with_nested:
- - '{{ list_vms.list_vms }}'
- - [ destroy, undefine ]
- when: item[0]|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
+ - name: Delete the VM cloud-init image
+ file:
+ path: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
+ state: absent
+ with_items: groups['oo_hosts_to_terminate']
- - name: Delete VMs config drive
- file:
- path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack'
- state: absent
- with_items: '{{ list_vms.list_vms }}'
- when: item|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
+ - name: Remove the cloud-init config directory
+ file:
+ path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
+ state: absent
+ with_items: groups['oo_hosts_to_terminate']
- - name: Delete VMs drives
- command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item[0] }}{{ item[1] }}'
- args:
- removes: '{{ libvirt_storage_pool_path }}/{{ item[0] }}{{ item[1] }}'
- with_nested:
- - '{{ list_vms.list_vms }}'
- - [ '_configdrive', '_cloud-init.iso', '.qcow2' ]
- when: item[0]|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml
new file mode 100644
index 000000000..57e36db9e
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/update.yml
@@ -0,0 +1,18 @@
+---
+- name: Populate oo_hosts_to_update group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_hosts_to_update
+ add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_update
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([])
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- include: config.yml
diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml
index 4e4eecd46..65d954fee 100644
--- a/playbooks/libvirt/openshift-cluster/vars.yml
+++ b/playbooks/libvirt/openshift-cluster/vars.yml
@@ -1,7 +1,33 @@
-# base_image_url: http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2
-# base_image_name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2
-# base_image_sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86
+---
+libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift-ansible"
+libvirt_storage_pool: 'openshift-ansible'
+libvirt_network: openshift-ansible
+libvirt_uri: 'qemu:///system'
-base_image_url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
-base_image_name: CentOS-7-x86_64-GenericCloud.qcow2
-base_image_sha256: e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab
+deployment_vars:
+ origin:
+ image:
+ url: "http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2"
+ name: CentOS-7-x86_64-GenericCloud.qcow2
+ sha256: e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab
+ ssh_user: openshift
+ sudo: yes
+ online:
+ image:
+ url:
+ name:
+ sha256:
+ ssh_user: root
+ sudo: no
+ enterprise:
+ image:
+ url:
+ name:
+ sha256:
+ ssh_user: openshift
+ sudo: yes
+# origin:
+# fedora:
+# url: "http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2"
+# name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2
+# sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86
diff --git a/playbooks/libvirt/openshift-master/config.yml b/playbooks/libvirt/openshift-master/config.yml
deleted file mode 100644
index dd95fd57f..000000000
--- a/playbooks/libvirt/openshift-master/config.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-- name: master/config.yml, populate oo_masters_to_config host group if needed
- hosts: localhost
- gather_facts: no
- tasks:
- - name: "Evaluate oo_host_group_exp if it's set"
- add_host:
- name: '{{ item }}'
- groups: oo_masters_to_config
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
-
-- name: Configure instances
- hosts: oo_masters_to_config
- vars:
- openshift_hostname: '{{ ansible_default_ipv4.address }}'
- vars_files:
- - vars.yml
- roles:
- - openshift_master
- - pods
- - os_env_extras
diff --git a/playbooks/libvirt/openshift-master/vars.yml b/playbooks/libvirt/openshift-master/vars.yml
deleted file mode 100644
index ad0c0fbe2..000000000
--- a/playbooks/libvirt/openshift-master/vars.yml
+++ /dev/null
@@ -1 +0,0 @@
-openshift_debug_level: 4
diff --git a/playbooks/libvirt/openshift-node/config.yml b/playbooks/libvirt/openshift-node/config.yml
deleted file mode 100644
index 3244a8046..000000000
--- a/playbooks/libvirt/openshift-node/config.yml
+++ /dev/null
@@ -1,102 +0,0 @@
-- name: node/config.yml, populate oo_nodes_to_config host group if needed
- hosts: localhost
- gather_facts: no
- tasks:
- - name: "Evaluate oo_host_group_exp if it's set"
- add_host:
- name: '{{ item }}'
- groups: oo_nodes_to_config
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
-
- - add_host:
- name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
- groups: oo_first_master
- when: oo_host_group_exp is defined
-
-
-- name: Gather and set facts for hosts to configure
- hosts: oo_nodes_to_config
- roles:
- - openshift_facts
- tasks:
- # Since the master is registering the nodes before they are configured, we
- # need to make sure to set the node properties beforehand if we do not want
- # the defaults
- - openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: common
- local_facts:
- hostname: "{{ ansible_default_ipv4.address }}"
- - role: node
- local_facts:
- external_id: "{{ openshift_node_external_id | default(None) }}"
- resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
- resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
- pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
- labels: "{{ openshfit_node_labels | default(None) }}"
- annotations: "{{ openshfit_node_annotations | default(None) }}"
-
-
-- name: Register nodes
- hosts: oo_first_master
- vars:
- openshift_nodes: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config']) }}"
- roles:
- - openshift_register_nodes
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: mktemp
-
- - name: Sync master certs to localhost
- synchronize:
- mode: pull
- checksum: yes
- src: /var/lib/openshift/openshift.local.certificates
- dest: "{{ mktemp.stdout }}"
-
-- name: Configure instances
- hosts: oo_nodes_to_config
- vars_files:
- - vars.yml
- vars:
- sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}"
- cert_parent_rel_path: openshift.local.certificates
- cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
- cert_base_path: /var/lib/openshift
- cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
- cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
- pre_tasks:
- - name: Ensure certificate directories exists
- file:
- path: "{{ item }}"
- state: directory
- with_items:
- - "{{ cert_path }}"
- - "{{ cert_parent_path }}/ca"
-
- # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
- # possibly test service started time against certificate/config file
- # timestamps in openshift-node or openshift-sdn-node to trigger notify
- - name: Sync certs to nodes
- synchronize:
- checksum: yes
- src: "{{ item.src }}"
- dest: "{{ item.dest }}"
- owner: no
- group: no
- with_items:
- - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
- dest: "{{ cert_parent_path }}"
- - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
- dest: "{{ cert_parent_path }}/ca/cert.crt"
- - local_action: file name={{ sync_tmpdir }} state=absent
- run_once: true
- roles:
- - openshift_node
- - os_env_extras
- - os_env_extras_node
diff --git a/playbooks/libvirt/openshift-node/roles b/playbooks/libvirt/openshift-node/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/libvirt/openshift-node/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles
\ No newline at end of file
diff --git a/playbooks/libvirt/openshift-node/vars.yml b/playbooks/libvirt/openshift-node/vars.yml
deleted file mode 100644
index ad0c0fbe2..000000000
--- a/playbooks/libvirt/openshift-node/vars.yml
+++ /dev/null
@@ -1 +0,0 @@
-openshift_debug_level: 4
diff --git a/playbooks/libvirt/templates/meta-data b/playbooks/libvirt/templates/meta-data
deleted file mode 100644
index 5d779519f..000000000
--- a/playbooks/libvirt/templates/meta-data
+++ /dev/null
@@ -1,2 +0,0 @@
-instance-id: {{ item[0] }}
-local-hostname: {{ item[0] }}
diff --git a/playbooks/libvirt/templates/user-data b/playbooks/libvirt/templates/user-data
deleted file mode 100644
index 985badc8e..000000000
--- a/playbooks/libvirt/templates/user-data
+++ /dev/null
@@ -1,10 +0,0 @@
-#cloud-config
-
-disable_root: 0
-
-system_info:
- default_user:
- name: root
-
-ssh_authorized_keys:
- - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index 941190534..c55677c3f 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -1,7 +1,7 @@
---
- name: Set common OpenShift facts
openshift_facts:
- role: 'common'
+ role: common
local_facts:
cluster_id: "{{ openshift_cluster_id | default('default') }}"
debug_level: "{{ openshift_debug_level | default(0) }}"
@@ -10,7 +10,7 @@
public_hostname: "{{ openshift_public_hostname | default(None) }}"
public_ip: "{{ openshift_public_ip | default(None) }}"
use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
-
+ deployment_type: "{{ openshift_deployment_type }}"
- name: Set hostname
hostname: name={{ openshift.common.hostname }}
diff --git a/roles/openshift_common/vars/main.yml b/roles/openshift_common/vars/main.yml
index 50816d319..9f657a2c7 100644
--- a/roles/openshift_common/vars/main.yml
+++ b/roles/openshift_common/vars/main.yml
@@ -5,3 +5,7 @@
# chains with the public zone (or the zone associated with the correct
# interfaces)
os_firewall_use_firewalld: False
+
+openshift_cert_parent_dir: /var/lib/openshift
+openshift_cert_relative_dir: openshift.local.certificates
+openshift_cert_dir: "{{ openshift_cert_parent_dir }}/{{ openshift_cert_relative_dir }}"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 0dd343443..1e0d5c605 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -21,8 +21,11 @@ class OpenShiftFactsUnsupportedRoleError(Exception):
class OpenShiftFactsFileWriteError(Exception):
pass
+class OpenShiftFactsMetadataUnavailableError(Exception):
+ pass
+
class OpenShiftFacts():
- known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn']
+ known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns']
def __init__(self, role, filename, local_facts):
self.changed = False
@@ -169,20 +172,18 @@ class OpenShiftFacts():
return hostname
def get_defaults(self, roles):
- hardware_facts = self.get_hardware_facts()
- net_facts = self.get_net_facts()
- base_facts = self.get_base_facts()
+ ansible_facts = self.get_ansible_facts()
defaults = dict()
common = dict(use_openshift_sdn=True)
- ip = net_facts['default_ipv4']['address']
+ ip = ansible_facts['default_ipv4']['address']
common['ip'] = ip
common['public_ip'] = ip
rc, output, error = module.run_command(['hostname', '-f'])
hostname_f = output.strip() if rc == 0 else ''
- hostname_values = [hostname_f, base_facts['nodename'], base_facts['fqdn']]
+ hostname_values = [hostname_f, ansible_facts['nodename'], ansible_facts['fqdn']]
hostname = self.choose_hostname(hostname_values)
common['hostname'] = hostname
@@ -196,14 +197,14 @@ class OpenShiftFacts():
master = dict(api_use_ssl=True, api_port='8443',
console_use_ssl=True, console_path='/console',
console_port='8443', etcd_use_ssl=False,
- etcd_port='4001')
+ etcd_port='4001', portal_net='172.30.17.0/24')
defaults['master'] = master
if 'node' in roles:
node = dict(external_id=common['hostname'], pod_cidr='',
labels={}, annotations={})
- node['resources_cpu'] = hardware_facts['processor_cores']
- node['resources_memory'] = int(int(hardware_facts['memtotal_mb']) * 1024 * 1024 * 0.75)
+ node['resources_cpu'] = ansible_facts['processor_cores']
+ node['resources_memory'] = int(int(ansible_facts['memtotal_mb']) * 1024 * 1024 * 0.75)
defaults['node'] = node
return defaults
@@ -226,8 +227,7 @@ class OpenShiftFacts():
def query_metadata(self, metadata_url, headers=None, expect_json=False):
r, info = fetch_url(module, metadata_url, headers=headers)
if info['status'] != 200:
- module.fail_json(msg='Failed to query metadata', result=r,
- info=info)
+ raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
if expect_json:
return module.from_json(r.read())
else:
@@ -252,40 +252,27 @@ class OpenShiftFacts():
def get_provider_metadata(self, metadata_url, supports_recursive=False,
headers=None, expect_json=False):
- if supports_recursive:
- metadata = self.query_metadata(metadata_url, headers, expect_json)
- else:
- metadata = self.walk_metadata(metadata_url, headers, expect_json)
+ try:
+ if supports_recursive:
+ metadata = self.query_metadata(metadata_url, headers, expect_json)
+ else:
+ metadata = self.walk_metadata(metadata_url, headers, expect_json)
+ except OpenShiftFactsMetadataUnavailableError as e:
+ metadata = None
return metadata
- def get_hardware_facts(self):
- if not hasattr(self, 'hardware_facts'):
- self.hardware_facts = Hardware().populate()
- return self.hardware_facts
-
- def get_base_facts(self):
- if not hasattr(self, 'base_facts'):
- self.base_facts = Facts().populate()
- return self.base_facts
-
- def get_virt_facts(self):
- if not hasattr(self, 'virt_facts'):
- self.virt_facts = Virtual().populate()
- return self.virt_facts
-
- def get_net_facts(self):
- if not hasattr(self, 'net_facts'):
- self.net_facts = Network(module).populate()
- return self.net_facts
+ def get_ansible_facts(self):
+ if not hasattr(self, 'ansible_facts'):
+ self.ansible_facts = ansible_facts(module)
+ return self.ansible_facts
def guess_host_provider(self):
# TODO: cloud provider facts should probably be submitted upstream
- virt_facts = self.get_virt_facts()
- hardware_facts = self.get_hardware_facts()
- product_name = hardware_facts['product_name']
- product_version = hardware_facts['product_version']
- virt_type = virt_facts['virtualization_type']
- virt_role = virt_facts['virtualization_role']
+ ansible_facts = self.get_ansible_facts()
+ product_name = ansible_facts['product_name']
+ product_version = ansible_facts['product_version']
+ virt_type = ansible_facts['virtualization_type']
+ virt_role = ansible_facts['virtualization_role']
provider = None
metadata = None
@@ -300,8 +287,9 @@ class OpenShiftFacts():
True)
# Filter sshKeys and serviceAccounts from gce metadata
- metadata['project']['attributes'].pop('sshKeys', None)
- metadata['instance'].pop('serviceAccounts', None)
+ if metadata:
+ metadata['project']['attributes'].pop('sshKeys', None)
+ metadata['instance'].pop('serviceAccounts', None)
elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
provider = 'ec2'
metadata_url = 'http://169.254.169.254/latest/meta-data/'
@@ -310,12 +298,18 @@ class OpenShiftFacts():
provider = 'openstack'
metadata_url = 'http://169.254.169.254/openstack/latest/meta_data.json'
metadata = self.get_provider_metadata(metadata_url, True, None, True)
- ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
- metadata['ec2_compat'] = self.get_provider_metadata(ec2_compat_url)
- # Filter public_keys and random_seed from openstack metadata
- metadata.pop('public_keys', None)
- metadata.pop('random_seed', None)
+ if metadata:
+ ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
+ metadata['ec2_compat'] = self.get_provider_metadata(ec2_compat_url)
+
+ # Filter public_keys and random_seed from openstack metadata
+ metadata.pop('public_keys', None)
+ metadata.pop('random_seed', None)
+
+ if not metadata['ec2_compat']:
+ metadata = None
+
return dict(name=provider, metadata=metadata)
def normalize_provider_facts(self, provider, metadata):
@@ -479,4 +473,6 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.facts import *
from ansible.module_utils.urls import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index aa615df39..1b1210007 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -11,33 +11,67 @@
api_url: "{{ openshift_master_api_url | default(None) }}"
api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
+ console_path: "{{ openshift_master_console_path | default(None) }}"
console_port: "{{ openshift_master_console_port | default(None) }}"
console_url: "{{ openshift_master_console_url | default(None) }}"
console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+ etcd_port: "{{ openshift_master_etcd_port | default(None) }}"
etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}"
+ portal_net: "{{ openshift_master_portal_net | default(None) }}"
+
+# TODO: These values need to be configurable
+- name: Set dns OpenShift facts
+ openshift_facts:
+ role: 'dns'
+ local_facts:
+ ip: "{{ openshift.common.ip }}"
+ domain: local
- name: Install OpenShift Master package
yum: pkg=openshift-master state=installed
+ register: install_result
+
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
+
+- name: Create certificate parent directory if it doesn't exist
+ file:
+ path: "{{ openshift_cert_parent_dir }}"
+ state: directory
+
+- name: Create config parent directory if it doesn't exist
+ file:
+ path: "{{ openshift_master_config | dirname }}"
+ state: directory
+
+# TODO: should probably use a template lookup for this
+# TODO: should allow for setting --etcd, --kubernetes options
+# TODO: recreate config if values change
+- name: Use enterprise default for openshift_registry_url if not set
+ set_fact:
+ openshift_registry_url: "openshift3_beta/ose-${component}:${version}"
+ when: openshift.common.deployment_type == 'enterprise' and openshift_registry_url is not defined
+- name: Create master config
+ command: >
+ /usr/bin/openshift start master --write-config
+ --config={{ openshift_master_config }}
+ --portal-net={{ openshift.master.portal_net }}
+ --master={{ openshift.master.api_url }}
+ --public-master={{ openshift.master.public_api_url }}
+ --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://0.0.0.0:{{ openshift.master.api_port }}
+ {{ ('--images=' ~ openshift_registry_url) if openshift_registry_url is defined else '' }}
+ {{ ('--nodes=' ~ openshift_node_ips | join(',')) if openshift_node_ips is defined else '' }}
+ args:
+ chdir: "{{ openshift_cert_parent_dir }}"
+ creates: "{{ openshift_master_config }}"
-# TODO: We should pre-generate the master config and point to the generated
-# config rather than setting command line flags here
- name: Configure OpenShift settings
lineinfile:
dest: /etc/sysconfig/openshift-master
regexp: '^OPTIONS='
- line: "OPTIONS=\"--master={{ openshift.common.hostname }} --public-master={{ openshift.common.public_hostname }} {% if openshift_node_ips %} --nodes={{ openshift_node_ips | join(',') }} {% endif %} --loglevel={{ openshift.master.debug_level }}\""
- notify:
- - restart openshift-master
-
-# TODO: should this be populated by a fact based on the deployment type
-# (origin, online, enterprise)?
-- name: Set default registry url
- lineinfile:
- dest: /etc/sysconfig/openshift-master
- regexp: '^IMAGES='
- line: "IMAGES={{ openshift_registry_url }}"
- when: openshift_registry_url is defined
+ line: "OPTIONS=\"--config={{ openshift_master_config }} --loglevel={{ openshift.master.debug_level }}\""
notify:
- restart openshift-master
@@ -53,6 +87,6 @@
# TODO: Update this file if the contents of the source file are not present in
# the dest file, will need to make sure to ignore things that could be added
- name: Configure root user kubeconfig
- command: cp /var/lib/openshift/openshift.local.certificates/openshift-client/.kubeconfig /root/.kube/.kubeconfig
+ command: cp {{ openshift_cert_dir }}/openshift-client/.kubeconfig /root/.kube/.kubeconfig
args:
creates: /root/.kube/.kubeconfig
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
new file mode 100644
index 000000000..c52d957ac
--- /dev/null
+++ b/roles/openshift_master/vars/main.yml
@@ -0,0 +1,5 @@
+---
+openshift_master_config: /etc/openshift/master.yaml
+openshift_master_ca_dir: "{{ openshift_cert_dir }}/ca"
+openshift_master_ca_cert: "{{ openshift_master_ca_dir }}/cert.crt"
+openshift_master_ca_key: "{{ openshift_master_ca_dir }}/key.key"
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index e3c04585b..3d56bdd67 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -13,17 +13,22 @@
failed_when: not result.stat.exists
register: result
with_items:
- - "{{ cert_path }}"
- - "{{ cert_path }}/cert.crt"
- - "{{ cert_path }}/key.key"
- - "{{ cert_path }}/.kubeconfig"
- - "{{ cert_path }}/server.crt"
- - "{{ cert_path }}/server.key"
- - "{{ cert_parent_path }}/ca/cert.crt"
- #- "{{ cert_path }}/node.yaml"
+ - "{{ openshift_node_cert_dir }}"
+ - "{{ openshift_node_cert_dir }}/ca.crt"
+ - "{{ openshift_node_cert_dir }}/client.crt"
+ - "{{ openshift_node_cert_dir }}/client.key"
+ - "{{ openshift_node_cert_dir }}/.kubeconfig"
+ - "{{ openshift_node_cert_dir }}/node-config.yaml"
+ - "{{ openshift_node_cert_dir }}/server.crt"
+ - "{{ openshift_node_cert_dir }}/server.key"
- name: Install OpenShift Node package
yum: pkg=openshift-node state=installed
+ register: install_result
+
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
# --create-certs=false is a temporary workaround until
# https://github.com/openshift/origin/pull/1361 is merged upstream and it is
@@ -32,16 +37,7 @@
lineinfile:
dest: /etc/sysconfig/openshift-node
regexp: '^OPTIONS='
- line: "OPTIONS=\"--hostname={{ openshift.common.hostname }} --loglevel={{ openshift.node.debug_level }} --create-certs=false\""
- notify:
- - restart openshift-node
-
-- name: Set default registry url
- lineinfile:
- dest: /etc/sysconfig/openshift-node
- regexp: '^IMAGES='
- line: "IMAGES={{ openshift_registry_url }}"
- when: openshift_registry_url is defined
+ line: "OPTIONS=\"--loglevel={{ openshift.node.debug_level }} --config={{ openshift_node_cert_dir }}/node-config.yaml\""
notify:
- restart openshift-node
diff --git a/roles/openshift_node/vars/main.yml b/roles/openshift_node/vars/main.yml
new file mode 100644
index 000000000..c6be83139
--- /dev/null
+++ b/roles/openshift_node/vars/main.yml
@@ -0,0 +1,2 @@
+---
+openshift_node_cert_dir: /etc/openshift/node
diff --git a/roles/openshift_register_nodes/defaults/main.yml b/roles/openshift_register_nodes/defaults/main.yml
index 3501e8922..a0befab44 100644
--- a/roles/openshift_register_nodes/defaults/main.yml
+++ b/roles/openshift_register_nodes/defaults/main.yml
@@ -1,5 +1,2 @@
---
openshift_kube_api_version: v1beta1
-openshift_cert_dir: openshift.local.certificates
-openshift_cert_dir_parent: /var/lib/openshift
-openshift_cert_dir_abs: "{{ openshift_cert_dir_parent ~ '/' ~ openshift_cert_dir }}"
diff --git a/roles/openshift_register_nodes/library/kubernetes_register_node.py b/roles/openshift_register_nodes/library/kubernetes_register_node.py
index 8ebeb087a..1ec977716 100755
--- a/roles/openshift_register_nodes/library/kubernetes_register_node.py
+++ b/roles/openshift_register_nodes/library/kubernetes_register_node.py
@@ -97,10 +97,8 @@ class ClientConfigException(Exception):
class ClientConfig:
def __init__(self, client_opts, module):
- _, output, error = module.run_command(["/usr/bin/openshift", "ex",
- "config", "view", "-o",
- "json"] + client_opts,
- check_rc = True)
+ kubectl = module.params['kubectl_cmd']
+ _, output, error = module.run_command(kubectl + ["config", "view", "-o", "json"] + client_opts, check_rc = True)
self.config = json.loads(output)
if not (bool(self.config['clusters']) or
@@ -146,6 +144,9 @@ class ClientConfig:
def get_cluster_for_context(self, context):
return self.get_value_for_context(context, 'cluster')
+ def get_namespace_for_context(self, context):
+ return self.get_value_for_context(context, 'namespace')
+
class Util:
@staticmethod
def remove_empty_elements(mapping):
@@ -247,15 +248,15 @@ class Node:
return Util.remove_empty_elements(node)
def exists(self):
- _, output, error = self.module.run_command(["/usr/bin/osc", "get",
- "nodes"] + self.client_opts,
- check_rc = True)
+ kubectl = self.module.params['kubectl_cmd']
+ _, output, error = self.module.run_command(kubectl + ["get", "nodes"] + self.client_opts, check_rc = True)
if re.search(self.module.params['name'], output, re.MULTILINE):
return True
return False
def create(self):
- cmd = ['/usr/bin/osc'] + self.client_opts + ['create', 'node', '-f', '-']
+ kubectl = self.module.params['kubectl_cmd']
+ cmd = kubectl + self.client_opts + ['create', '-f', '-']
rc, output, error = self.module.run_command(cmd,
data=self.module.jsonify(self.get_node()))
if rc != 0:
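The new kubectl_cmd parameter is just a list prefix, so callers can point the module at any compatible client front end; for illustration (values hypothetical, mirroring the ['openshift', 'kube'] default passed by the role change below):

    # How kubectl_cmd composes with the client options:
    kubectl_cmd = ['openshift', 'kube']
    client_opts = ['--context=default', '--namespace=default']
    cmd = kubectl_cmd + client_opts + ['create', '-f', '-']
    # -> ['openshift', 'kube', '--context=default', '--namespace=default',
    #     'create', '-f', '-']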
@@ -273,24 +274,26 @@ class Node:
def main():
module = AnsibleModule(
- argument_spec = dict(
- name = dict(required = True, type = 'str'),
- host_ip = dict(type = 'str'),
- hostnames = dict(type = 'list', default = []),
- external_ips = dict(type = 'list', default = []),
- internal_ips = dict(type = 'list', default = []),
- api_version = dict(type = 'str', default = 'v1beta1', # TODO: after kube rebase, we can default to v1beta3
- choices = ['v1beta1', 'v1beta3']),
- cpu = dict(type = 'str'),
- memory = dict(type = 'str'),
- labels = dict(type = 'dict', default = {}), # TODO: needs documented
- annotations = dict(type = 'dict', default = {}), # TODO: needs documented
- pod_cidr = dict(type = 'str'), # TODO: needs documented
- external_id = dict(type = 'str'), # TODO: needs documented
- client_config = dict(type = 'str'), # TODO: needs documented
- client_cluster = dict(type = 'str', default = 'master'), # TODO: needs documented
- client_context = dict(type = 'str', default = 'master'), # TODO: needs documented
- client_user = dict(type = 'str', default = 'admin') # TODO: needs documented
+ argument_spec = dict(
+ name = dict(required = True, type = 'str'),
+ host_ip = dict(type = 'str'),
+ hostnames = dict(type = 'list', default = []),
+ external_ips = dict(type = 'list', default = []),
+ internal_ips = dict(type = 'list', default = []),
+ api_version = dict(type = 'str', default = 'v1beta1', # TODO: after kube rebase, we can default to v1beta3
+ choices = ['v1beta1', 'v1beta3']),
+ cpu = dict(type = 'str'),
+ memory = dict(type = 'str'),
+ labels = dict(type = 'dict', default = {}), # TODO: needs documenting
+ annotations = dict(type = 'dict', default = {}), # TODO: needs documenting
+ pod_cidr = dict(type = 'str'), # TODO: needs documenting
+ external_id = dict(type = 'str'), # TODO: needs documenting
+ client_config = dict(type = 'str'), # TODO: needs documenting
+ client_cluster = dict(type = 'str', default = 'master'), # TODO: needs documenting
+ client_context = dict(type = 'str', default = 'default'), # TODO: needs documenting
+ client_namespace = dict(type = 'str', default = 'default'), # TODO: needs documenting
+ client_user = dict(type = 'str', default = 'system:openshift-client'), # TODO: needs documenting
+ kubectl_cmd = dict(type = 'list', default = ['kubectl']) # TODO: needs documenting
),
mutually_exclusive = [
['host_ip', 'external_ips'],
@@ -333,14 +336,16 @@ def main():
client_cluster = module.params['client_cluster']
if config.has_cluster(client_cluster):
- if client_cluster != config.get_cluster_for_context(client_cluster):
+ if client_cluster != config.get_cluster_for_context(client_context):
client_opts.append("--cluster=%s" % client_cluster)
else:
module.fail_json(msg="Cluster %s not found in client config" %
client_cluster)
- # TODO: provide sane defaults for some (like hostname, externalIP,
- # internalIP, etc)
+ client_namespace = module.params['client_namespace']
+ if client_namespace != config.get_namespace_for_context(client_context):
+ client_opts.append("--namespace=%s" % client_namespace)
+
node = Node(module, client_opts, module.params['api_version'],
module.params['name'], module.params['host_ip'],
module.params['hostnames'], module.params['external_ips'],
diff --git a/roles/openshift_register_nodes/tasks/main.yml b/roles/openshift_register_nodes/tasks/main.yml
index 7319b88b1..85f490f70 100644
--- a/roles/openshift_register_nodes/tasks/main.yml
+++ b/roles/openshift_register_nodes/tasks/main.yml
@@ -3,53 +3,37 @@
# TODO: recreate master/node configs if settings that affect the configs
# change (hostname, public_hostname, ip, public_ip, etc)
-# TODO: create a failed_when condition
-- name: Create node server certificates
- command: >
- /usr/bin/openshift admin create-server-cert
- --overwrite=false
- --cert={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/server.crt
- --key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/server.key
- --hostnames={{ [item.openshift.common.hostname,
- item.openshift.common.public_hostname]|unique|join(",") }}
- args:
- chdir: "{{ openshift_cert_dir_parent }}"
- creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/server.crt"
- with_items: openshift_nodes
- register: server_cert_result
-
-# TODO: create a failed_when condition
-- name: Create node client certificates
- command: >
- /usr/bin/openshift admin create-node-cert
- --overwrite=false
- --cert={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/cert.crt
- --key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/key.key
- --node-name={{ item.openshift.common.hostname }}
- args:
- chdir: "{{ openshift_cert_dir_parent }}"
- creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/cert.crt"
- with_items: openshift_nodes
- register: node_cert_result
+# TODO: use a template lookup here
# TODO: create a failed_when condition
-- name: Create kubeconfigs for nodes
+- name: Use enterprise default for openshift_registry_url if not set
+ set_fact:
+ openshift_registry_url: "openshift3_beta/ose-${component}:${version}"
+ when: openshift.common.deployment_type == 'enterprise' and openshift_registry_url is not defined
+- name: Create node config
command: >
- /usr/bin/openshift admin create-kubeconfig
- --client-certificate={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/cert.crt
- --client-key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/key.key
- --kubeconfig={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/.kubeconfig
- --master={{ openshift.master.api_url }}
- --public-master={{ openshift.master.public_api_url }}
+ /usr/bin/openshift admin create-node-config
+ --node-dir={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}
+ --node={{ item.openshift.common.hostname }}
+ --hostnames={{ [item.openshift.common.hostname, item.openshift.common.public_hostname]|unique|join(",") }}
+ --dns-domain={{ openshift.dns.domain }}
+ --dns-ip={{ openshift.dns.ip }}
+ --master={{ openshift.master.api_url }}
+ --signer-key={{ openshift_master_ca_key }}
+ --signer-cert={{ openshift_master_ca_cert }}
+ --certificate-authority={{ openshift_master_ca_cert }}
+ --signer-serial={{ openshift_master_ca_dir }}/serial.txt
+ --node-client-certificate-authority={{ openshift_master_ca_cert }}
+ {{ ('--images=' ~ openshift_registry_url) if openshift_registry_url is defined else '' }}
+ --listen=https://0.0.0.0:10250
args:
- chdir: "{{ openshift_cert_dir_parent }}"
- creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/.kubeconfig"
+ chdir: "{{ openshift_cert_parent_dir }}"
+ creates: "{{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}"
with_items: openshift_nodes
- register: kubeconfig_result
- name: Register unregistered nodes
kubernetes_register_node:
- client_user: openshift-client
+ kubectl_cmd: ['openshift', 'kube']
name: "{{ item.openshift.common.hostname }}"
api_version: "{{ openshift_kube_api_version }}"
cpu: "{{ item.openshift.node.resources_cpu | default(None) }}"
@@ -61,7 +45,5 @@
external_id: "{{ item.openshift.node.external_id }}"
# TODO: support customizing other attributes such as: client_config,
# client_cluster, client_context, client_user
- # TODO: update for v1beta3 changes after rebase: hostnames, external_ips,
- # internal_ips, external_id
with_items: openshift_nodes
register: register_result
diff --git a/roles/openshift_register_nodes/vars/main.yml b/roles/openshift_register_nodes/vars/main.yml
new file mode 100644
index 000000000..bd497f08f
--- /dev/null
+++ b/roles/openshift_register_nodes/vars/main.yml
@@ -0,0 +1,7 @@
+---
+openshift_cert_parent_dir: /var/lib/openshift
+openshift_cert_relative_dir: openshift.local.certificates
+openshift_cert_dir: "{{ openshift_cert_parent_dir }}/{{ openshift_cert_relative_dir }}"
+openshift_master_ca_dir: "{{ openshift_cert_dir }}/ca"
+openshift_master_ca_cert: "{{ openshift_master_ca_dir }}/cert.crt"
+openshift_master_ca_key: "{{ openshift_master_ca_dir }}/key.key"
diff --git a/roles/openshift_repos/README.md b/roles/openshift_repos/README.md
index 6713e11fc..6bbedd839 100644
--- a/roles/openshift_repos/README.md
+++ b/roles/openshift_repos/README.md
@@ -14,7 +14,7 @@ Role Variables
| Name | Default value | |
|-------------------------------|---------------|----------------------------------------------|
-| openshift_deployment_type | online | Possible values enterprise, origin, online |
+| openshift_deployment_type | None | Possible values enterprise, origin, online |
| openshift_additional_repos | {} | TODO |
Dependencies
diff --git a/roles/openshift_repos/defaults/main.yaml b/roles/openshift_repos/defaults/main.yaml
index 1730207f4..7c5a14cd7 100644
--- a/roles/openshift_repos/defaults/main.yaml
+++ b/roles/openshift_repos/defaults/main.yaml
@@ -1,7 +1,2 @@
---
-# TODO: once we are able to configure/deploy origin using the openshift roles,
-# then we should default to origin
-
-# TODO: push the defaulting of these values to the openshift_facts module
-openshift_deployment_type: online
openshift_additional_repos: {}
diff --git a/roles/openshift_repos/files/online/epel7-kubernetes.repo b/roles/openshift_repos/files/online/epel7-kubernetes.repo
deleted file mode 100644
index 1deae2939..000000000
--- a/roles/openshift_repos/files/online/epel7-kubernetes.repo
+++ /dev/null
@@ -1,6 +0,0 @@
-[maxamillion-epel7-kubernetes]
-name=Copr repo for epel7-kubernetes owned by maxamillion
-baseurl=http://copr-be.cloud.fedoraproject.org/results/maxamillion/epel7-kubernetes/epel-7-$basearch/
-skip_if_unavailable=True
-gpgcheck=0
-enabled=1
diff --git a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta b/roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-beta
index 7b40671a4..7b40671a4 100644
--- a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta
+++ b/roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-beta
diff --git a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release b/roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-release
index 0f83b622d..0f83b622d 100644
--- a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release
+++ b/roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-release
diff --git a/roles/openshift_repos/files/online/epel7-openshift.repo b/roles/openshift_repos/files/online/repos/epel7-openshift.repo
index c7629872d..c7629872d 100644
--- a/roles/openshift_repos/files/online/epel7-openshift.repo
+++ b/roles/openshift_repos/files/online/repos/epel7-openshift.repo
diff --git a/roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo b/roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-extras.repo
index cfe41f691..cfe41f691 100644
--- a/roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo
+++ b/roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-extras.repo
diff --git a/roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo b/roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-server.repo
index ddc93193d..ddc93193d 100644
--- a/roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo
+++ b/roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-server.repo
diff --git a/roles/openshift_repos/files/online/rhel-7-libra-candidate.repo b/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo
index b4215679f..b4215679f 100644
--- a/roles/openshift_repos/files/online/rhel-7-libra-candidate.repo
+++ b/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo
diff --git a/roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo b/roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo
new file mode 100644
index 000000000..0b21e0a65
--- /dev/null
+++ b/roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo
@@ -0,0 +1,7 @@
+[maxamillion-origin-next]
+name=Copr repo for origin-next owned by maxamillion
+baseurl=https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/
+skip_if_unavailable=True
+gpgcheck=1
+gpgkey=https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg
+enabled=1
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index bb1551d37..12e98b7a1 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -10,10 +10,6 @@
- assert:
that: openshift_deployment_type in known_openshift_deployment_types
-# TODO: remove this when origin support actually works
-- fail: msg="OpenShift Origin support is not currently enabled"
- when: openshift_deployment_type == 'origin'
-
- name: Ensure libselinux-python is installed
yum:
pkg: libselinux-python
@@ -36,17 +32,15 @@
path: "/etc/yum.repos.d/{{ item | basename }}"
state: absent
with_fileglob:
- - '*/*'
- when: not (item | search("/files/" + openshift_deployment_type + "/")) and (item | search(".repo$"))
+ - '*/repos/*'
+ when: not (item | search("/files/" ~ openshift_deployment_type ~ "/repos"))
- name: Configure gpg keys if needed
copy: src={{ item }} dest=/etc/pki/rpm-gpg/
with_fileglob:
- - "{{ openshift_deployment_type }}/*"
- when: item | basename | match("RPM-GPG-KEY-")
+ - "{{ openshift_deployment_type }}/gpg_keys/*"
- name: Configure yum repositories
copy: src={{ item }} dest=/etc/yum.repos.d/
with_fileglob:
- - "{{ openshift_deployment_type }}/*"
- when: item | basename | search(".*\.repo$")
+ - "{{ openshift_deployment_type }}/repos/*"
diff --git a/roles/openshift_repos/templates/yum_repo.j2 b/roles/openshift_repos/templates/yum_repo.j2
index 7ea2c7460..2d9243545 100644
--- a/roles/openshift_repos/templates/yum_repo.j2
+++ b/roles/openshift_repos/templates/yum_repo.j2
@@ -1,4 +1,3 @@
-# {{ ansible_managed }}
{% for repo in openshift_additional_repos %}
[{{ repo.id }}]
name={{ repo.name | default(repo.id) }}
diff --git a/roles/openshift_sdn_master/tasks/main.yml b/roles/openshift_sdn_master/tasks/main.yml
index f2d61043b..77e7a80ba 100644
--- a/roles/openshift_sdn_master/tasks/main.yml
+++ b/roles/openshift_sdn_master/tasks/main.yml
@@ -12,12 +12,21 @@
yum:
pkg: openshift-sdn-master
state: installed
+ register: install_result
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
+
+# TODO: we should probably generate certs specifically for sdn
- name: Configure openshift-sdn-master settings
lineinfile:
dest: /etc/sysconfig/openshift-sdn-master
regexp: '^OPTIONS='
- line: "OPTIONS=\"-v={{ openshift.master_sdn.debug_level }}\""
+ line: "OPTIONS=\"-v={{ openshift.master_sdn.debug_level }} -etcd-endpoints={{ openshift_sdn_master_url}}
+ -etcd-cafile={{ openshift_cert_dir }}/ca/ca.crt
+ -etcd-certfile={{ openshift_cert_dir }}/openshift-client/cert.crt
+ -etcd-keyfile={{ openshift_cert_dir }}/openshift-client/key.key\""
notify:
- restart openshift-sdn-master
diff --git a/roles/openshift_sdn_node/tasks/main.yml b/roles/openshift_sdn_node/tasks/main.yml
index 729c28879..c2329dd6f 100644
--- a/roles/openshift_sdn_node/tasks/main.yml
+++ b/roles/openshift_sdn_node/tasks/main.yml
@@ -9,9 +9,15 @@
yum:
pkg: openshift-sdn-node
state: installed
+ register: install_result
+
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
# TODO: we are specifying -hostname= for OPTIONS as a workaround for
# openshift-sdn-node not properly detecting the hostname.
+# TODO: we should probably generate certs specifically for sdn
- name: Configure openshift-sdn-node settings
lineinfile:
dest: /etc/sysconfig/openshift-sdn-node
@@ -20,7 +26,10 @@
backrefs: yes
with_items:
- regex: '^(OPTIONS=)'
- line: '\1"-v={{ openshift.node_sdn.debug_level }} -hostname={{ openshift.common.hostname }}"'
+ line: '\1"-v={{ openshift.node_sdn.debug_level }} -hostname={{ openshift.common.hostname }}
+ -etcd-cafile={{ openshift_node_cert_dir }}/ca.crt
+ -etcd-certfile={{ openshift_node_cert_dir }}/client.crt
+ -etcd-keyfile={{ openshift_node_cert_dir }}/client.key\"'
- regex: '^(MASTER_URL=)'
line: '\1"{{ openshift_sdn_master_url }}"'
- regex: '^(MINION_IP=)'
diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py
index 90588d2ae..9d0af497d 100755
--- a/roles/os_firewall/library/os_firewall_manage_iptables.py
+++ b/roles/os_firewall/library/os_firewall_manage_iptables.py
@@ -270,4 +270,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()