-rw-r--r--  README.md                                                  |   2
-rwxr-xr-x  bin/cluster                                                | 111
-rw-r--r--  filter_plugins/oo_filters.py                               | 106
-rw-r--r--  inventory/gce/group_vars/all                               |   7
-rw-r--r--  inventory/gce/group_vars/tag_host-type-master              |   5
-rw-r--r--  inventory/gce/group_vars/tag_host-type-node                |   6
l---------  inventory/gce/group_vars/tag_host-type-openshift-master    |   1
l---------  inventory/gce/group_vars/tag_host-type-openshift-node      |   1
-rw-r--r--  playbooks/aws/openshift-master/config.yml                  |   6
-rw-r--r--  playbooks/aws/openshift-master/launch.yml                  |   4
-rw-r--r--  playbooks/aws/openshift-node/config.yml                    |   8
-rw-r--r--  playbooks/aws/openshift-node/launch.yml                    |   4
l---------  playbooks/gce/openshift-cluster/filter_plugins             |   1
-rw-r--r--  playbooks/gce/openshift-cluster/launch.yml                 |  63
-rw-r--r--  playbooks/gce/openshift-cluster/launch_instances.yml       |  39
l---------  playbooks/gce/openshift-cluster/roles                      |   1
-rw-r--r--  playbooks/gce/openshift-cluster/terminate.yml              |  20
-rw-r--r--  playbooks/gce/openshift-cluster/vars.yml                   |   1
-rw-r--r--  playbooks/gce/openshift-master/config.yml                  |  33
-rw-r--r--  playbooks/gce/openshift-master/launch.yml                  |   4
-rw-r--r--  playbooks/gce/openshift-master/terminate.yml               |   3
-rw-r--r--  playbooks/gce/openshift-node/config.yml                    | 144
-rw-r--r--  playbooks/gce/openshift-node/launch.yml                    |   8
-rw-r--r--  playbooks/gce/openshift-node/terminate.yml                 |   3
-rw-r--r--  roles/docker/tasks/main.yml                                |   2
-rw-r--r--  roles/openshift_common/README.md                           |   3
-rw-r--r--  roles/openshift_common/defaults/main.yml                   |   5
-rw-r--r--  roles/openshift_master/README.md                           |   3
-rw-r--r--  roles/openshift_master/tasks/main.yml                      |  35
-rw-r--r--  roles/openshift_node/README.md                             |   4
-rw-r--r--  roles/openshift_node/defaults/main.yml                     |   6
-rw-r--r--  roles/openshift_node/tasks/main.yml                        |  68
-rw-r--r--  roles/openshift_register_nodes/README.md                   |  38
-rw-r--r--  roles/openshift_register_nodes/defaults/main.yml           |   5
-rw-r--r--  roles/openshift_register_nodes/library/kubernetes_register_node.py (renamed from roles/openshift_node/library/openshift_register_node.py) |  42
-rw-r--r--  roles/openshift_register_nodes/meta/main.yml               | 128
-rw-r--r--  roles/openshift_register_nodes/tasks/main.yml              |  71
-rw-r--r--  roles/openshift_sdn_node/README.md                         |   3
-rw-r--r--  roles/os_firewall/library/os_firewall_manage_iptables.py   |  62
-rw-r--r--  roles/os_update_latest/tasks/main.yml                      |   3
40 files changed, 780 insertions, 279 deletions
diff --git a/README.md b/README.md
index 9a08bccd9..e7fa89930 100644
--- a/README.md
+++ b/README.md
@@ -26,7 +26,7 @@ Setup
- Directory Structure:
- [cloud.rb](cloud.rb) - light wrapper around Ansible
- - [cluster.sh](cluster.sh) - easily create OpenShift 3 clusters
+ - [bin/cluster](bin/cluster) - python script to easily create OpenShift 3 clusters
- [filter_plugins/](filter_plugins) - custom filters used to manipulate data in Ansible
- [inventory/](inventory) - houses Ansible dynamic inventory scripts
- [lib/](lib) - library components of cloud.rb
diff --git a/bin/cluster b/bin/cluster
new file mode 100755
index 000000000..823f50671
--- /dev/null
+++ b/bin/cluster
@@ -0,0 +1,111 @@
+#!/usr/bin/env python
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+import argparse
+import ConfigParser
+import sys
+import os
+
+
+class Cluster(object):
+ """Python wrapper to ensure environment is correct for running ansible playbooks
+ """
+
+ def __init__(self, args):
+ self.args = args
+
+ # setup ansible ssh environment
+ if 'ANSIBLE_SSH_ARGS' not in os.environ:
+ os.environ['ANSIBLE_SSH_ARGS'] = (
+ '-o ForwardAgent=yes'
+ ' -o StrictHostKeyChecking=no'
+ ' -o UserKnownHostsFile=/dev/null'
+ ' -o ControlMaster=auto'
+ ' -o ControlPersist=600s'
+ )
+
+ def apply(self):
+ # setup ansible playbook environment
+ config = ConfigParser.ConfigParser()
+ if 'gce' == self.args.provider:
+ config.readfp(open('inventory/gce/gce.ini'))
+
+ for key in config.options('gce'):
+ os.environ[key] = config.get('gce', key)
+
+ inventory = '-i inventory/gce/gce.py'
+ elif 'aws' == self.args.provider:
+ config.readfp(open('inventory/aws/ec2.ini'))
+
+ for key in config.options('ec2'):
+ os.environ[key] = config.get('ec2', key)
+
+ inventory = '-i inventory/aws/ec2.py'
+ else:
+ # this code should never be reached
+ raise argparse.ArgumentError("invalid PROVIDER {}".format(self.args.provider))
+
+ env = {'cluster_id': self.args.cluster_id}
+
+ if 'create' == self.args.action:
+ playbook = "playbooks/{}/openshift-cluster/launch.yml".format(self.args.provider)
+ env['masters'] = self.args.masters
+ env['nodes'] = self.args.nodes
+
+ elif 'terminate' == self.args.action:
+ playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(self.args.provider)
+ elif 'list' == self.args.action:
+ # todo: implement cluster list
+ raise argparse.ArgumentError("ACTION {} not implemented".format(self.args.action))
+ elif 'update' == self.args.action:
+ # todo: implement cluster update
+ raise argparse.ArgumentError("ACTION {} not implemented".format(self.args.action))
+ else:
+ # this code should never be reached
+ raise argparse.ArgumentError("invalid ACTION {}".format(self.args.action))
+
+ verbose = ''
+ if self.args.verbose > 0:
+ verbose = '-{}'.format('v' * self.args.verbose)
+
+ ansible_env = '-e \'{}\''.format(
+ ' '.join(['%s=%s' % (key, value) for (key, value) in env.items()])
+ )
+
+ command = 'ansible-playbook {} {} {} {}'.format(
+ verbose, inventory, ansible_env, playbook
+ )
+
+ if self.args.verbose > 1:
+ command = 'time {}'.format(command)
+
+ if self.args.verbose > 0:
+ sys.stderr.write('RUN [{}]\n'.format(command))
+ sys.stderr.flush()
+
+ status = os.system(command)
+ if status != 0:
+        sys.stderr.write("RUN [{}] failed with exit status {}\n".format(command, status))
+ exit(status)
+
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Manage OpenShift Cluster')
+ parser.add_argument('-m', '--masters', default=1, type=int, help='number of masters to create in cluster')
+ parser.add_argument('-n', '--nodes', default=2, type=int, help='number of nodes to create in cluster')
+ parser.add_argument('-v', '--verbose', action='count', help='Multiple -v options increase the verbosity')
+ parser.add_argument('--version', action='version', version='%(prog)s 0.1')
+ parser.add_argument('action', choices=['create', 'terminate', 'update', 'list'])
+ parser.add_argument('provider', choices=['gce', 'aws'])
+ parser.add_argument('cluster_id', help='prefix for cluster VM names')
+ args = parser.parse_args()
+
+ if 'terminate' == args.action:
+ sys.stderr.write("This will terminate the ENTIRE {} environment. Are you sure? [y/N] ".format(args.cluster_id))
+ sys.stderr.flush()
+ answer = sys.stdin.read(1)
+ if answer not in ['y', 'Y']:
+ exit(0)
+
+ Cluster(args).apply()
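
For reference, a minimal sketch (not part of the diff) of the ansible-playbook command that Cluster.apply() assembles for a GCE "create" run; the cluster_id and verbosity values are illustrative, and the order of the -e key/value pairs depends on dict iteration order:

    # Sketch only: mirrors the string assembly in Cluster.apply() above.
    env = {'cluster_id': 'devcluster', 'masters': 1, 'nodes': 2}  # example values
    inventory = '-i inventory/gce/gce.py'
    playbook = 'playbooks/gce/openshift-cluster/launch.yml'

    ansible_env = "-e '{}'".format(
        ' '.join('%s=%s' % (key, value) for key, value in env.items())
    )
    command = 'ansible-playbook {} {} {} {}'.format('-v', inventory, ansible_env, playbook)
    print(command)
    # ansible-playbook -v -i inventory/gce/gce.py -e 'cluster_id=devcluster masters=1 nodes=2'
    #     playbooks/gce/openshift-cluster/launch.yml
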
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index b57056375..caf1fd1f0 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -1,39 +1,42 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+
from ansible import errors, runner
import json
import pdb
def oo_pdb(arg):
- ''' This pops you into a pdb instance where arg is the data passed in from the filter.
+ ''' This pops you into a pdb instance where arg is the data passed in from the filter.
Ex: "{{ hostvars | oo_pdb }}"
- '''
- pdb.set_trace()
- return arg
+ '''
+ pdb.set_trace()
+ return arg
def oo_len(arg):
- ''' This returns the length of the argument
+ ''' This returns the length of the argument
Ex: "{{ hostvars | oo_len }}"
- '''
- return len(arg)
+ '''
+ return len(arg)
def get_attr(data, attribute=None):
- ''' This looks up dictionary attributes of the form a.b.c and returns the value.
+ ''' This looks up dictionary attributes of the form a.b.c and returns the value.
Ex: data = {'a': {'b': {'c': 5}}}
attribute = "a.b.c"
returns 5
- '''
-
- if not attribute:
- raise errors.AnsibleFilterError("|failed expects attribute to be set")
+ '''
+ if not attribute:
+ raise errors.AnsibleFilterError("|failed expects attribute to be set")
- ptr = data
- for attr in attribute.split('.'):
- ptr = ptr[attr]
+ ptr = data
+ for attr in attribute.split('.'):
+ ptr = ptr[attr]
- return ptr
+ return ptr
def oo_collect(data, attribute=None, filters={}):
- ''' This takes a list of dict and collects all attributes specified into a list
- If filter is specified then we will include all items that match _ALL_ of filters.
+ ''' This takes a list of dict and collects all attributes specified into a list
+ If filter is specified then we will include all items that match _ALL_ of filters.
Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
{'a':2, 'z': 'z'}, # True, return
{'a':3, 'z': 'z'}, # True, return
@@ -42,44 +45,59 @@ def oo_collect(data, attribute=None, filters={}):
attribute = 'a'
filters = {'z': 'z'}
returns [1, 2, 3]
- '''
+ '''
- if not issubclass(type(data), list):
- raise errors.AnsibleFilterError("|failed expects to filter on a List")
+ if not issubclass(type(data), list):
+ raise errors.AnsibleFilterError("|failed expects to filter on a List")
- if not attribute:
- raise errors.AnsibleFilterError("|failed expects attribute to be set")
+ if not attribute:
+ raise errors.AnsibleFilterError("|failed expects attribute to be set")
- if filters:
- retval = [get_attr(d, attribute) for d in data if all([ d[key] == filters[key] for key in filters ]) ]
- else:
- retval = [get_attr(d, attribute) for d in data]
+ if filters:
+ retval = [get_attr(d, attribute) for d in data if all([ d[key] == filters[key] for key in filters ]) ]
+ else:
+ retval = [get_attr(d, attribute) for d in data]
- return retval
+ return retval
def oo_select_keys(data, keys):
- ''' This returns a list, which contains the value portions for the keys
+ ''' This returns a list, which contains the value portions for the keys
Ex: data = { 'a':1, 'b':2, 'c':3 }
keys = ['a', 'c']
returns [1, 3]
- '''
+ '''
+
+ if not issubclass(type(data), dict):
+ raise errors.AnsibleFilterError("|failed expects to filter on a Dictionary")
- if not issubclass(type(data), dict):
- raise errors.AnsibleFilterError("|failed expects to filter on a Dictionary")
+ if not issubclass(type(keys), list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
- if not issubclass(type(keys), list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
+ # Gather up the values for the list of keys passed in
+ retval = [data[key] for key in keys]
- # Gather up the values for the list of keys passed in
- retval = [data[key] for key in keys]
+ return retval
- return retval
+def oo_prepend_strings_in_list(data, prepend):
+ ''' This takes a list of strings and prepends a string to each item in the
+ list
+ Ex: data = ['cart', 'tree']
+ prepend = 'apple-'
+ returns ['apple-cart', 'apple-tree']
+ '''
+ if not issubclass(type(data), list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+ if not all(isinstance(x, basestring) for x in data):
+ raise errors.AnsibleFilterError("|failed expects first param is a list of strings")
+ retval = [prepend + s for s in data]
+ return retval
class FilterModule (object):
- def filters(self):
- return {
- "oo_select_keys": oo_select_keys,
- "oo_collect": oo_collect,
- "oo_len": oo_len,
- "oo_pdb": oo_pdb
- }
+ def filters(self):
+ return {
+ "oo_select_keys": oo_select_keys,
+ "oo_collect": oo_collect,
+ "oo_len": oo_len,
+ "oo_pdb": oo_pdb,
+ "oo_prepend_strings_in_list": oo_prepend_strings_in_list
+ }
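
A quick illustration of the new oo_prepend_strings_in_list filter outside of Ansible (a sketch, assuming the ansible package is importable since the plugin imports it; the tag values are examples of the GCE tags used later in launch_instances.yml):

    from filter_plugins.oo_filters import oo_prepend_strings_in_list

    print(oo_prepend_strings_in_list(['cart', 'tree'], 'apple-'))
    # ['apple-cart', 'apple-tree']

    # as used to turn instance tags into inventory group names
    print(oo_prepend_strings_in_list(['env-mycluster', 'host-type-node'], 'tag_'))
    # ['tag_env-mycluster', 'tag_host-type-node']
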
diff --git a/inventory/gce/group_vars/all b/inventory/gce/group_vars/all
new file mode 100644
index 000000000..3e969df63
--- /dev/null
+++ b/inventory/gce/group_vars/all
@@ -0,0 +1,7 @@
+---
+ansible_ssh_user: root
+openshift_hostname: "{{ ansible_default_ipv4.address }}"
+openshift_public_hostname: "{{ ansible_default_ipv4.address }}"
+openshift_ip: "{{ ansible_default_ipv4.address }}"
+openshift_public_ip: "{{ gce_public_ip }}"
+openshift_env: "{{ oo_env }}"
diff --git a/inventory/gce/group_vars/tag_host-type-master b/inventory/gce/group_vars/tag_host-type-master
new file mode 100644
index 000000000..ddbdc650c
--- /dev/null
+++ b/inventory/gce/group_vars/tag_host-type-master
@@ -0,0 +1,5 @@
+---
+openshift_api_url: https://{{ openshift_hostname }}:8443
+openshift_api_public_url: https://{{ openshift_public_hostname }}:8443
+openshift_webui_url: https://{{ openshift_hostname }}:8444
+openshift_webui_public_url: https://{{ openshift_public_hostname }}:8444
diff --git a/inventory/gce/group_vars/tag_host-type-node b/inventory/gce/group_vars/tag_host-type-node
new file mode 100644
index 000000000..bb95a724d
--- /dev/null
+++ b/inventory/gce/group_vars/tag_host-type-node
@@ -0,0 +1,6 @@
+---
+openshift_node_cpu:
+openshift_node_memory:
+openshift_node_pod_cidr:
+openshift_node_labels: {}
+openshift_node_annotations: {}
diff --git a/inventory/gce/group_vars/tag_host-type-openshift-master b/inventory/gce/group_vars/tag_host-type-openshift-master
new file mode 120000
index 000000000..c0c4cf370
--- /dev/null
+++ b/inventory/gce/group_vars/tag_host-type-openshift-master
@@ -0,0 +1 @@
+tag_host-type-master
\ No newline at end of file
diff --git a/inventory/gce/group_vars/tag_host-type-openshift-node b/inventory/gce/group_vars/tag_host-type-openshift-node
new file mode 120000
index 000000000..ebbce6136
--- /dev/null
+++ b/inventory/gce/group_vars/tag_host-type-openshift-node
@@ -0,0 +1 @@
+tag_host-type-node
\ No newline at end of file
diff --git a/playbooks/aws/openshift-master/config.yml b/playbooks/aws/openshift-master/config.yml
index b3227afa9..bbf1f654a 100644
--- a/playbooks/aws/openshift-master/config.yml
+++ b/playbooks/aws/openshift-master/config.yml
@@ -1,10 +1,10 @@
---
-- name: "populate oo_hosts_to_config host group if needed"
+- name: "populate oo_masters_to_config host group if needed"
hosts: localhost
gather_facts: no
tasks:
- name: "Evaluate oo_host_group_exp if it's set"
- add_host: "name={{ item }} groups=oo_hosts_to_config"
+ add_host: "name={{ item }} groups=oo_masters_to_config"
with_items: "{{ oo_host_group_exp | default('') }}"
when: oo_host_group_exp is defined
@@ -25,7 +25,7 @@
when: groups['tag_env-host-type_' + oo_env + '-openshift-node'] is defined
- name: "Configure instances"
- hosts: oo_hosts_to_config
+ hosts: oo_masters_to_config
connection: ssh
user: root
vars_files:
diff --git a/playbooks/aws/openshift-master/launch.yml b/playbooks/aws/openshift-master/launch.yml
index a889b93be..3d5a7f579 100644
--- a/playbooks/aws/openshift-master/launch.yml
+++ b/playbooks/aws/openshift-master/launch.yml
@@ -45,8 +45,8 @@
args:
tags: "{{ oo_new_inst_tags }}"
- - name: Add new instances public IPs to oo_hosts_to_config
- add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_hosts_to_config"
+ - name: Add new instances public IPs to oo_masters_to_config
+ add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_masters_to_config"
with_together:
- oo_new_inst_names
- ec2.instances
diff --git a/playbooks/aws/openshift-node/config.yml b/playbooks/aws/openshift-node/config.yml
index 21807b1cf..3cf2c58b2 100644
--- a/playbooks/aws/openshift-node/config.yml
+++ b/playbooks/aws/openshift-node/config.yml
@@ -1,10 +1,10 @@
---
-- name: "populate oo_hosts_to_config host group if needed"
+- name: "populate oo_nodes_to_config host group if needed"
hosts: localhost
gather_facts: no
tasks:
- name: Evaluate oo_host_group_exp
- add_host: "name={{ item }} groups=oo_hosts_to_config"
+ add_host: "name={{ item }} groups=oo_nodes_to_config"
with_items: "{{ oo_host_group_exp | default('') }}"
when: oo_host_group_exp is defined
@@ -31,7 +31,7 @@
when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
- name: "Configure instances"
- hosts: oo_hosts_to_config
+ hosts: oo_nodes_to_config
connection: ssh
user: root
vars_files:
@@ -44,5 +44,5 @@
openshift_env: "{{ oo_env }}",
openshift_public_ip: "{{ ec2_ip_address }}"
}
- - docker
- os_env_extras
+ - os_env_extras_node
diff --git a/playbooks/aws/openshift-node/launch.yml b/playbooks/aws/openshift-node/launch.yml
index a889b93be..4745fc658 100644
--- a/playbooks/aws/openshift-node/launch.yml
+++ b/playbooks/aws/openshift-node/launch.yml
@@ -45,8 +45,8 @@
args:
tags: "{{ oo_new_inst_tags }}"
- - name: Add new instances public IPs to oo_hosts_to_config
- add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_hosts_to_config"
+ - name: Add new instances public IPs to oo_nodes_to_config
+ add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_nodes_to_config"
with_together:
- oo_new_inst_names
- ec2.instances
diff --git a/playbooks/gce/openshift-cluster/filter_plugins b/playbooks/gce/openshift-cluster/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
new file mode 100644
index 000000000..889d92d40
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/launch.yml
@@ -0,0 +1,63 @@
+---
+- name: Launch instance(s)
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - set_fact: k8s_type="master"
+
+ - name: Generate master instance names(s)
+ set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
+ register: master_names_output
+ with_sequence: start=1 end={{ masters }}
+
+ # These set_fact's cannot be combined
+ - set_fact:
+ master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
+
+ - set_fact:
+ master_names: "{{ master_names_string.strip().split(' ') }}"
+
+ - include: launch_instances.yml
+ vars:
+ instances: "{{ master_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+ group_name: "tag_env-host-type-{{ cluster_id }}-openshift-master"
+
+ - set_fact: k8s_type="node"
+
+ - name: Generate node instance names(s)
+ set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
+ register: node_names_output
+ with_sequence: start=1 end={{ nodes }}
+
+ # These set_fact's cannot be combined
+ - set_fact:
+ node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
+
+ - set_fact:
+ node_names: "{{ node_names_string.strip().split(' ') }}"
+
+ - include: launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+- hosts: "tag_env-{{ cluster_id }}"
+ roles:
+ - openshift_repos
+ - os_update_latest
+
+- include: ../openshift-master/config.yml
+ vars:
+ oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]"
+ oo_env: "{{ cluster_id }}"
+
+- include: ../openshift-node/config.yml
+ vars:
+ oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]"
+ oo_env: "{{ cluster_id }}"
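
The master/node name generation above relies on Jinja2's random and format filters; a rough Python equivalent of the naming scheme (the cluster name and suffixes are illustrative):

    import random

    def instance_name(cluster_id, k8s_type):
        # "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
        return '%s-%s-%05x' % (cluster_id, k8s_type, random.randrange(1048576))

    print(instance_name('mycluster', 'master'))  # e.g. mycluster-master-0f3a2
    print(instance_name('mycluster', 'node'))    # e.g. mycluster-node-a81c4
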
diff --git a/playbooks/gce/openshift-cluster/launch_instances.yml b/playbooks/gce/openshift-cluster/launch_instances.yml
new file mode 100644
index 000000000..20e31d990
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/launch_instances.yml
@@ -0,0 +1,39 @@
+
+- set_fact:
+ machine_type: "{{ lookup('env', 'gce_machine_type') |default('n1-standard-1', true) }}"
+ machine_image: "{{ lookup('env', 'gce_machine_image') |default('libra-rhel7', true) }}"
+
+- name: Launch instance(s)
+ gce:
+ instance_names: "{{ instances }}"
+ machine_type: "{{ machine_type }}"
+ image: "{{ machine_image }}"
+ service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+ pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+ project_id: "{{ lookup('env', 'gce_project_id') }}"
+ tags:
+ - "created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}"
+ - "env-{{ cluster }}"
+ - "host-type-{{ type }}"
+ - "env-host-type-{{ cluster }}-openshift-{{ type }}"
+ register: gce
+
+- name: Add new instances public IPs
+ add_host:
+ hostname: "{{ item.name }}"
+ ansible_ssh_host: "{{ item.public_ip }}"
+ groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
+ gce_public_ip: "{{ item.public_ip }}"
+ with_items: gce.instance_data
+
+- name: Wait for ssh
+ wait_for: "port=22 host={{ item.public_ip }}"
+ with_items: gce.instance_data
+
+- name: Wait for root user setup
+ command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
+ register: result
+ until: result.rc == 0
+ retries: 20
+ delay: 10
+ with_items: gce.instance_data
diff --git a/playbooks/gce/openshift-cluster/roles b/playbooks/gce/openshift-cluster/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
new file mode 100644
index 000000000..0281ae953
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/terminate.yml
@@ -0,0 +1,20 @@
+---
+- name: Terminate instance(s)
+ hosts: localhost
+
+ vars_files:
+ - vars.yml
+
+- include: ../openshift-node/terminate.yml
+ vars:
+ oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]'
+ gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+ gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+ gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+
+- include: ../openshift-master/terminate.yml
+ vars:
+ oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]'
+ gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+ gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+ gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/vars.yml
@@ -0,0 +1 @@
+---
diff --git a/playbooks/gce/openshift-master/config.yml b/playbooks/gce/openshift-master/config.yml
index a74250d13..e405e2fb4 100644
--- a/playbooks/gce/openshift-master/config.yml
+++ b/playbooks/gce/openshift-master/config.yml
@@ -1,41 +1,20 @@
----
-- name: "populate oo_hosts_to_config host group if needed"
+- name: master/config.yml, populate oo_masters_to_config host group if needed
hosts: localhost
gather_facts: no
tasks:
- name: "Evaluate oo_host_group_exp if it's set"
- add_host: "name={{ item }} groups=oo_hosts_to_config"
+ add_host: "name={{ item }} groups=oo_masters_to_config"
with_items: "{{ oo_host_group_exp | default('') }}"
when: oo_host_group_exp is defined
-- name: "Gather facts for nodes in {{ oo_env }}"
+- name: Gather facts for nodes in {{ oo_env }}
hosts: "tag_env-host-type-{{ oo_env }}-openshift-node"
- connection: ssh
- user: root
-
-- name: "Set Origin specific facts on localhost (for later use)"
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Setting openshift_node_ips fact on localhost
- set_fact:
- openshift_node_ips: "{{ hostvars
- | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-node'])
- | oo_collect(attribute='ansible_default_ipv4.address') }}"
- when: groups['tag_env-host-type-' + oo_env + '-openshift-node'] is defined
- name: "Configure instances"
- hosts: oo_hosts_to_config
- connection: ssh
- user: root
+ hosts: oo_masters_to_config
vars_files:
- - vars.yml
+ - vars.yml
roles:
- - {
- role: openshift_master,
- openshift_node_ips: "{{ hostvars['localhost'].openshift_node_ips | default(['']) }}",
- openshift_public_ip: "{{ gce_public_ip }}",
- openshift_env: "{{ oo_env }}",
- }
+ - openshift_master
- pods
- os_env_extras
diff --git a/playbooks/gce/openshift-master/launch.yml b/playbooks/gce/openshift-master/launch.yml
index f2800b061..3512274cc 100644
--- a/playbooks/gce/openshift-master/launch.yml
+++ b/playbooks/gce/openshift-master/launch.yml
@@ -24,8 +24,8 @@
tags: "{{ oo_new_inst_tags }}"
register: gce
- - name: Add new instances public IPs to oo_hosts_to_config
- add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_hosts_to_config"
+ - name: Add new instances public IPs to oo_masters_to_config
+ add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_masters_to_config"
with_items: gce.instance_data
- name: Wait for ssh
diff --git a/playbooks/gce/openshift-master/terminate.yml b/playbooks/gce/openshift-master/terminate.yml
index 76e1404b5..9e027cf41 100644
--- a/playbooks/gce/openshift-master/terminate.yml
+++ b/playbooks/gce/openshift-master/terminate.yml
@@ -12,9 +12,10 @@
- debug: msg="{{ groups['oo_hosts_to_terminate'] }}"
-- name: Terminate instances
+- name: Terminate master instances
hosts: localhost
connection: local
+ gather_facts: no
tasks:
- name: Terminate master instances
gce:
diff --git a/playbooks/gce/openshift-node/config.yml b/playbooks/gce/openshift-node/config.yml
index 78047cf40..e0d074572 100644
--- a/playbooks/gce/openshift-node/config.yml
+++ b/playbooks/gce/openshift-node/config.yml
@@ -1,48 +1,122 @@
----
-- name: "populate oo_hosts_to_config host group if needed"
+- name: node/config.yml, populate oo_nodes_to_config host group if needed
hosts: localhost
gather_facts: no
tasks:
- name: Evaluate oo_host_group_exp
- add_host: "name={{ item }} groups=oo_hosts_to_config"
+ add_host: "name={{ item }} groups=oo_nodes_to_config"
with_items: "{{ oo_host_group_exp | default('') }}"
when: oo_host_group_exp is defined
+ - name: Find masters for env
+ add_host: "name={{ item }} groups=oo_masters_for_node_config"
+ with_items: groups['tag_env-host-type-' + oo_env + '-openshift-master']
-- name: "Gather facts for masters in {{ oo_env }}"
- hosts: "tag_env-host-type-{{ oo_env }}-openshift-master"
- connection: ssh
- user: root
+- name: Gather facts for masters in {{ oo_env }}
+ hosts: tag_env-host-type-{{ oo_env }}-openshift-master
+ tasks:
+ - set_fact:
+ openshift_master_ip: "{{ openshift_ip }}"
+ openshift_master_api_url: "{{ openshift_api_url }}"
+ openshift_master_webui_url: "{{ openshift_webui_url }}"
+ openshift_master_hostname: "{{ openshift_hostname }}"
+ openshift_master_public_ip: "{{ openshift_public_ip }}"
+ openshift_master_api_public_url: "{{ openshift_api_public_url }}"
+ openshift_master_webui_public_url: "{{ openshift_webui_public_url }}"
+ openshift_master_public_hostnames: "{{ openshift_public_hostname }}"
-- name: "Set OO sepcific facts on localhost (for later use)"
- hosts: localhost
- gather_facts: no
+- name: Gather facts for hosts to configure
+ hosts: tag_env-host-type-{{ oo_env }}-openshift-node
+ tasks:
+ - set_fact:
+ openshift_node_hostname: "{{ openshift_hostname }}"
+ openshift_node_name: "{{ openshift_hostname }}"
+ openshift_node_cpu: "{{ openshift_node_cpu if openshift_node_cpu else ansible_processor_cores }}"
+ openshift_node_memory: "{{ openshift_node_memory if openshift_node_memory else (ansible_memtotal_mb|int * 1024 * 1024 * 0.75)|int }}"
+ openshift_node_pod_cidr: "{{ openshift_node_pod_cidr if openshift_node_pod_cidr else None }}"
+ openshift_node_host_ip: "{{ openshift_ip }}"
+ openshift_node_labels: "{{ openshift_node_labels if openshift_node_labels else {} }}"
+ openshift_node_annotations: "{{ openshift_node_annotations if openshift_node_annotations else {} }}"
+
+- name: Register nodes
+ hosts: tag_env-host-type-{{ oo_env }}-openshift-master[0]
+ vars:
+ openshift_node_group: tag_env-host-type-{{ oo_env }}-openshift-node
+ openshift_nodes: "{{ hostvars
+ | oo_select_keys(groups[openshift_node_group]) }}"
+ openshift_master_group: tag_env-host-type-{{ oo_env }}-openshift-master
+ openshift_master_urls: "{{ hostvars
+ | oo_select_keys(groups[openshift_master_group])
+ | oo_collect(attribute='openshift_master_api_url') }}"
+ openshift_master_public_urls: "{{ hostvars
+ | oo_select_keys(groups[openshift_master_group])
+ | oo_collect(attribute='openshift_master_api_public_url') }}"
+ pre_tasks:
+ roles:
+ - openshift_register_nodes
tasks:
- - name: Setting openshift_master_ips fact on localhost
- set_fact:
- openshift_master_ips: "{{ hostvars
- | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
- | oo_collect(attribute='ansible_default_ipv4.address') }}"
- when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
- - name: Setting openshift_master_public_ips fact on localhost
- set_fact:
- openshift_master_public_ips: "{{ hostvars
- | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
- | oo_collect(attribute='gce_public_ip') }}"
- when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
+ - name: Create local temp directory for syncing certs
+ local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: mktemp
-- name: "Configure instances"
- hosts: oo_hosts_to_config
- connection: ssh
- user: root
+ - name: Sync master certs to localhost
+ synchronize:
+ mode: pull
+ checksum: yes
+ src: /var/lib/openshift/openshift.local.certificates
+ dest: "{{ mktemp.stdout }}"
+
+# TODO: sync generated certs between masters
+#
+- name: Configure instances
+ hosts: oo_nodes_to_config
vars_files:
- - vars.yml
+ - vars.yml
+ vars:
+ openshift_master_group: tag_env-host-type-{{ oo_env }}-openshift-master
+ openshift_master_ips: "{{ hostvars
+ | oo_select_keys(groups[openshift_master_group])
+ | oo_collect(attribute='openshift_master_ip') }}"
+ openshift_master_hostnames: "{{ hostvars
+ | oo_select_keys(groups[openshift_master_group])
+ | oo_collect(attribute='openshift_master_hostname') }}"
+ openshift_master_public_ips: "{{ hostvars
+ | oo_select_keys(groups[openshift_master_group])
+ | oo_collect(attribute='openshift_master_public_ip') }}"
+ openshift_master_public_hostnames: "{{ hostvars
+ | oo_select_keys(groups[openshift_master_group])
+ | oo_collect(attribute='openshift_master_public_hostname') }}"
+ cert_parent_rel_path: openshift.local.certificates
+ cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift_node_name }}"
+ cert_base_path: /var/lib/openshift
+ cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
+ cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
+ pre_tasks:
+ - name: Ensure certificate directories exists
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - "{{ cert_path }}"
+ - "{{ cert_parent_path }}/ca"
+
+  # TODO: only sync to a node if its certs have been updated
+ # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
+ # possibly test service started time against certificate/config file
+ # timestamps in openshift-node or openshift-sdn-node to trigger notify
+ # TODO: also copy ca cert: /var/lib/openshift/openshift.local.certificates/ca/cert.crt
+ - name: Sync certs to nodes
+ synchronize:
+ checksum: yes
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ owner: no
+ group: no
+ with_items:
+ - src: "{{ hostvars[groups[openshift_master_group][0]].mktemp.stdout }}/{{ cert_rel_path }}"
+ dest: "{{ cert_parent_path }}"
+ - src: "{{ hostvars[groups[openshift_master_group][0]].mktemp.stdout }}/{{ cert_parent_rel_path }}/ca/cert.crt"
+ dest: "{{ cert_parent_path }}/ca/cert.crt"
+ - local_action: file name={{ hostvars[groups[openshift_master_group][0]].mktemp.stdout }} state=absent
+ run_once: true
roles:
- - {
- role: openshift_node,
- openshift_master_ips: "{{ hostvars['localhost'].openshift_master_ips | default(['']) }}",
- openshift_master_public_ips: "{{ hostvars['localhost'].openshift_master_public_ips | default(['']) }}",
- openshift_public_ip: "{{ gce_public_ip }}",
- openshift_env: "{{ oo_env }}",
- }
- - docker
+ - openshift_node
- os_env_extras
diff --git a/playbooks/gce/openshift-node/launch.yml b/playbooks/gce/openshift-node/launch.yml
index 935599efd..ca2914d8a 100644
--- a/playbooks/gce/openshift-node/launch.yml
+++ b/playbooks/gce/openshift-node/launch.yml
@@ -24,8 +24,8 @@
tags: "{{ oo_new_inst_tags }}"
register: gce
- - name: Add new instances public IPs to oo_hosts_to_config
- add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_hosts_to_config"
+ - name: Add new instances public IPs to oo_nodes_to_config
+ add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_nodes_to_config"
with_items: gce.instance_data
- name: Wait for ssh
@@ -48,10 +48,10 @@
# Always bounce service to pick up new credentials
#- name: "Restart instances"
-# hosts: oo_hosts_to_config
+# hosts: oo_nodes_to_config
# connection: ssh
# user: root
# tasks:
-# - debug: var=groups.oo_hosts_to_config
+# - debug: var=groups.oo_nodes_to_config
# - name: Restart OpenShift
# service: name=openshift-node enabled=yes state=restarted
diff --git a/playbooks/gce/openshift-node/terminate.yml b/playbooks/gce/openshift-node/terminate.yml
index 8d60f27b3..9aa8a48c1 100644
--- a/playbooks/gce/openshift-node/terminate.yml
+++ b/playbooks/gce/openshift-node/terminate.yml
@@ -12,9 +12,10 @@
- debug: msg="{{ groups['oo_hosts_to_terminate'] }}"
-- name: Terminate instances
+- name: Terminate node instances
hosts: localhost
connection: local
+ gather_facts: no
tasks:
- name: Terminate node instances
gce:
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 2ecefd588..ca700db17 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -11,5 +11,5 @@
# From the origin rpm there exists instructions on how to
# setup origin properly. The following steps come from there
- name: Change root to be in the Docker group
- user: name=root groups=docker append=yes
+ user: name=root groups=dockerroot append=yes
diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md
index fce79047c..880d66e2c 100644
--- a/roles/openshift_common/README.md
+++ b/roles/openshift_common/README.md
@@ -15,8 +15,7 @@ Role Variables
| Name | Default value | |
|-------------------------------|------------------------------|----------------------------------------|
| openshift_debug_level | 0 | Global openshift debug log verbosity |
-| openshift_hostname_workaround | True | Workaround needed to set hostname to IP address |
-| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
+| openshift_hostname | UNDEF (Required) | hostname to use for this instance |
| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
| openshift_env | default | Envrionment name if multiple OpenShift instances |
diff --git a/roles/openshift_common/defaults/main.yml b/roles/openshift_common/defaults/main.yml
index eb6edbc03..22b2c6ffd 100644
--- a/roles/openshift_common/defaults/main.yml
+++ b/roles/openshift_common/defaults/main.yml
@@ -1,7 +1,2 @@
---
openshift_debug_level: 0
-
-# TODO: Once openshift stops resolving hostnames for node queries remove
-# this...
-openshift_hostname_workaround: true
-openshift_hostname: "{{ ansible_default_ipv4.address if openshift_hostname_workaround else ansible_fqdn }}"
diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md
index 5a1b889b2..2d898bc3b 100644
--- a/roles/openshift_master/README.md
+++ b/roles/openshift_master/README.md
@@ -25,9 +25,8 @@ From openshift_common:
| Name | Default Value | |
|-------------------------------|---------------------|---------------------|
| openshift_debug_level | 0 | Global openshift debug log verbosity |
-| openshift_hostname_workaround | True | |
| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
-| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
+| openshift_hostname | UNDEF (Required) | hostname to use for this instance |
Dependencies
------------
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index d5f4776dc..52f5f694c 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -1,4 +1,8 @@
---
+# TODO: allow for overriding default ports where possible
+# TODO: if setting up multiple masters, will need to predistribute the certs
+# to the additional masters before starting openshift-master
+
- name: Install OpenShift Master package
yum: pkg=openshift-master state=installed
@@ -6,9 +10,7 @@
lineinfile:
dest: /etc/sysconfig/openshift-master
regexp: '^OPTIONS='
- line: "OPTIONS=\"--public-master={{ openshift_hostname }} {% if
- openshift_node_ips %} --nodes={{ openshift_node_ips
- | join(',') }} {% endif %} --loglevel={{ openshift_master_debug_level }}\""
+ line: "OPTIONS=\"--public-master={{ openshift_hostname }} {% if openshift_node_ips %} --nodes={{ openshift_node_ips | join(',') }} {% endif %} --loglevel={{ openshift_master_debug_level }}\""
notify:
- restart openshift-master
@@ -34,42 +36,15 @@
option: externally_managed
value: "{{ openshift_master_manage_service_externally }}"
-# TODO: remove this when origin PR #1298 has landed in OSE
-- name: Workaround for openshift-master taking longer than 90 seconds to issue sdNotify signal
- command: cp /usr/lib/systemd/system/openshift-master.service /etc/systemd/system/
- args:
- creates: /etc/systemd/system/openshift-master.service
-- ini_file:
- dest: /etc/systemd/system/openshift-master.service
- option: TimeoutStartSec
- section: Service
- value: 300
- state: present
- register: result
-- command: systemctl daemon-reload
- when: result | changed
-# End of workaround pending PR #1298
-
- name: Start and enable openshift-master
service: name=openshift-master enabled=yes state=started
when: not openshift_master_manage_service_externally
register: result
-#TODO: remove this when origin PR #1204 has landed in OSE
-- name: need to pause here, otherwise we attempt to copy certificates generated by the master before they are generated
- pause: seconds=30
- when: result | changed
-# End of workaround pending PR #1204
-
- name: Disable openshift-master if openshift-master is managed externally
service: name=openshift-master enabled=false
when: openshift_master_manage_service_externally
-# TODO: create an os_vars role that has generic env related config and move
-# the root kubeconfig setting there, cannot use dependencies to force ordering
-# with openshift_node and openshift_master because the way conditional
-# dependencies work with current ansible would also exclude the
-# openshift_common dependency.
- name: Create .kube directory
file:
path: /root/.kube
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index 9210bab16..c9b4eab34 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -21,15 +21,13 @@ From this role:
| openshift_master_public_ips | UNDEF (Required) | List of the public IPs for the openhift-master hosts |
| openshift_master_ips | UNDEF (Required) | List of IP addresses for the openshift-master hosts to be used for node -> master communication |
| openshift_registry_url | UNDEF (Optional) | Default docker registry to use |
-| openshift_node_resources | { capacity: { cpu: , memory: } } | Resource specification for this node, cpu is the number of CPUs to advertise and memory is the amount of memory in bytes to advertise. Default values chosen when not set are the number of logical CPUs for the host and 75% of total system memory |
From openshift_common:
| Name | Default Value | |
|-------------------------------|---------------------|---------------------|
| openshift_debug_level | 0 | Global openshift debug log verbosity |
-| openshift_hostname_workaround | True | |
| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
-| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
+| openshift_hostname | UNDEF (Required) | hostname to use for this instance |
Dependencies
------------
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index e4d5ebfee..6dc73a96e 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -4,9 +4,3 @@ openshift_node_debug_level: "{{ openshift_debug_level | default(0) }}"
os_firewall_allow:
- service: OpenShift kubelet
port: 10250/tcp
-openshift_node_resources:
- cpu:
- memory:
- cidr:
-openshift_node_labels: {}
-openshift_node_annotations: {}
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index e380ba1fb..c039e3f05 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -1,27 +1,29 @@
---
+- name: Test if node certs and config exist
+ stat: path={{ item }}
+ failed_when: not result.stat.exists
+ register: result
+ with_items:
+ - "{{ cert_path }}"
+ - "{{ cert_path }}/cert.crt"
+ - "{{ cert_path }}/key.key"
+ - "{{ cert_path }}/.kubeconfig"
+ - "{{ cert_path }}/server.crt"
+ - "{{ cert_path }}/server.key"
+ - "{{ cert_parent_path }}/ca/cert.crt"
+ #- "{{ cert_path }}/node.yaml"
+
- name: Install OpenShift Node package
yum: pkg=openshift-node state=installed
-- local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: mktemp
-
-- name: Retrieve OpenShift Master credentials
- local_action: command /usr/bin/rsync --compress --archive --rsh 'ssh -S none -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' root@{{ openshift_master_public_ips[0] }}:/var/lib/openshift/openshift.local.certificates/admin/ {{ mktemp.stdout }}
- ignore_errors: yes
-
-- file: path=/var/lib/openshift/openshift.local.certificates/admin state=directory
-
-- name: Store OpenShift Master credentials
- local_action: command /usr/bin/rsync --compress --archive --rsh 'ssh -S none -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' {{ mktemp.stdout }}/ root@{{ openshift_public_ip }}:/var/lib/openshift/openshift.local.certificates/admin
- ignore_errors: yes
-
-- local_action: file name={{ mktemp.stdout }} state=absent
-
+# --create-certs=false is a temporary workaround until
+# https://github.com/openshift/origin/pull/1361 is merged upstream and it is
+# the default for nodes
- name: Configure OpenShift Node settings
lineinfile:
dest: /etc/sysconfig/openshift-node
regexp: '^OPTIONS='
- line: "OPTIONS=\"--master=https://{{ openshift_master_ips[0] }}:8443 --hostname={{ openshift_hostname }} --loglevel={{ openshift_node_debug_level }}\""
+ line: "OPTIONS=\"--hostname={{ openshift_hostname }} --loglevel={{ openshift_node_debug_level }} --create-certs=false\""
notify:
- restart openshift-node
@@ -47,42 +49,10 @@
option: externally_managed
value: "{{ openshift_node_manage_service_externally }}"
-# fixme: Once the openshift_cluster playbook is published state should be started
-# Always bounce service to pick up new credentials
- name: Start and enable openshift-node
- service: name=openshift-node enabled=yes state=restarted
+ service: name=openshift-node enabled=yes state=started
when: not openshift_node_manage_service_externally
- name: Disable openshift-node if openshift-node is managed externally
service: name=openshift-node enabled=false
when: openshift_node_manage_service_externally
-
-# TODO: create an os_vars role that has generic env related config and move
-# the root kubeconfig setting there, cannot use dependencies to force ordering
-# with openshift_node and openshift_master because the way conditional
-# dependencies work with current ansible would also exclude the
-# openshift_common dependency.
-- name: Create .kube directory
- file:
- path: /root/.kube
- state: directory
- mode: 0700
-- name: Configure root user kubeconfig
- command: cp /var/lib/openshift/openshift.local.certificates/admin/.kubeconfig /root/.kube/.kubeconfig
- args:
- creates: /root/.kube/.kubeconfig
-
-- name: Register node (if not already registered)
- openshift_register_node:
- name: "{{ openshift_hostname }}"
- api_version: v1beta1
- cpu: "{{ openshift_node_resources.cpu }}"
- memory: "{{ openshift_node_resources.memory }}"
- pod_cidr: "{{ openshift_node_resources.cidr }}"
- host_ip: "{{ ansible_default_ipv4.address }}"
- labels: "{{ openshift_node_labels }}"
- annotations: "{{ openshift_node_annotations }}"
- # TODO: support customizing other attributes such as: client_config,
- # client_cluster, client_context, client_user
- # TODO: updated for v1beta3 changes after rebase: hostnames, external_ips,
- # internal_ips, external_id
diff --git a/roles/openshift_register_nodes/README.md b/roles/openshift_register_nodes/README.md
new file mode 100644
index 000000000..225dd44b9
--- /dev/null
+++ b/roles/openshift_register_nodes/README.md
@@ -0,0 +1,38 @@
+Role Name
+=========
+
+A brief description of the role goes here.
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+ - hosts: servers
+ roles:
+ - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).
diff --git a/roles/openshift_register_nodes/defaults/main.yml b/roles/openshift_register_nodes/defaults/main.yml
new file mode 100644
index 000000000..3501e8922
--- /dev/null
+++ b/roles/openshift_register_nodes/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+openshift_kube_api_version: v1beta1
+openshift_cert_dir: openshift.local.certificates
+openshift_cert_dir_parent: /var/lib/openshift
+openshift_cert_dir_abs: "{{ openshift_cert_dir_parent ~ '/' ~ openshift_cert_dir }}"
diff --git a/roles/openshift_node/library/openshift_register_node.py b/roles/openshift_register_nodes/library/kubernetes_register_node.py
index 4922585d7..409215616 100644
--- a/roles/openshift_node/library/openshift_register_node.py
+++ b/roles/openshift_register_nodes/library/kubernetes_register_node.py
@@ -53,21 +53,17 @@ options:
cpu:
default: null
description:
- - Number of CPUs to allocate for this node. If not provided, then
- the node will be registered to advertise the number of logical
- CPUs available. When using the v1beta1 API, you must specify the
- CPU count as a floating point number with no more than 3 decimal
- places. API version v1beta3 and newer accepts arbitrary float
- values.
+ - Number of CPUs to allocate for this node. When using the v1beta1
+ API, you must specify the CPU count as a floating point number
+ with no more than 3 decimal places. API version v1beta3 and newer
+ accepts arbitrary float values.
required: false
memory:
default: null
description:
- - Memory available for this node. If not provided, then the node
- will be registered to advertise 80% of MemTotal as available
- memory. When using the v1beta1 API, you must specify the memory
- size in bytes. API version v1beta3 and newer accepts binary SI
- and decimal SI values.
+ - Memory available for this node. When using the v1beta1 API, you
+ must specify the memory size in bytes. API version v1beta3 and
+ newer accepts binary SI and decimal SI values.
required: false
'''
EXAMPLES = '''
@@ -152,22 +148,6 @@ class ClientConfig:
class Util:
@staticmethod
- def getLogicalCores():
- return multiprocessing.cpu_count()
-
- @staticmethod
- def getMemoryPct(pct):
- with open('/proc/meminfo', 'r') as mem:
- for line in mem:
- entries = line.split()
- if str(entries.pop(0)) == 'MemTotal:':
- mem_total_kb = Decimal(entries.pop(0))
- mem_capacity_kb = mem_total_kb * Decimal(pct)
- return str(mem_capacity_kb.to_integral_value() * 1024)
-
- return ""
-
- @staticmethod
def remove_empty_elements(mapping):
if isinstance(mapping, dict):
m = mapping.copy()
@@ -182,8 +162,8 @@ class NodeResources:
def __init__(self, version, cpu=None, memory=None):
if version == 'v1beta1':
self.resources = dict(capacity=dict())
- self.resources['capacity']['cpu'] = cpu if cpu else Util.getLogicalCores()
- self.resources['capacity']['memory'] = memory if cpu else Util.getMemoryPct(.75)
+ self.resources['capacity']['cpu'] = cpu
+ self.resources['capacity']['memory'] = memory
def get_resources(self):
return Util.remove_empty_elements(self.resources)
@@ -193,8 +173,8 @@ class NodeSpec:
if version == 'v1beta3':
self.spec = dict(podCIDR=cidr, externalID=externalID,
capacity=dict())
- self.spec['capacity']['cpu'] = cpu if cpu else Util.getLogicalCores()
- self.spec['capacity']['memory'] = memory if memory else Util.getMemoryPct(.75)
+ self.spec['capacity']['cpu'] = cpu
+ self.spec['capacity']['memory'] = memory
def get_spec(self):
return Util.remove_empty_elements(self.spec)
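
The removed getLogicalCores()/getMemoryPct() helpers are replaced by defaulting in the node config play (see playbooks/gce/openshift-node/config.yml above); a sketch of that fallback logic, with example fact values:

    def default_node_resources(ansible_processor_cores, ansible_memtotal_mb,
                               openshift_node_cpu=None, openshift_node_memory=None):
        # mirrors: openshift_node_cpu if set, else ansible_processor_cores
        cpu = openshift_node_cpu if openshift_node_cpu else ansible_processor_cores
        # mirrors: (ansible_memtotal_mb|int * 1024 * 1024 * 0.75)|int
        memory = (openshift_node_memory if openshift_node_memory
                  else int(ansible_memtotal_mb * 1024 * 1024 * 0.75))
        return cpu, memory

    print(default_node_resources(2, 3951))  # (2, 3107192832) for a ~4 GB node
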
diff --git a/roles/openshift_register_nodes/meta/main.yml b/roles/openshift_register_nodes/meta/main.yml
new file mode 100644
index 000000000..7b1f0ef0a
--- /dev/null
+++ b/roles/openshift_register_nodes/meta/main.yml
@@ -0,0 +1,128 @@
+---
+galaxy_info:
+ author: your name
+ description:
+ company: your company (optional)
+ # Some suggested licenses:
+ # - BSD (default)
+ # - MIT
+ # - GPLv2
+ # - GPLv3
+ # - Apache
+ # - CC-BY
+ license: license (GPLv2, CC-BY, etc)
+ min_ansible_version: 1.2
+ #
+ # Below are all platforms currently available. Just uncomment
+ # the ones that apply to your role. If you don't see your
+ # platform on this list, let us know and we'll get it added!
+ #
+ #platforms:
+ #- name: EL
+ # versions:
+ # - all
+ # - 5
+ # - 6
+ # - 7
+ #- name: GenericUNIX
+ # versions:
+ # - all
+ # - any
+ #- name: Fedora
+ # versions:
+ # - all
+ # - 16
+ # - 17
+ # - 18
+ # - 19
+ # - 20
+ #- name: SmartOS
+ # versions:
+ # - all
+ # - any
+ #- name: opensuse
+ # versions:
+ # - all
+ # - 12.1
+ # - 12.2
+ # - 12.3
+ # - 13.1
+ # - 13.2
+ #- name: Amazon
+ # versions:
+ # - all
+ # - 2013.03
+ # - 2013.09
+ #- name: GenericBSD
+ # versions:
+ # - all
+ # - any
+ #- name: FreeBSD
+ # versions:
+ # - all
+ # - 8.0
+ # - 8.1
+ # - 8.2
+ # - 8.3
+ # - 8.4
+ # - 9.0
+ # - 9.1
+ # - 9.1
+ # - 9.2
+ #- name: Ubuntu
+ # versions:
+ # - all
+ # - lucid
+ # - maverick
+ # - natty
+ # - oneiric
+ # - precise
+ # - quantal
+ # - raring
+ # - saucy
+ # - trusty
+ #- name: SLES
+ # versions:
+ # - all
+ # - 10SP3
+ # - 10SP4
+ # - 11
+ # - 11SP1
+ # - 11SP2
+ # - 11SP3
+ #- name: GenericLinux
+ # versions:
+ # - all
+ # - any
+ #- name: Debian
+ # versions:
+ # - all
+ # - etch
+ # - lenny
+ # - squeeze
+ # - wheezy
+ #
+ # Below are all categories currently available. Just as with
+ # the platforms above, uncomment those that apply to your role.
+ #
+ #categories:
+ #- cloud
+ #- cloud:ec2
+ #- cloud:gce
+ #- cloud:rax
+ #- clustering
+ #- database
+ #- database:nosql
+ #- database:sql
+ #- development
+ #- monitoring
+ #- networking
+ #- packaging
+ #- system
+ #- web
+dependencies: []
+ # List your role dependencies here, one per line. Only
+ # dependencies available via galaxy should be listed here.
+ # Be sure to remove the '[]' above if you add dependencies
+ # to this list.
+
diff --git a/roles/openshift_register_nodes/tasks/main.yml b/roles/openshift_register_nodes/tasks/main.yml
new file mode 100644
index 000000000..59216fc87
--- /dev/null
+++ b/roles/openshift_register_nodes/tasks/main.yml
@@ -0,0 +1,71 @@
+---
+# TODO: support configuration for multiple masters, currently hardcoding
+# the info from the first master
+
+# TODO: create a failed_when condition
+- name: Create node server certificates
+ command: >
+ /usr/bin/openshift admin create-server-cert
+ --overwrite=false
+ --cert={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/server.crt
+ --key={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/server.key
+ --hostnames={{ [openshift_hostname, openshift_public_hostname, openshift_ip, openshift_public_ip]|join(",") }}
+ args:
+ chdir: "{{ openshift_cert_dir_parent }}"
+ creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift_node_hostname }}/server.crt"
+ with_items: openshift_nodes
+ register: server_cert_result
+
+# TODO: create a failed_when condition
+- name: Create node client certificates
+ command: >
+ /usr/bin/openshift admin create-node-cert
+ --overwrite=false
+ --cert={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/cert.crt
+ --key={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/key.key
+ --node-name={{ item.openshift_node_hostname }}
+ args:
+ chdir: "{{ openshift_cert_dir_parent }}"
+ creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift_node_hostname }}/cert.crt"
+ with_items: openshift_nodes
+ register: node_cert_result
+
+# TODO: re-create kubeconfig if certs were regenerated, not just if
+# .kubeconfig doesn't exist
+# TODO: create a failed_when condition
+- name: Create kubeconfigs for nodes
+ command: >
+ /usr/bin/openshift admin create-kubeconfig
+ --client-certificate={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/cert.crt
+ --client-key={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/key.key
+ --kubeconfig={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/.kubeconfig
+ --master={{ openshift_master_urls[0] }}
+ --public-master={{ openshift_master_public_urls[0] }}
+ args:
+ chdir: "{{ openshift_cert_dir_parent }}"
+ creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift_node_hostname }}/.kubeconfig"
+ with_items: openshift_nodes
+ register: kubeconfig_result
+
+# TODO: generate the node configs (openshift start node --write-config
+# --config='{{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/node.yaml'
+# --kubeconfig='{{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/.kubeconfig'
+# will need to modify the generated node config as needed
+# (servingInfo.{certFile,clientCA,keyFile})
+
+- name: Register unregistered nodes
+ kubernetes_register_node:
+ name: "{{ item.openshift_node_name }}"
+ api_version: "{{ openshift_kube_api_version }}"
+ cpu: "{{ item.openshift_node_cpu if item.openshift_node_cpu else None }}"
+ memory: "{{ item.openshift_node_memory if item.openshift_node_memory else None }}"
+ pod_cidr: "{{ item.openshift_node_pod_cidr if item.openshift_node_pod_cidr else None }}"
+ host_ip: "{{ item.openshift_node_host_ip }}"
+ labels: "{{ item.openshift_node_labels if item.openshift_node_labels else {} }}"
+ annotations: "{{ item.openshift_node_annotations if item.openshift_node_annotations else {} }}"
+ # TODO: support customizing other attributes such as: client_config,
+ # client_cluster, client_context, client_user
+ # TODO: update for v1beta3 changes after rebase: hostnames, external_ips,
+ # internal_ips, external_id
+ with_items: openshift_nodes
+ register: register_result
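
For orientation, a sketch of the per-node certificate layout these tasks create under /var/lib/openshift (paths follow the role defaults above; the hostname is illustrative):

    CERT_DIR_PARENT = '/var/lib/openshift'       # openshift_cert_dir_parent
    CERT_DIR = 'openshift.local.certificates'    # openshift_cert_dir

    def node_cert_paths(node_hostname):
        base = '%s/%s/node-%s' % (CERT_DIR_PARENT, CERT_DIR, node_hostname)
        return {
            'server_cert': base + '/server.crt',   # from create-server-cert
            'server_key':  base + '/server.key',
            'client_cert': base + '/cert.crt',     # from create-node-cert
            'client_key':  base + '/key.key',
            'kubeconfig':  base + '/.kubeconfig',  # from create-kubeconfig
        }

    print(node_cert_paths('mycluster-node-0f3a2')['kubeconfig'])
    # /var/lib/openshift/openshift.local.certificates/node-mycluster-node-0f3a2/.kubeconfig
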
diff --git a/roles/openshift_sdn_node/README.md b/roles/openshift_sdn_node/README.md
index 294550219..2da2d74eb 100644
--- a/roles/openshift_sdn_node/README.md
+++ b/roles/openshift_sdn_node/README.md
@@ -27,9 +27,8 @@ From openshift_common:
| Name | Default value | |
|-------------------------------|---------------------|----------------------------------------|
| openshift_debug_level | 0 | Global openshift debug log verbosity |
-| openshift_hostname_workaround | True | |
| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
-| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
+| openshift_hostname | UNDEF (Required) | hostname to use for this instance |
Dependencies
------------
diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py
index fef710055..6a018d022 100644
--- a/roles/os_firewall/library/os_firewall_manage_iptables.py
+++ b/roles/os_firewall/library/os_firewall_manage_iptables.py
@@ -51,11 +51,13 @@ class IpTablesCreateJumpRuleError(IpTablesError):
# exception was thrown later. for example, when the chain is created
# successfully, but the add/remove rule fails.
class IpTablesManager:
- def __init__(self, module, ip_version, check_mode, chain):
+ def __init__(self, module):
self.module = module
- self.ip_version = ip_version
- self.check_mode = check_mode
- self.chain = chain
+ self.ip_version = module.params['ip_version']
+ self.check_mode = module.check_mode
+ self.chain = module.params['chain']
+ self.create_jump_rule = module.params['create_jump_rule']
+ self.jump_rule_chain = module.params['jump_rule_chain']
self.cmd = self.gen_cmd()
self.save_cmd = self.gen_save_cmd()
self.output = []
@@ -70,13 +72,16 @@ class IpTablesManager:
msg="Failed to save iptables rules",
cmd=e.cmd, exit_code=e.returncode, output=e.output)
+ def verify_chain(self):
+ if not self.chain_exists():
+ self.create_chain()
+ if self.create_jump_rule and not self.jump_rule_exists():
+ self.create_jump()
+
def add_rule(self, port, proto):
rule = self.gen_rule(port, proto)
if not self.rule_exists(rule):
- if not self.chain_exists():
- self.create_chain()
- if not self.jump_rule_exists():
- self.create_jump_rule()
+ self.verify_chain()
if self.check_mode:
self.changed = True
@@ -121,13 +126,13 @@ class IpTablesManager:
return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW',
'-m', proto, '--dport', str(port), '-j', 'ACCEPT']
- def create_jump_rule(self):
+ def create_jump(self):
if self.check_mode:
self.changed = True
self.output.append("Create jump rule for chain %s" % self.chain)
else:
try:
- cmd = self.cmd + ['-L', 'INPUT', '--line-numbers']
+ cmd = self.cmd + ['-L', self.jump_rule_chain, '--line-numbers']
output = check_output(cmd, stderr=subprocess.STDOUT)
# break the input rules into rows and columns
@@ -144,11 +149,11 @@ class IpTablesManager:
continue
last_rule_target = rule[1]
- # Raise an exception if we do not find a valid INPUT rule
+ # Raise an exception if we do not find a valid rule
if not last_rule_num or not last_rule_target:
raise IpTablesCreateJumpRuleError(
chain=self.chain,
- msg="Failed to find existing INPUT rules",
+ msg="Failed to find existing %s rules" % self.jump_rule_chain,
cmd=None, exit_code=None, output=None)
# Naively assume that if the last row is a REJECT rule, then
@@ -156,19 +161,20 @@ class IpTablesManager:
# assume that we can just append the rule.
if last_rule_target == 'REJECT':
# insert rule
- cmd = self.cmd + ['-I', 'INPUT', str(last_rule_num)]
+ cmd = self.cmd + ['-I', self.jump_rule_chain, str(last_rule_num)]
else:
# append rule
- cmd = self.cmd + ['-A', 'INPUT']
+ cmd = self.cmd + ['-A', self.jump_rule_chain]
cmd += ['-j', self.chain]
output = check_output(cmd, stderr=subprocess.STDOUT)
changed = True
self.output.append(output)
+ self.save()
except subprocess.CalledProcessError as e:
if '--line-numbers' in e.cmd:
raise IpTablesCreateJumpRuleError(
chain=self.chain,
- msg="Failed to query existing INPUT rules to "
+ msg="Failed to query existing %s rules to " % self.jump_rule_chain +
"determine jump rule location",
cmd=e.cmd, exit_code=e.returncode,
output=e.output)
@@ -192,6 +198,7 @@ class IpTablesManager:
self.changed = True
self.output.append("Successfully created chain %s" %
self.chain)
+ self.save()
except subprocess.CalledProcessError as e:
raise IpTablesCreateChainError(
chain=self.chain,
@@ -200,7 +207,7 @@ class IpTablesManager:
)
def jump_rule_exists(self):
- cmd = self.cmd + ['-C', 'INPUT', '-j', self.chain]
+ cmd = self.cmd + ['-C', self.jump_rule_chain, '-j', self.chain]
return True if subprocess.call(cmd) == 0 else False
def chain_exists(self):
@@ -220,9 +227,12 @@ def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
- action=dict(required=True, choices=['add', 'remove']),
- protocol=dict(required=True, choices=['tcp', 'udp']),
- port=dict(required=True, type='int'),
+ action=dict(required=True, choices=['add', 'remove', 'verify_chain']),
+ chain=dict(required=False, default='OS_FIREWALL_ALLOW'),
+ create_jump_rule=dict(required=False, type='bool', default=True),
+ jump_rule_chain=dict(required=False, default='INPUT'),
+ protocol=dict(required=False, choices=['tcp', 'udp']),
+ port=dict(required=False, type='int'),
ip_version=dict(required=False, default='ipv4',
choices=['ipv4', 'ipv6']),
),
@@ -232,16 +242,24 @@ def main():
action = module.params['action']
protocol = module.params['protocol']
port = module.params['port']
- ip_version = module.params['ip_version']
- chain = 'OS_FIREWALL_ALLOW'
- iptables_manager = IpTablesManager(module, ip_version, module.check_mode, chain)
+ if action in ['add', 'remove']:
+ if not protocol:
+ error = "protocol is required when action is %s" % action
+ module.fail_json(msg=error)
+ if not port:
+ error = "port is required when action is %s" % action
+ module.fail_json(msg=error)
+
+ iptables_manager = IpTablesManager(module)
try:
if action == 'add':
iptables_manager.add_rule(port, protocol)
elif action == 'remove':
iptables_manager.remove_rule(port, protocol)
+ elif action == 'verify_chain':
+ iptables_manager.verify_chain()
except IpTablesError as e:
module.fail_json(msg=e.msg)
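
Illustrative parameter sets the reworked module now accepts (a sketch; the option names match the new argument_spec above, the values are examples). add/remove still require protocol and port, while verify_chain only needs the chain-related options:

    example_invocations = [
        # open a port in the default OS_FIREWALL_ALLOW chain
        dict(name='OpenShift kubelet', action='add', protocol='tcp', port=10250),
        # ensure a custom chain and its jump rule exist without touching ports
        dict(name='verify sdn chain', action='verify_chain', chain='MY_CHAIN',
             jump_rule_chain='FORWARD', create_jump_rule=True),
    ]
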
diff --git a/roles/os_update_latest/tasks/main.yml b/roles/os_update_latest/tasks/main.yml
new file mode 100644
index 000000000..4a2c3d47a
--- /dev/null
+++ b/roles/os_update_latest/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- name: Update all packages
+ yum: name=* state=latest