Diffstat (limited to 'roles/openshift_aws')
 roles/openshift_aws/README.md                               |   6
 roles/openshift_aws/defaults/main.yml                       | 186
 roles/openshift_aws/filter_plugins/openshift_aws_filters.py |  74
 roles/openshift_aws/tasks/accept_nodes.yml                  |   4
 roles/openshift_aws/tasks/build_node_group.yml              |   1
 roles/openshift_aws/tasks/elb.yml                           |  24
 roles/openshift_aws/tasks/elb_single.yml                    |  34
 roles/openshift_aws/tasks/iam_cert.yml                      |   9
 roles/openshift_aws/tasks/master_facts.yml                  |   2
 roles/openshift_aws/tasks/provision.yml                     |  17
 roles/openshift_aws/tasks/provision_elb.yml                 |  14
 roles/openshift_aws/tasks/provision_instance.yml            |   8
 roles/openshift_aws/tasks/provision_nodes.yml               |  17
 roles/openshift_aws/tasks/seal_ami.yml                      |  15
 roles/openshift_aws/tasks/uninstall_elb.yml                 |  11
 roles/openshift_aws/tasks/uninstall_iam_cert.yml            |  25
 roles/openshift_aws/tasks/uninstall_s3.yml                  |  26
 roles/openshift_aws/tasks/uninstall_security_group.yml      |  14
 roles/openshift_aws/tasks/uninstall_ssh_keys.yml            |   9
 roles/openshift_aws/tasks/uninstall_vpc.yml                 |  36
 roles/openshift_aws/tasks/vpc_and_subnet_id.yml             |   8
 roles/openshift_aws/templates/user_data.j2                  |   3
 23 files changed, 321 insertions(+), 223 deletions(-)
diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md
index 4aca5c7a8..de73ab01d 100644
--- a/roles/openshift_aws/README.md
+++ b/roles/openshift_aws/README.md
@@ -7,9 +7,9 @@ This role contains many task-areas to provision resources and perform actions
against an AWS account for the purposes of dynamically building an openshift
cluster.
-This role is primarily intended to be used with "include_role" and "tasks_from".
+This role is primarily intended to be used with "import_role" and "tasks_from".
-include_role can be called from the tasks section in a play. See example
+import_role can be called from the tasks section in a play. See example
playbook below for reference.
These task-areas are:
@@ -40,7 +40,7 @@ Example Playbook
----------------
```yaml
-- include_role:
+- import_role:
name: openshift_aws
tasks_from: vpc.yml
vars:
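Worth noting for readers of this hunk: `import_role` is resolved statically when the playbook is parsed, while `include_role` is evaluated dynamically at runtime, so the switch makes the `tasks_from` entry points visible to parse-time tooling such as `--list-tasks`. A minimal sketch of the documented calling pattern; the clusterid and region values are illustrative placeholders, not part of this commit:

```yaml
# Hypothetical play using the import_role + tasks_from entry-point pattern;
# clusterid and region are placeholders.
- hosts: localhost
  connection: local
  tasks:
  - import_role:
      name: openshift_aws
      tasks_from: vpc.yml
    vars:
      openshift_aws_clusterid: mycluster
      openshift_aws_region: us-east-1
```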
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 71de24339..3d966e34a 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -42,73 +42,101 @@ openshift_aws_ami_tags:
openshift_aws_s3_mode: create
openshift_aws_s3_bucket_name: "{{ openshift_aws_clusterid }}-docker-registry"
-openshift_aws_elb_health_check:
- ping_protocol: tcp
- ping_port: 443
- response_timeout: 5
- interval: 30
- unhealthy_threshold: 2
- healthy_threshold: 2
-
openshift_aws_elb_basename: "{{ openshift_aws_clusterid }}"
-openshift_aws_elb_name_dict:
- master:
- external: "{{ openshift_aws_elb_basename }}-master-external"
- internal: "{{ openshift_aws_elb_basename }}-master-internal"
- infra:
- external: "{{ openshift_aws_elb_basename }}-infra"
-openshift_aws_elb_idle_timout: 400
-openshift_aws_elb_scheme: internet-facing
openshift_aws_elb_cert_arn: ''
openshift_aws_elb_dict:
master:
external:
- - protocol: tcp
- load_balancer_port: 80
- instance_protocol: ssl
- instance_port: 443
- - protocol: ssl
- load_balancer_port: 443
- instance_protocol: ssl
- instance_port: 443
- # ssl certificate required for https or ssl
- ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}"
+ cross_az_load_balancing: False
+ health_check:
+ ping_protocol: tcp
+ ping_port: "{{ openshift_master_api_port | default(8443) }}"
+ response_timeout: 5
+ interval: 30
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+ idle_timout: 400
+ listeners:
+ - protocol: tcp
+ load_balancer_port: 80
+ instance_protocol: ssl
+ instance_port: "{{ openshift_master_api_port | default(8443) }}"
+ - protocol: ssl
+ load_balancer_port: "{{ openshift_master_api_port | default(8443) }}"
+ instance_protocol: ssl
+ instance_port: "{{ openshift_master_api_port | default(8443) }}"
+ ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}"
+ name: "{{ openshift_aws_elb_basename }}-master-external"
+ tags: "{{ openshift_aws_kube_tags }}"
internal:
- - protocol: tcp
- load_balancer_port: 80
- instance_protocol: tcp
- instance_port: 80
- - protocol: tcp
- load_balancer_port: 443
- instance_protocol: tcp
- instance_port: 443
+ cross_az_load_balancing: False
+ health_check:
+ ping_protocol: tcp
+ ping_port: "{{ openshift_master_api_port | default(8443) }}"
+ response_timeout: 5
+ interval: 30
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+ idle_timout: 400
+ listeners:
+ - protocol: tcp
+ load_balancer_port: 80
+ instance_protocol: tcp
+ instance_port: 80
+ - protocol: tcp
+ load_balancer_port: "{{ openshift_master_api_port | default(8443) }}"
+ instance_protocol: tcp
+ instance_port: "{{ openshift_master_api_port | default(8443) }}"
+ name: "{{ openshift_aws_elb_basename }}-master-internal"
+ tags: "{{ openshift_aws_kube_tags }}"
infra:
external:
- - protocol: tcp
- load_balancer_port: 80
- instance_protocol: tcp
- instance_port: 443
- proxy_protocol: True
- - protocol: tcp
- load_balancer_port: 443
- instance_protocol: tcp
- instance_port: 443
- proxy_protocol: True
+ cross_az_load_balancing: False
+ health_check:
+ ping_protocol: tcp
+ ping_port: 443
+ response_timeout: 5
+ interval: 30
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+ idle_timout: 400
+ listeners:
+ - protocol: tcp
+ load_balancer_port: 80
+ instance_protocol: tcp
+ instance_port: 443
+ proxy_protocol: True
+ - protocol: tcp
+ load_balancer_port: 443
+ instance_protocol: tcp
+ instance_port: 443
+ proxy_protocol: True
+ name: "{{ openshift_aws_elb_basename }}-infra"
+ tags: "{{ openshift_aws_kube_tags }}"
openshift_aws_node_group_config_master_volumes:
+- device_name: /dev/sda1
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: False
- device_name: /dev/sdb
volume_size: 100
device_type: gp2
delete_on_termination: False
openshift_aws_node_group_config_node_volumes:
+- device_name: /dev/sda1
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: True
- device_name: /dev/sdb
volume_size: 100
device_type: gp2
delete_on_termination: True
+# build_instance_tags is a custom filter in role lib_utils
openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
openshift_aws_node_group_termination_policy: Default
openshift_aws_node_group_replace_instances: []
@@ -145,37 +173,37 @@ openshift_aws_node_groups:
openshift_aws_created_asgs: []
openshift_aws_current_asgs: []
+openshift_aws_scale_group_health_check:
+ period: 60
+ type: EC2
+
# these will be used during upgrade
openshift_aws_master_group_config:
# The 'master' key is always required here.
master:
- instance_type: m4.xlarge
+ instance_type: "{{ openshift_aws_master_group_instance_type | default('m4.xlarge') }}"
volumes: "{{ openshift_aws_node_group_config_master_volumes }}"
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 3
- desired_size: 3
+ health_check: "{{ openshift_aws_scale_group_health_check }}"
+ min_size: "{{ openshift_aws_master_group_min_size | default(3) }}"
+ max_size: "{{ openshift_aws_master_group_max_size | default(3) }}"
+ desired_size: "{{ openshift_aws_master_group_desired_size | default(3) }}"
wait_for_instances: True
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
iam_role: "{{ openshift_aws_iam_role_name }}"
policy_name: "{{ openshift_aws_iam_role_policy_name }}"
policy_json: "{{ openshift_aws_iam_role_policy_json }}"
- elbs: "{{ openshift_aws_elb_name_dict['master'].keys()| map('extract', openshift_aws_elb_name_dict['master']) | list }}"
+ elbs: "{{ openshift_aws_elb_dict | json_query('master.[*][0][*].name') }}"
openshift_aws_node_group_config:
# The 'compute' key is always required here.
compute:
- instance_type: m4.xlarge
+ instance_type: "{{ openshift_aws_compute_group_instance_type | default('m4.xlarge') }}"
volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 100
- desired_size: 3
+ health_check: "{{ openshift_aws_scale_group_health_check }}"
+ min_size: "{{ openshift_aws_compute_group_min_size | default(3) }}"
+ max_size: "{{ openshift_aws_compute_group_max_size | default(100) }}"
+ desired_size: "{{ openshift_aws_compute_group_desired_size | default(3) }}"
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
iam_role: "{{ openshift_aws_iam_role_name }}"
@@ -183,24 +211,20 @@ openshift_aws_node_group_config:
policy_json: "{{ openshift_aws_iam_role_policy_json }}"
# The 'infra' key is always required here.
infra:
- instance_type: m4.xlarge
+ instance_type: "{{ openshift_aws_infra_group_instance_type | default('m4.xlarge') }}"
volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
- health_check:
- period: 60
- type: EC2
- min_size: 2
- max_size: 20
- desired_size: 2
+ health_check: "{{ openshift_aws_scale_group_health_check }}"
+ min_size: "{{ openshift_aws_infra_group_min_size | default(2) }}"
+ max_size: "{{ openshift_aws_infra_group_max_size | default(20) }}"
+ desired_size: "{{ openshift_aws_infra_group_desired_size | default(2) }}"
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
iam_role: "{{ openshift_aws_iam_role_name }}"
policy_name: "{{ openshift_aws_iam_role_policy_name }}"
policy_json: "{{ openshift_aws_iam_role_policy_json }}"
- elbs: "{{ openshift_aws_elb_name_dict['infra'].keys()| map('extract', openshift_aws_elb_name_dict['infra']) | list }}"
-
-openshift_aws_elb_tags: "{{ openshift_aws_kube_tags }}"
-openshift_aws_elb_az_load_balancing: False
+ elbs: "{{ openshift_aws_elb_dict | json_query('infra.[*][0][*].name') }}"
+# build_instance_tags is a custom filter in role lib_utils
openshift_aws_kube_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
openshift_aws_elb_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
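The new `elbs` expressions replace the `openshift_aws_elb_name_dict` lookups with JMESPath queries against `openshift_aws_elb_dict`: `master.[*][0][*].name` projects the values under `master` (here `external` and `internal`) and collects their `name` fields. A sketch of the expected result, assuming `openshift_aws_clusterid` is `mycluster`:

```yaml
# Illustration only: expected output of the JMESPath projection against the
# defaults above, with openshift_aws_clusterid assumed to be "mycluster".
- debug:
    msg: "{{ openshift_aws_elb_dict | json_query('master.[*][0][*].name') }}"
  # => ["mycluster-master-external", "mycluster-master-internal"]
```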
@@ -243,8 +267,8 @@ openshift_aws_node_security_groups:
to_port: 80
cidr_ip: 0.0.0.0/0
- proto: tcp
- from_port: 443
- to_port: 443
+ from_port: "{{ openshift_master_api_port | default(8443) }}"
+ to_port: "{{ openshift_master_api_port | default(8443) }}"
cidr_ip: 0.0.0.0/0
compute:
name: "{{ openshift_aws_clusterid }}_compute"
@@ -258,8 +282,8 @@ openshift_aws_node_security_groups:
to_port: 80
cidr_ip: 0.0.0.0/0
- proto: tcp
- from_port: 443
- to_port: 443
+ from_port: "{{ openshift_master_api_port | default(8443) }}"
+ to_port: "{{ openshift_master_api_port | default(8443) }}"
cidr_ip: 0.0.0.0/0
- proto: tcp
from_port: 30000
@@ -272,8 +296,6 @@ openshift_aws_node_security_groups:
openshift_aws_vpc_tags:
Name: "{{ openshift_aws_vpc_name }}"
-openshift_aws_subnet_az: us-east-1c
-
openshift_aws_vpc:
name: "{{ openshift_aws_vpc_name }}"
cidr: 172.31.0.0/16
@@ -281,13 +303,25 @@ openshift_aws_vpc:
us-east-1:
- cidr: 172.31.48.0/20
az: "us-east-1c"
+ default_az: true
- cidr: 172.31.32.0/20
az: "us-east-1e"
- cidr: 172.31.16.0/20
az: "us-east-1a"
+openshift_aws_subnet_az: "{{ openshift_aws_vpc.subnets[openshift_aws_region] | get_default_az }}"
+
openshift_aws_node_run_bootstrap_startup: True
openshift_aws_node_user_data: ''
openshift_aws_node_config_namespace: openshift-node
openshift_aws_masters_groups: masters,etcd,nodes
+
+# By default, don't delete things like the shared IAM instance
+# profile and uploaded ssh keys
+openshift_aws_enable_uninstall_shared_objects: False
+# S3 bucket names are global by default and can take minutes/hours for the
+# name to become available for re-use (assuming someone doesn't take the
+# name in the meantime). Default to just emptying the contents of the S3
+# bucket if we've been asked to create the bucket during provisioning.
+openshift_aws_really_delete_s3_bucket: False
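One behavioral note on this hunk: `openshift_aws_subnet_az` is no longer hard-coded to `us-east-1c` but derived from whichever subnet entry carries `default_az: true`. `get_default_az` is another custom filter, presumably living alongside the others in role `lib_utils`, though this diff doesn't show it. A sketch of the expected resolution with the defaults above:

```yaml
# Illustration only: with the us-east-1 subnet list above, the filter should
# return the az of the entry flagged default_az: true.
- debug:
    msg: "{{ openshift_aws_vpc.subnets['us-east-1'] | get_default_az }}"
  # => "us-east-1c"
```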
diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
deleted file mode 100644
index dfcb11da3..000000000
--- a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-'''
-Custom filters for use in openshift_aws
-'''
-
-from ansible import errors
-
-
-class FilterModule(object):
- ''' Custom ansible filters for use by openshift_aws role'''
-
- @staticmethod
- def scale_groups_serial(scale_group_info, upgrade=False):
- ''' This function will determine what the deployment serial should be and return it
-
- Search through the tags and find the deployment_serial tag. Once found,
- determine if an increment is needed during an upgrade.
- if upgrade is true then increment the serial and return it
- else return the serial
- '''
- if scale_group_info == []:
- return 1
-
- scale_group_info = scale_group_info[0]
-
- if not isinstance(scale_group_info, dict):
- raise errors.AnsibleFilterError("|filter plugin failed: Expected scale_group_info to be a dict")
-
- serial = None
-
- for tag in scale_group_info['tags']:
- if tag['key'] == 'deployment_serial':
- serial = int(tag['value'])
- if upgrade:
- serial += 1
- break
- else:
- raise errors.AnsibleFilterError("|filter plugin failed: deployment_serial tag was not found")
-
- return serial
-
- @staticmethod
- def scale_groups_match_capacity(scale_group_info):
- ''' This function will verify that the scale group instance count matches
- the scale group desired capacity
-
- '''
- for scale_group in scale_group_info:
- if scale_group['desired_capacity'] != len(scale_group['instances']):
- return False
-
- return True
-
- @staticmethod
- def build_instance_tags(clusterid):
- ''' This function will return a dictionary of the instance tags.
-
- The main desire to have this inside of a filter_plugin is that we
- need to build the following key.
-
- {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": "{{ openshift_aws_clusterid}}"}
-
- '''
- tags = {'clusterid': clusterid,
- 'kubernetes.io/cluster/{}'.format(clusterid): clusterid}
-
- return tags
-
- def filters(self):
- ''' returns a mapping of filters to methods '''
- return {'build_instance_tags': self.build_instance_tags,
- 'scale_groups_match_capacity': self.scale_groups_match_capacity,
- 'scale_groups_serial': self.scale_groups_serial}
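The filter plugin isn't dropped outright: the comments added elsewhere in this commit ("… is a custom filter in role lib_utils") indicate all three filters were relocated to the shared `lib_utils` role. For reference, what `build_instance_tags` returns per the deleted implementation above, with a placeholder clusterid:

```yaml
# Illustration only: result of build_instance_tags for clusterid "mycluster"
# (a placeholder), matching the deleted Python above.
- debug:
    msg: "{{ 'mycluster' | build_instance_tags }}"
  # => {"clusterid": "mycluster",
  #     "kubernetes.io/cluster/mycluster": "mycluster"}
```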
diff --git a/roles/openshift_aws/tasks/accept_nodes.yml b/roles/openshift_aws/tasks/accept_nodes.yml
index c2a2cea30..db30fe5c9 100644
--- a/roles/openshift_aws/tasks/accept_nodes.yml
+++ b/roles/openshift_aws/tasks/accept_nodes.yml
@@ -1,4 +1,6 @@
---
+- include_tasks: setup_master_group.yml
+
- name: fetch masters
ec2_instance_facts:
region: "{{ openshift_aws_region | default('us-east-1') }}"
@@ -36,4 +38,4 @@
nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
timeout: 60
register: nodeout
- delegate_to: "{{ mastersout.instances[0].public_ip_address }}"
+ delegate_to: "{{ groups.masters.0 }}"
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
index 9485cc3ac..a9f9cc3c4 100644
--- a/roles/openshift_aws/tasks/build_node_group.yml
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -43,6 +43,7 @@
- name: set the value for the deployment_serial and the current asgs
set_fact:
+ # scale_groups_serial is a custom filter in role lib_utils
l_deployment_serial: "{{ openshift_aws_node_group_deployment_serial if openshift_aws_node_group_deployment_serial is defined else asgs.results | scale_groups_serial(openshift_aws_node_group_upgrade) }}"
openshift_aws_current_asgs: "{{ asgs.results | map(attribute='auto_scaling_group_name') | list | union(openshift_aws_current_asgs) }}"
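Per the relocated filter source above, `scale_groups_serial` reads the `deployment_serial` tag off the queried scale group and increments it only during an upgrade. A sketch of the behavior, with an assumed existing tag value:

```yaml
# Illustration only: assuming the existing scale group carries the tag
#   {key: deployment_serial, value: "3"},
# the filter yields 3 on a normal run and 4 when
# openshift_aws_node_group_upgrade is true; an empty result list yields 1.
l_deployment_serial: "{{ asgs.results | scale_groups_serial(openshift_aws_node_group_upgrade) }}"
```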
diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml
index 5d371ec7a..3eb7b73b3 100644
--- a/roles/openshift_aws/tasks/elb.yml
+++ b/roles/openshift_aws/tasks/elb.yml
@@ -2,26 +2,8 @@
- name: "dump the elb listeners for {{ l_elb_dict_item.key }}"
debug:
msg: "{{ l_elb_dict_item.value }}"
+ verbosity: 1
-- name: "Create ELB {{ l_elb_dict_item.key }}"
- ec2_elb_lb:
- name: "{{ l_openshift_aws_elb_name_dict[l_elb_dict_item.key][item.key] }}"
- state: present
- cross_az_load_balancing: "{{ openshift_aws_elb_az_load_balancing }}"
- security_group_names: "{{ l_elb_security_groups[l_elb_dict_item.key] }}"
- idle_timeout: "{{ openshift_aws_elb_idle_timout }}"
- region: "{{ openshift_aws_region }}"
- subnets:
- - "{{ subnetout.subnets[0].id }}"
- health_check: "{{ openshift_aws_elb_health_check }}"
- listeners: "{{ item.value }}"
- scheme: "{{ openshift_aws_elb_scheme }}"
- tags: "{{ openshift_aws_elb_tags }}"
- wait: True
- register: new_elb
+- name: Create ELB(s)
+ include_tasks: elb_single.yml
with_dict: "{{ l_elb_dict_item.value }}"
-
-- debug:
- msg: "{{ item }}"
- with_items:
- - "{{ new_elb }}"
diff --git a/roles/openshift_aws/tasks/elb_single.yml b/roles/openshift_aws/tasks/elb_single.yml
new file mode 100644
index 000000000..864757549
--- /dev/null
+++ b/roles/openshift_aws/tasks/elb_single.yml
@@ -0,0 +1,34 @@
+---
+- name: "dump the elb listeners for {{ item.key }}"
+ debug:
+ msg: "{{ item.value }}"
+ verbosity: 1
+
+- name: "Create ELB {{ item.value.name }}"
+ ec2_elb_lb:
+ name: "{{ item.value.name }}"
+ state: present
+ cross_az_load_balancing: "{{ item.value.cross_az_load_balancing }}"
+ security_group_names: "{{ l_elb_security_groups[l_elb_dict_item.key] }}"
+ idle_timeout: "{{ item.value.idle_timout }}"
+ region: "{{ openshift_aws_region }}"
+ subnets:
+ - "{{ subnetout.subnets[0].id }}"
+ health_check: "{{ item.value.health_check }}"
+ listeners: "{{ item.value.listeners }}"
+ scheme: "{{ (item.key == 'internal') | ternary('internal','internet-facing') }}"
+ tags: "{{ item.value.tags }}"
+ wait: True
+ register: new_elb
+ retries: 20
+ delay: 5
+ until: new_elb | succeeded
+ ignore_errors: yes
+
+- fail:
+ msg: "couldn't create ELB {{ item.value.name }}"
+ when: not new_elb | succeeded
+
+- debug:
+ msg: "{{ new_elb }}"
+ verbosity: 1
diff --git a/roles/openshift_aws/tasks/iam_cert.yml b/roles/openshift_aws/tasks/iam_cert.yml
index f74a62b8b..42d7d951c 100644
--- a/roles/openshift_aws/tasks/iam_cert.yml
+++ b/roles/openshift_aws/tasks/iam_cert.yml
@@ -18,7 +18,9 @@
- openshift_aws_iam_cert_key_path != ''
- openshift_aws_elb_cert_arn == ''
-- debug: msg="{{ elb_cert_chain }}"
+- debug:
+ msg: "{{ elb_cert_chain }}"
+ verbosity: 1
- name: set_fact openshift_aws_elb_cert_arn
set_fact:
@@ -28,8 +30,3 @@
- openshift_aws_iam_cert_path != ''
- openshift_aws_iam_cert_key_path != ''
- openshift_aws_elb_cert_arn == ''
-
-- name: wait for cert to propagate
- pause:
- seconds: 5
- when: elb_cert_chain.changed
diff --git a/roles/openshift_aws/tasks/master_facts.yml b/roles/openshift_aws/tasks/master_facts.yml
index 530b0134d..c2e362acd 100644
--- a/roles/openshift_aws/tasks/master_facts.yml
+++ b/roles/openshift_aws/tasks/master_facts.yml
@@ -3,7 +3,7 @@
ec2_elb_facts:
region: "{{ openshift_aws_region }}"
names:
- - "{{ openshift_aws_elb_name_dict['master']['internal'] }}"
+ - "{{ openshift_aws_elb_dict['master']['internal']['name'] }}"
delegate_to: localhost
register: elbs
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
index 786a2e4cf..2b5f317d8 100644
--- a/roles/openshift_aws/tasks/provision.yml
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -1,23 +1,6 @@
---
-- when: openshift_aws_create_iam_cert | bool
- name: create the iam_cert for elb certificate
- include_tasks: iam_cert.yml
-
-- when: openshift_aws_create_s3 | bool
- name: create s3 bucket for registry
- include_tasks: s3.yml
-
- include_tasks: vpc_and_subnet_id.yml
-- name: create elbs
- include_tasks: elb.yml
- with_dict: "{{ openshift_aws_elb_dict }}"
- vars:
- l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
- l_openshift_aws_elb_name_dict: "{{ openshift_aws_elb_name_dict }}"
- loop_control:
- loop_var: l_elb_dict_item
-
- name: include scale group creation for master
include_tasks: build_node_group.yml
with_items: "{{ openshift_aws_master_group }}"
diff --git a/roles/openshift_aws/tasks/provision_elb.yml b/roles/openshift_aws/tasks/provision_elb.yml
new file mode 100644
index 000000000..fcc49c3ea
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision_elb.yml
@@ -0,0 +1,14 @@
+---
+- when: openshift_aws_create_iam_cert | bool
+ name: create the iam_cert for elb certificate
+ include_tasks: iam_cert.yml
+
+- include_tasks: vpc_and_subnet_id.yml
+
+- name: create elbs
+ include_tasks: elb.yml
+ with_dict: "{{ openshift_aws_elb_dict }}"
+ vars:
+ l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
+ loop_control:
+ loop_var: l_elb_dict_item
diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml
index 696b323c0..786db1570 100644
--- a/roles/openshift_aws/tasks/provision_instance.yml
+++ b/roles/openshift_aws/tasks/provision_instance.yml
@@ -14,11 +14,7 @@
instance_type: m4.xlarge
vpc_subnet_id: "{{ openshift_aws_subnet_id | default(subnetout.subnets[0].id) }}"
image: "{{ openshift_aws_base_ami }}"
- volumes:
- - device_name: /dev/sdb
- volume_type: gp2
- volume_size: 100
- delete_on_termination: true
+ volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
wait: yes
exact_count: 1
count_tag:
@@ -46,5 +42,5 @@
- name: add host to nodes
add_host:
- groups: nodes
+ groups: nodes,g_new_node_hosts
name: "{{ instancesout.instances[0].public_dns_name }}"
diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml
index d82f18574..9105b5b4c 100644
--- a/roles/openshift_aws/tasks/provision_nodes.yml
+++ b/roles/openshift_aws/tasks/provision_nodes.yml
@@ -2,25 +2,12 @@
# Get bootstrap config token
# bootstrap should be created on first master
# need to fetch it and shove it into cloud data
-- name: fetch master instances
- ec2_instance_facts:
- region: "{{ openshift_aws_region }}"
- filters:
- "tag:clusterid": "{{ openshift_aws_clusterid }}"
- "tag:host-type": master
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until:
- - "'instances' in instancesout"
- - instancesout.instances|length > 0
+- include_tasks: setup_master_group.yml
- name: slurp down the bootstrap.kubeconfig
slurp:
src: /etc/origin/master/bootstrap.kubeconfig
- delegate_to: "{{ instancesout.instances[0].public_ip_address }}"
- remote_user: root
+ delegate_to: "{{ groups.masters.0 }}"
register: bootstrap
- name: set_fact for kubeconfig token
diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml
index 74877d5c7..c1cb37a3b 100644
--- a/roles/openshift_aws/tasks/seal_ami.yml
+++ b/roles/openshift_aws/tasks/seal_ami.yml
@@ -10,6 +10,19 @@
delay: 3
until: instancesout.instances|length > 0
+- name: fetch the ami used to create the instance
+ ec2_ami_find:
+ region: "{{ openshift_aws_region }}"
+ ami_id: "{{ instancesout.instances[0]['image_id'] }}"
+ register: original_ami_out
+ retries: 20
+ delay: 3
+ until: original_ami_out.results|length > 0
+
+- name: combine the tags of the original ami with newly created ami
+ set_fact:
+ l_openshift_aws_ami_tags: "{{ original_ami_out.results[0]['tags'] | combine(openshift_aws_ami_tags) }}"
+
- name: bundle ami
ec2_ami:
instance_id: "{{ instancesout.instances.0.instance_id }}"
@@ -17,7 +30,7 @@
state: present
description: "This was provisioned {{ ansible_date_time.iso8601 }}"
name: "{{ openshift_aws_ami_name }}"
- tags: "{{ openshift_aws_ami_tags }}"
+ tags: "{{ l_openshift_aws_ami_tags }}"
wait: yes
register: amioutput
diff --git a/roles/openshift_aws/tasks/uninstall_elb.yml b/roles/openshift_aws/tasks/uninstall_elb.yml
new file mode 100644
index 000000000..147e9a905
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_elb.yml
@@ -0,0 +1,11 @@
+---
+- name: delete elbs
+ ec2_elb_lb:
+ name: "{{ item }}"
+ region: "{{ openshift_aws_region }}"
+ state: absent
+ with_items: "{{ openshift_aws_elb_dict | json_query('*.*.name') | sum(start = []) }}"
+
+- when: openshift_aws_create_iam_cert | bool
+ name: delete the iam_cert for elb certificate
+ include_tasks: uninstall_iam_cert.yml
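In the `with_items` expression above, `json_query('*.*.name')` yields one list of names per top-level key, and `sum(start=[])` concatenates those lists into the flat list the loop needs. Expected expansion with the defaults, again assuming clusterid `mycluster`:

```yaml
# Illustration only:
#   json_query('*.*.name') => [["mycluster-master-external",
#                               "mycluster-master-internal"],
#                              ["mycluster-infra"]]
#   | sum(start=[])        => all three names in a single flat list
- debug:
    msg: "{{ openshift_aws_elb_dict | json_query('*.*.name') | sum(start=[]) }}"
```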
diff --git a/roles/openshift_aws/tasks/uninstall_iam_cert.yml b/roles/openshift_aws/tasks/uninstall_iam_cert.yml
new file mode 100644
index 000000000..7b47673ee
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_iam_cert.yml
@@ -0,0 +1,25 @@
+---
+- when:
+ - openshift_aws_create_iam_cert | bool
+ - openshift_aws_iam_cert_path != ''
+ - openshift_aws_iam_cert_key_path != ''
+ - openshift_aws_elb_cert_arn == ''
+ block:
+ - name: delete AWS IAM certificates
+ iam_cert23:
+ state: absent
+ name: "{{ openshift_aws_iam_cert_name }}"
+ register: elb_cert_chain
+ retries: 20
+ delay: 10
+ until: elb_cert_chain | succeeded
+ ignore_errors: yes
+
+ - debug:
+ var: elb_cert_chain
+ verbosity: 1
+
+ - name: check for iam cert error
+ fail:
+ msg: "Couldn't delete IAM cert {{ openshift_aws_iam_cert_name }}"
+ when: not elb_cert_chain | succeeded
diff --git a/roles/openshift_aws/tasks/uninstall_s3.yml b/roles/openshift_aws/tasks/uninstall_s3.yml
new file mode 100644
index 000000000..0b08cbeed
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_s3.yml
@@ -0,0 +1,26 @@
+---
+- name: empty S3 bucket
+ block:
+ - name: get S3 object list
+ aws_s3:
+ bucket: "{{ openshift_aws_s3_bucket_name }}"
+ mode: list
+ region: "{{ openshift_aws_region }}"
+ register: s3_out
+
+ - name: delete S3 objects
+ aws_s3:
+ bucket: "{{ openshift_aws_s3_bucket_name }}"
+ mode: delobj
+ object: "{{ item }}"
+ with_items: "{{ s3_out.s3_keys }}"
+ when: openshift_aws_create_s3 | bool
+
+- name: delete S3 bucket
+ aws_s3:
+ bucket: "{{ openshift_aws_s3_bucket_name }}"
+ mode: delete
+ region: "{{ openshift_aws_region }}"
+ when:
+ - openshift_aws_create_s3 | bool
+ - openshift_aws_really_delete_s3_bucket | bool
diff --git a/roles/openshift_aws/tasks/uninstall_security_group.yml b/roles/openshift_aws/tasks/uninstall_security_group.yml
new file mode 100644
index 000000000..55d40e8ec
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_security_group.yml
@@ -0,0 +1,14 @@
+---
+- name: delete the node group sgs
+ oo_ec2_group:
+ state: absent
+ name: "{{ item.value.name}}"
+ region: "{{ openshift_aws_region }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
+
+- name: delete the k8s sgs for the node group
+ oo_ec2_group:
+ state: absent
+ name: "{{ item.value.name }}_k8s"
+ region: "{{ openshift_aws_region }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
diff --git a/roles/openshift_aws/tasks/uninstall_ssh_keys.yml b/roles/openshift_aws/tasks/uninstall_ssh_keys.yml
new file mode 100644
index 000000000..27e42da53
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_ssh_keys.yml
@@ -0,0 +1,9 @@
+---
+- name: Remove the public keys for the user(s)
+ ec2_key:
+ state: absent
+ name: "{{ item.key_name }}"
+ region: "{{ openshift_aws_region }}"
+ with_items: "{{ openshift_aws_users }}"
+ no_log: True
+ when: openshift_aws_enable_uninstall_shared_objects | bool
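This task, like the shared IAM instance profile teardown, is gated behind `openshift_aws_enable_uninstall_shared_objects`, which defaults/main.yml above sets to `False`. A hedged sketch of opting in from an uninstall play; the values are placeholders:

```yaml
# Hypothetical opt-in; openshift_aws_enable_uninstall_shared_objects defaults
# to False, so shared keys survive an ordinary uninstall.
- import_role:
    name: openshift_aws
    tasks_from: uninstall_ssh_keys.yml
  vars:
    openshift_aws_enable_uninstall_shared_objects: True
    openshift_aws_region: us-east-1
```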
diff --git a/roles/openshift_aws/tasks/uninstall_vpc.yml b/roles/openshift_aws/tasks/uninstall_vpc.yml
new file mode 100644
index 000000000..ecf39f694
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_vpc.yml
@@ -0,0 +1,36 @@
+---
+- name: Fetch the VPC for the vpc.id
+ ec2_vpc_net_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_clusterid }}"
+ register: vpcout
+- debug:
+ var: vpcout
+ verbosity: 1
+
+- when: vpcout.vpcs | length > 0
+ block:
+ - name: delete the vpc igw
+ ec2_vpc_igw:
+ state: absent
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ register: igw
+
+ - name: delete the vpc subnets
+ ec2_vpc_subnet:
+ state: absent
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ cidr: "{{ item.cidr }}"
+ az: "{{ item.az }}"
+ with_items: "{{ openshift_aws_vpc.subnets[openshift_aws_region] }}"
+
+ - name: Delete AWS VPC
+ ec2_vpc_net:
+ state: absent
+ region: "{{ openshift_aws_region }}"
+ name: "{{ openshift_aws_clusterid }}"
+ cidr_block: "{{ openshift_aws_vpc.cidr }}"
+ register: vpc
diff --git a/roles/openshift_aws/tasks/vpc_and_subnet_id.yml b/roles/openshift_aws/tasks/vpc_and_subnet_id.yml
index 1b754f863..c2c345faf 100644
--- a/roles/openshift_aws/tasks/vpc_and_subnet_id.yml
+++ b/roles/openshift_aws/tasks/vpc_and_subnet_id.yml
@@ -7,7 +7,9 @@
register: vpcout
- name: debug vpcout
- debug: var=vpcout
+ debug:
+ var: vpcout
+ verbosity: 1
- name: fetch the default subnet id
ec2_vpc_subnet_facts:
@@ -18,4 +20,6 @@
register: subnetout
- name: debug subnetout
- debug: var=subnetout
+ debug:
+ var: subnetout
+ verbosity: 1
diff --git a/roles/openshift_aws/tasks/wait_for_groups.yml b/roles/openshift_aws/tasks/wait_for_groups.yml
index 1f4ef3e1c..3ad876e37 100644
--- a/roles/openshift_aws/tasks/wait_for_groups.yml
+++ b/roles/openshift_aws/tasks/wait_for_groups.yml
@@ -8,6 +8,7 @@
tags:
"{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid } }}"
register: qasg
+ # scale_groups_match_capacity is a custom filter in role lib_utils
until: qasg | json_query('results[*]') | scale_groups_match_capacity | bool
delay: 10
retries: 60
diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2
index bda1334cd..46e4e1cc5 100644
--- a/roles/openshift_aws/templates/user_data.j2
+++ b/roles/openshift_aws/templates/user_data.j2
@@ -20,6 +20,9 @@ runcmd:
- [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml]
{% endif %}
{% if openshift_aws_node_group.group != 'master' %}
+{# Restarting systemd-hostnamed ensures that instances will have FQDN
+hostnames following network restart. #}
+- [ systemctl, restart, systemd-hostnamed]
- [ systemctl, restart, NetworkManager]
- [ systemctl, enable, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
- [ systemctl, start, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]