---
# Feature toggles controlling which AWS provisioning steps this role performs.
openshift_aws_create_s3: true
openshift_aws_create_iam_cert: true
openshift_aws_create_iam_role: false
openshift_aws_create_security_groups: true
openshift_aws_create_launch_config: true
openshift_aws_create_scale_group: true
openshift_aws_node_group_upgrade: false
openshift_aws_wait_for_ssh: true

# Cluster identity; most resource names below derive from the clusterid.
openshift_aws_clusterid: default
openshift_aws_region: us-east-1

openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"

# IAM server certificate used by the external master ELB.
openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
openshift_aws_iam_cert_path: ''
openshift_aws_iam_cert_key_path: ''

# IAM role/policy attached to node instances.
openshift_aws_iam_role_name: openshift_node_describe_instances
openshift_aws_iam_role_policy_json: "{{ lookup('file', 'describeinstances.json') }}"
openshift_aws_iam_role_policy_name: "describe_instances"

openshift_aws_iam_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms"

# AMI settings used when building/copying images.
openshift_aws_ami: ''
openshift_aws_ami_copy_wait: false
openshift_aws_ami_encrypt: false
openshift_aws_ami_copy_src_region: "{{ openshift_aws_region }}"
openshift_aws_ami_name: openshift-gi
openshift_aws_base_ami_name: ami_base

openshift_aws_launch_config_bootstrap_token: ''

openshift_aws_users: []

openshift_aws_ami_tags:
  bootstrap: "true"
  openshift-created: "true"
  parent: "{{ openshift_aws_base_ami | default('unknown') }}"

# S3 bucket backing the docker registry.
openshift_aws_s3_mode: create
openshift_aws_s3_bucket_name: "{{ openshift_aws_clusterid }}-docker-registry"

openshift_aws_elb_basename: "{{ openshift_aws_clusterid }}"
openshift_aws_elb_cert_arn: ''

# ELB definitions keyed by node group (master/infra) and scope
# (external/internal).
openshift_aws_elb_dict:
  master:
    external:
      cross_az_load_balancing: false
      health_check:
        ping_protocol: tcp
        ping_port: "{{ openshift_master_api_port | default(8443) }}"
        response_timeout: 5
        interval: 30
        unhealthy_threshold: 2
        healthy_threshold: 2
      # NOTE(review): 'idle_timout' looks like a typo for 'idle_timeout';
      # kept as-is because the tasks consuming this dict may reference the
      # misspelled key — confirm before renaming.
      idle_timout: 400
      listeners:
      # NOTE(review): tcp front-end paired with an ssl back-end protocol —
      # presumably intentional (API serves TLS on the instance port); verify.
      - protocol: tcp
        load_balancer_port: 80
        instance_protocol: ssl
        instance_port: "{{ openshift_master_api_port | default(8443) }}"
      - protocol: ssl
        load_balancer_port: "{{ openshift_master_api_port | default(8443) }}"
        instance_protocol: ssl
        instance_port: "{{ openshift_master_api_port | default(8443) }}"
        ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}"
      name: "{{ openshift_aws_elb_basename }}-master-external"
      tags: "{{ openshift_aws_kube_tags }}"
    internal:
      cross_az_load_balancing: false
      health_check:
        ping_protocol: tcp
        ping_port: "{{ openshift_master_api_port | default(8443) }}"
        response_timeout: 5
        interval: 30
        unhealthy_threshold: 2
        healthy_threshold: 2
      # NOTE(review): same likely 'idle_timeout' typo as above.
      idle_timout: 400
      listeners:
      - protocol: tcp
        load_balancer_port: 80
        instance_protocol: tcp
        instance_port: 80
      - protocol: tcp
        load_balancer_port: "{{ openshift_master_api_port | default(8443) }}"
        instance_protocol: tcp
        instance_port: "{{ openshift_master_api_port | default(8443) }}"
      name: "{{ openshift_aws_elb_basename }}-master-internal"
      tags: "{{ openshift_aws_kube_tags }}"
  infra:
    external:
      cross_az_load_balancing: false
      health_check:
        ping_protocol: tcp
        ping_port: 443
        response_timeout: 5
        interval: 30
        unhealthy_threshold: 2
        healthy_threshold: 2
      # NOTE(review): same likely 'idle_timeout' typo as above.
      idle_timout: 400
      listeners:
      - protocol: tcp
        load_balancer_port: 80
        instance_protocol: tcp
        instance_port: 443
        proxy_protocol: true
      - protocol: tcp
        load_balancer_port: 443
        instance_protocol: tcp
        instance_port: 443
        proxy_protocol: true
      name: "{{ openshift_aws_elb_basename }}-infra"
      tags: "{{ openshift_aws_kube_tags }}"

# EBS volume layout for master instances (volumes survive termination).
openshift_aws_node_group_config_master_volumes:
- device_name: /dev/sda1
  volume_size: 100
  device_type: gp2
  delete_on_termination: false
- device_name: /dev/sdb
  volume_size: 100
  device_type: gp2
  delete_on_termination: false

# EBS volume layout for (compute/infra) node instances.
openshift_aws_node_group_config_node_volumes:
- device_name: /dev/sda1
  volume_size: 100
  device_type: gp2
  delete_on_termination: true
- device_name: /dev/sdb
  volume_size: 100
  device_type: gp2
  delete_on_termination: true

# build_instance_tags is a custom filter in role lib_utils
openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
# Scale-group lifecycle behavior.
openshift_aws_node_group_termination_policy: Default
openshift_aws_node_group_replace_instances: []
openshift_aws_node_group_replace_all_instances: false

# AMI to use per node group; all default to the single shared AMI.
openshift_aws_ami_map:
  master: "{{ openshift_aws_ami }}"
  infra: "{{ openshift_aws_ami }}"
  compute: "{{ openshift_aws_ami }}"

openshift_aws_master_group:
- name: "{{ openshift_aws_clusterid }} master group"
  group: master
  tags:
    host-type: master
    sub-host-type: default
    runtime: docker

openshift_aws_node_groups:
- name: "{{ openshift_aws_clusterid }} compute group"
  group: compute
  tags:
    host-type: node
    sub-host-type: compute
    runtime: docker
- name: "{{ openshift_aws_clusterid }} infra group"
  group: infra
  tags:
    host-type: node
    sub-host-type: infra
    runtime: docker

openshift_aws_created_asgs: []
openshift_aws_current_asgs: []

openshift_aws_scale_group_health_check:
  period: 60
  type: EC2

# these will be used during upgrade
openshift_aws_master_group_config:
  # The 'master' key is always required here.
  master:
    instance_type: "{{ openshift_aws_master_group_instance_type | default('m4.xlarge') }}"
    volumes: "{{ openshift_aws_node_group_config_master_volumes }}"
    health_check: "{{ openshift_aws_scale_group_health_check }}"
    min_size: "{{ openshift_aws_master_group_min_size | default(3) }}"
    max_size: "{{ openshift_aws_master_group_max_size | default(3) }}"
    desired_size: "{{ openshift_aws_master_group_desired_size | default(3) }}"
    wait_for_instances: true
    termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
    replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
    iam_role: "{{ openshift_aws_iam_role_name }}"
    policy_name: "{{ openshift_aws_iam_role_policy_name }}"
    policy_json: "{{ openshift_aws_iam_role_policy_json }}"
    elbs: "{{ openshift_aws_elb_dict | json_query('master.[*][0][*].name') }}"

openshift_aws_node_group_config:
  # The 'compute' key is always required here.
  compute:
    instance_type: "{{ openshift_aws_compute_group_instance_type | default('m4.xlarge') }}"
    volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
    health_check: "{{ openshift_aws_scale_group_health_check }}"
    min_size: "{{ openshift_aws_compute_group_min_size | default(3) }}"
    max_size: "{{ openshift_aws_compute_group_max_size | default(100) }}"
    desired_size: "{{ openshift_aws_compute_group_desired_size | default(3) }}"
    termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
    replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
    iam_role: "{{ openshift_aws_iam_role_name }}"
    policy_name: "{{ openshift_aws_iam_role_policy_name }}"
    policy_json: "{{ openshift_aws_iam_role_policy_json }}"
  # The 'infra' key is always required here.
  infra:
    instance_type: "{{ openshift_aws_infra_group_instance_type | default('m4.xlarge') }}"
    volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
    health_check: "{{ openshift_aws_scale_group_health_check }}"
    min_size: "{{ openshift_aws_infra_group_min_size | default(2) }}"
    max_size: "{{ openshift_aws_infra_group_max_size | default(20) }}"
    desired_size: "{{ openshift_aws_infra_group_desired_size | default(2) }}"
    termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
    replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
    iam_role: "{{ openshift_aws_iam_role_name }}"
    policy_name: "{{ openshift_aws_iam_role_policy_name }}"
    policy_json: "{{ openshift_aws_iam_role_policy_json }}"
    elbs: "{{ openshift_aws_elb_dict | json_query('infra.[*][0][*].name') }}"

# build_instance_tags is a custom filter in role lib_utils
openshift_aws_kube_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"

openshift_aws_elb_security_groups: "{{ openshift_aws_launch_config_security_groups }}"

# Security groups attached to each node group's launch configuration.
openshift_aws_launch_config_security_groups:
  compute:
  - "{{ openshift_aws_clusterid }}"  # default sg
  - "{{ openshift_aws_clusterid }}_compute"  # node type sg
  - "{{ openshift_aws_clusterid }}_compute_k8s"  # node type sg k8s
  infra:
  - "{{ openshift_aws_clusterid }}"  # default sg
  - "{{ openshift_aws_clusterid }}_infra"  # node type sg
  - "{{ openshift_aws_clusterid }}_infra_k8s"  # node type sg k8s
  master:
  - "{{ openshift_aws_clusterid }}"  # default sg
  - "{{ openshift_aws_clusterid }}_master"  # node type sg
  - "{{ openshift_aws_clusterid }}_master_k8s"  # node type sg k8s

openshift_aws_security_groups_tags: "{{ openshift_aws_kube_tags }}"

# Security group definitions (ingress rules) per node type.
openshift_aws_node_security_groups:
  default:
    name: "{{ openshift_aws_clusterid }}"
    desc: "{{ openshift_aws_clusterid }} default"
    rules:
    - proto: tcp
      from_port: 22
      to_port: 22
      cidr_ip: 0.0.0.0/0
    # Allow all traffic between members of the default group.
    - proto: all
      from_port: all
      to_port: all
      group_name: "{{ openshift_aws_clusterid }}"
  master:
    name: "{{ openshift_aws_clusterid }}_master"
    desc: "{{ openshift_aws_clusterid }} master instances"
    rules:
    - proto: tcp
      from_port: 80
      to_port: 80
      cidr_ip: 0.0.0.0/0
    - proto: tcp
      from_port: "{{ openshift_master_api_port | default(8443) }}"
      to_port: "{{ openshift_master_api_port | default(8443) }}"
      cidr_ip: 0.0.0.0/0
  compute:
    name: "{{ openshift_aws_clusterid }}_compute"
    desc: "{{ openshift_aws_clusterid }} compute node instances"
  infra:
    name: "{{ openshift_aws_clusterid }}_infra"
    desc: "{{ openshift_aws_clusterid }} infra node instances"
    rules:
    - proto: tcp
      from_port: 80
      to_port: 80
      cidr_ip: 0.0.0.0/0
    - proto: tcp
      from_port: "{{ openshift_master_api_port | default(8443) }}"
      to_port: "{{ openshift_master_api_port | default(8443) }}"
      cidr_ip: 0.0.0.0/0
    # NodePort service range.
    - proto: tcp
      from_port: 30000
      to_port: 32000
      cidr_ip: 0.0.0.0/0
  etcd:
    name: "{{ openshift_aws_clusterid }}_etcd"
    desc: "{{ openshift_aws_clusterid }} etcd instances"

openshift_aws_vpc_tags:
  Name: "{{ openshift_aws_vpc_name }}"

# VPC layout; subnets are keyed by region name.
openshift_aws_vpc:
  name: "{{ openshift_aws_vpc_name }}"
  cidr: 172.31.0.0/16
  subnets:
    us-east-1:
    - cidr: 172.31.48.0/20
      az: "us-east-1c"
      default_az: true
    - cidr: 172.31.32.0/20
      az: "us-east-1e"
    - cidr: 172.31.16.0/20
      az: "us-east-1a"
# get_default_az is a custom filter; picks the subnet marked default_az.
openshift_aws_subnet_az: "{{ openshift_aws_vpc.subnets[openshift_aws_region] | get_default_az }}"

openshift_aws_node_run_bootstrap_startup: true
openshift_aws_node_user_data: ''
openshift_aws_node_config_namespace: openshift-node

openshift_aws_masters_groups: masters,etcd,nodes

# By default, don't delete things like the shared IAM instance
# profile and uploaded ssh keys
openshift_aws_enable_uninstall_shared_objects: false

# S3 bucket names are global by default and can take minutes/hours for the
# name to become available for re-use (assuming someone doesn't take the
# name in the meantime). Default to just emptying the contents of the S3
# bucket if we've been asked to create the bucket during provisioning.
openshift_aws_really_delete_s3_bucket: false