author    Russell Teague <rteague@redhat.com>    2017-12-08 08:42:32 -0500
committer Russell Teague <rteague@redhat.com>    2017-12-08 15:55:25 -0500
commit    2bf65cdab4aa88f160d005d3b7649b22a6dceba8 (patch)
tree      adae4f2b0e6bf75a63175a2371470789c22f0df0 /inventory/byo
parent    c0f7152a51c6306340f9634f191e81d4322d1b52 (diff)
Cleanup byo references
Diffstat (limited to 'inventory/byo')
-rw-r--r--  inventory/byo/.gitignore                                           1
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.external.example                60
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.mixed.example                   63
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.native.example                  50
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.registry-only.example           56
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.storage-and-registry.example    67
-rw-r--r--  inventory/byo/hosts.example                                     1089
-rw-r--r--  inventory/byo/hosts.openstack                                     37
8 files changed, 0 insertions, 1423 deletions
diff --git a/inventory/byo/.gitignore b/inventory/byo/.gitignore
deleted file mode 100644
index 6ff331c7e..000000000
--- a/inventory/byo/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-hosts
diff --git a/inventory/byo/hosts.byo.glusterfs.external.example b/inventory/byo/hosts.byo.glusterfs.external.example
deleted file mode 100644
index acf68266e..000000000
--- a/inventory/byo/hosts.byo.glusterfs.external.example
+++ /dev/null
@@ -1,60 +0,0 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
-# whose storage is provided by an external, stand-alone GlusterFS cluster.
-#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
-# cluster with GlusterFS storage, which will use that storage to create a
-# volume that will provide backend storage for a hosted Docker registry.
-#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
-# deploy GlusterFS storage on an existing cluster. With this playbook, the
-# registry backend volume will be created but the administrator must then
-# either deploy a hosted registry or change an existing hosted registry to use
-# that volume.
-#
-# There are additional configuration parameters that can be specified to
-# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
-# roles/openshift_storage_glusterfs/README.md for additional details.
-
-[OSEv3:children]
-masters
-nodes
-etcd
-# Specify there will be GlusterFS nodes
-glusterfs
-
-[OSEv3:vars]
-ansible_ssh_user=root
-openshift_deployment_type=origin
-# Specify that we want to use an external GlusterFS cluster
-openshift_storage_glusterfs_is_native=False
-# Specify the IP address or hostname of the external heketi service
-openshift_storage_glusterfs_heketi_url=172.0.0.1
-
-[masters]
-master
-
-[nodes]
-master openshift_schedulable=False
-node0 openshift_schedulable=True
-node1 openshift_schedulable=True
-node2 openshift_schedulable=True
-
-[etcd]
-master
-
-# Specify the glusterfs group, which contains the nodes of the external
-# GlusterFS cluster. At a minimum, each node must have "glusterfs_ip"
-# and "glusterfs_devices" variables defined.
-#
-# The first variable indicates the IP address of the external GlusterFS
-# node, which must be reachable by the external heketi service.
-#
-# The second variable is a list of block devices the node will have access to
-# that are intended solely for use as GlusterFS storage. These block devices
-# must be bare (e.g. have no data, not be marked as LVM PVs), and will be
-# formatted.
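-#
-# As an illustrative aside (not part of the original example), one way to
-# confirm a device is bare is to run the following on the node and check
-# that no partitions, filesystems, or LVM PV signatures are reported:
-#   lsblk --fs /dev/vdb
-#   pvs /dev/vdb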
-[glusterfs]
-node0.local glusterfs_ip='172.0.0.10' glusterfs_devices='[ "/dev/vdb" ]'
-node1.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdb", "/dev/vdc" ]'
-node2.local glusterfs_ip='172.0.0.12' glusterfs_devices='[ "/dev/vdd" ]'
diff --git a/inventory/byo/hosts.byo.glusterfs.mixed.example b/inventory/byo/hosts.byo.glusterfs.mixed.example
deleted file mode 100644
index a559dc377..000000000
--- a/inventory/byo/hosts.byo.glusterfs.mixed.example
+++ /dev/null
@@ -1,63 +0,0 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
-# whose storage is provided by an external GlusterFS cluster that is managed
-# by a natively hosted, containerized heketi service.
-#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
-# cluster with GlusterFS storage, which will use that storage to create a
-# volume that will provide backend storage for a hosted Docker registry.
-#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
-# deploy GlusterFS storage on an existing cluster. With this playbook, the
-# registry backend volume will be created but the administrator must then
-# either deploy a hosted registry or change an existing hosted registry to use
-# that volume.
-#
-# There are additional configuration parameters that can be specified to
-# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
-# roles/openshift_storage_glusterfs/README.md for additional details.
-
-[OSEv3:children]
-masters
-nodes
-etcd
-# Specify there will be GlusterFS nodes
-glusterfs
-
-[OSEv3:vars]
-ansible_ssh_user=root
-openshift_deployment_type=origin
-# Specify that we want to use an external GlusterFS cluster and a native
-# heketi service
-openshift_storage_glusterfs_is_native=False
-openshift_storage_glusterfs_heketi_is_native=True
-# Specify that heketi will use SSH to communicate to the GlusterFS nodes and
-# the private key file it will use for authentication
-openshift_storage_glusterfs_heketi_executor=ssh
-openshift_storage_glusterfs_heketi_ssh_keyfile=/root/id_rsa
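-#
-# As an illustrative sketch (not part of the original example), the key
-# referenced above could be created on the master and distributed to each
-# GlusterFS node with something like:
-#   ssh-keygen -t rsa -N '' -f /root/id_rsa
-#   ssh-copy-id -i /root/id_rsa.pub root@node0.local
-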
-[masters]
-master
-
-[nodes]
-master openshift_schedulable=False
-node0 openshift_schedulable=True
-node1 openshift_schedulable=True
-node2 openshift_schedulable=True
-
-[etcd]
-master
-
-# Specify the glusterfs group, which contains the nodes of the external
-# GlusterFS cluster. At a minimum, each node must have "glusterfs_ip"
-# and "glusterfs_devices" variables defined.
-#
-# The first variable indicates the IP address of the external GlusterFS
-# node, which must be reachable by the heketi service.
-#
-# The second variable is a list of block devices the node will have access to
-# that are intended solely for use as GlusterFS storage. These block devices
-# must be bare (e.g. have no data, not be marked as LVM PVs), and will be
-# formatted.
-[glusterfs]
-node0.local glusterfs_ip='172.0.0.10' glusterfs_devices='[ "/dev/vdb" ]'
-node1.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdb", "/dev/vdc" ]'
-node2.local glusterfs_ip='172.0.0.12' glusterfs_devices='[ "/dev/vdd" ]'
diff --git a/inventory/byo/hosts.byo.glusterfs.native.example b/inventory/byo/hosts.byo.glusterfs.native.example
deleted file mode 100644
index ca4765c53..000000000
--- a/inventory/byo/hosts.byo.glusterfs.native.example
+++ /dev/null
@@ -1,50 +0,0 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
-# with natively hosted, containerized GlusterFS storage for applications. It
-# will also automatically create a StorageClass for this purpose.
-#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
-# cluster with GlusterFS storage.
-#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
-# deploy GlusterFS storage on an existing cluster.
-#
-# There are additional configuration parameters that can be specified to
-# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
-# roles/openshift_storage_glusterfs/README.md for additional details.
-
-[OSEv3:children]
-masters
-nodes
-etcd
-# Specify there will be GlusterFS nodes
-glusterfs
-
-[OSEv3:vars]
-ansible_ssh_user=root
-openshift_deployment_type=origin
-
-[masters]
-master
-
-[nodes]
-master openshift_schedulable=False
-node0 openshift_schedulable=True
-node1 openshift_schedulable=True
-node2 openshift_schedulable=True
-
-[etcd]
-master
-
-# Specify the glusterfs group, which contains the nodes that will host
-# GlusterFS storage pods. At a minimum, each node must have a
-# "glusterfs_devices" variable defined. This variable is a list of block
-# devices the node will have access to that are intended solely for use as
-# GlusterFS storage. These block devices must be bare (e.g. have no data, not
-# be marked as LVM PVs), and will be formatted.
-[glusterfs]
-node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
-node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
-node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
diff --git a/inventory/byo/hosts.byo.glusterfs.registry-only.example b/inventory/byo/hosts.byo.glusterfs.registry-only.example
deleted file mode 100644
index 32040f593..000000000
--- a/inventory/byo/hosts.byo.glusterfs.registry-only.example
+++ /dev/null
@@ -1,56 +0,0 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
-# with natively hosted, containerized GlusterFS storage for exclusive use
-# as storage for a natively hosted Docker registry.
-#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
-# cluster with GlusterFS storage, which will use that storage to create a
-# volume that will provide backend storage for a hosted Docker registry.
-#
-# This inventory may also be used with byo/openshift-glusterfs/registry.yml to
-# deploy GlusterFS storage on an existing cluster. With this playbook, the
-# registry backend volume will be created but the administrator must then
-# either deploy a hosted registry or change an existing hosted registry to use
-# that volume.
-#
-# There are additional configuration parameters that can be specified to
-# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
-# roles/openshift_storage_glusterfs/README.md for additional details.
-
-[OSEv3:children]
-masters
-nodes
-etcd
-# Specify there will be GlusterFS nodes
-glusterfs_registry
-
-[OSEv3:vars]
-ansible_ssh_user=root
-openshift_deployment_type=origin
-# Specify that we want to use GlusterFS storage for a hosted registry
-openshift_hosted_registry_storage_kind=glusterfs
-
-[masters]
-master
-
-[nodes]
-master openshift_schedulable=False
-# A hosted registry, by default, will only be deployed on nodes labeled
-# "region=infra".
-node0 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node1 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node2 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-
-[etcd]
-master
-
-# Specify the glusterfs group, which contains the nodes that will host
-# GlusterFS storage pods. At a minimum, each node must have a
-# "glusterfs_devices" variable defined. This variable is a list of block
-# devices the node will have access to that are intended solely for use as
-# GlusterFS storage. These block devices must be bare (e.g. have no data, not
-# be marked as LVM PVs), and will be formatted.
-[glusterfs_registry]
-node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
-node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
-node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
diff --git a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
deleted file mode 100644
index 9bd37cbf6..000000000
--- a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
+++ /dev/null
@@ -1,67 +0,0 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
-# with natively hosted, containerized GlusterFS storage for both general
-# application use and a natively hosted Docker registry. It will also create a
-# StorageClass for the general storage.
-#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
-# cluster with GlusterFS storage.
-#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
-# deploy GlusterFS storage on an existing cluster. With this playbook, the
-# registry backend volume will be created but the administrator must then
-# either deploy a hosted registry or change an existing hosted registry to use
-# that volume.
-#
-# There are additional configuration parameters that can be specified to
-# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
-# roles/openshift_storage_glusterfs/README.md for additional details.
-
-[OSEv3:children]
-masters
-nodes
-etcd
-# Specify there will be GlusterFS nodes
-glusterfs
-glusterfs_registry
-
-[OSEv3:vars]
-ansible_ssh_user=root
-openshift_deployment_type=origin
-# Specify that we want to use GlusterFS storage for a hosted registry
-openshift_hosted_registry_storage_kind=glusterfs
-
-[masters]
-master
-
-[nodes]
-master openshift_schedulable=False
-# It is recommended not to use a single cluster for both general and registry
-# storage, so two three-node clusters are required.
-node0 openshift_schedulable=True
-node1 openshift_schedulable=True
-node2 openshift_schedulable=True
-# A hosted registry, by default, will only be deployed on nodes labeled
-# "region=infra".
-node3 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node4 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node5 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-
-[etcd]
-master
-
-# Specify the glusterfs group, which contains the nodes that will host
-# GlusterFS storage pods. At a minimum, each node must have a
-# "glusterfs_devices" variable defined. This variable is a list of block
-# devices the node will have access to that are intended solely for use as
-# GlusterFS storage. These block devices must be bare (e.g. have no data, not
-# be marked as LVM PVs), and will be formatted.
-[glusterfs]
-node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
-node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
-node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
-
-[glusterfs_registry]
-node3 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
-node4 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
-node5 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
deleted file mode 100644
index e3b56d7a1..000000000
--- a/inventory/byo/hosts.example
+++ /dev/null
@@ -1,1089 +0,0 @@
-# This is an example of a bring your own (byo) host inventory
-
-# Create an OSEv3 group that contains the masters and nodes groups
-[OSEv3:children]
-masters
-nodes
-etcd
-lb
-nfs
-
-# Set variables common for all OSEv3 hosts
-[OSEv3:vars]
-# Enable unsupported configurations, things that will yield a partially
-# functioning cluster but would not be supported for production use
-#openshift_enable_unsupported_configurations=false
-
-# SSH user. This user should allow SSH-based auth without requiring a
-# password. If using SSH key-based auth, the key should be managed by an
-# ssh-agent.
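-# For example (illustrative), to load a key into an agent before running
-# the playbooks:
-#   eval "$(ssh-agent -s)" && ssh-add ~/.ssh/id_rsa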
-ansible_user=root
-
-# If ansible_user is not root, ansible_become must be set to true and the
-# user must be configured for passwordless sudo
-#ansible_become=yes
-
-# Debug level for all OpenShift components (Defaults to 2)
-debug_level=2
-
-# Specify the deployment type. Valid values are origin and openshift-enterprise.
-openshift_deployment_type=origin
-#openshift_deployment_type=openshift-enterprise
-
-# Specify the generic release of OpenShift to install. This is used mainly during installation, after which we
-# rely on the version running on the first master. Works best for containerized installs where we can usually
-# use this to look up the latest exact version of the container images, which is the tag actually used to configure
-# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
-# release.
-openshift_release=v3.7
-
-# Specify an exact container image tag to install or configure.
-# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
-# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_image_tag=v3.7.0
-
-# Specify an exact rpm version to install or configure.
-# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
-# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_pkg_version=-3.7.0
-
-# This enables all the system containers except for docker:
-#openshift_use_system_containers=False
-#
-# But you can choose separately each component that must be a
-# system container:
-#
-#openshift_use_openvswitch_system_container=False
-#openshift_use_node_system_container=False
-#openshift_use_master_system_container=False
-#openshift_use_etcd_system_container=False
-#
-# In either case, system_images_registry must be specified to be able to find the system images
-#system_images_registry="docker.io"
-# when openshift_deployment_type=='openshift-enterprise'
-#system_images_registry="registry.access.redhat.com"
-
-# Manage openshift example imagestreams and templates during install and upgrade
-#openshift_install_examples=true
-
-# Configure logoutURL in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
-#openshift_master_logout_url=http://example.com
-
-# Configure extensionScripts in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
-#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js']
-
-# Configure extensionStylesheets in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
-#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css']
-
-# Configure extensions in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
-#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}]
-
-# Configure extensions in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
-#openshift_master_oauth_template=/path/to/login-template.html
-
-# Configure imagePolicyConfig in the master config
-# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
-#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
-
-# Configure master API rate limits for external clients
-#openshift_master_external_ratelimit_qps=200
-#openshift_master_external_ratelimit_burst=400
-# Configure master API rate limits for loopback clients
-#openshift_master_loopback_ratelimit_qps=300
-#openshift_master_loopback_ratelimit_burst=600
-
-# Docker Configuration
-# Add additional, insecure, and blocked registries to global docker configuration
-# For enterprise deployment types, registry.access.redhat.com is
-# automatically included if you do not list it yourself.
-#openshift_docker_additional_registries=registry.example.com
-#openshift_docker_insecure_registries=registry.example.com
-#openshift_docker_blocked_registries=registry.hacker.com
-# Disable pushing to dockerhub
-#openshift_docker_disable_push_dockerhub=True
-# Use Docker inside a System Container. Note that this is a tech preview and should
-# not be used to upgrade!
-# The following options for docker are ignored:
-# - docker_version
-# - docker_upgrade
-# The following options must not be used
-# - openshift_docker_options
-#openshift_docker_use_system_container=False
-# Install and run cri-o alongside docker
-# NOTE: This uses openshift_docker_systemcontainer_image_registry_override as its override
-# just as container-engine does.
-#openshift_use_crio=False
-# Force the registry to use for the container-engine/crio system container. By default the registry
-# will be built off of the deployment type and ansible_distribution. Only
-# use this option if you are sure you know what you are doing!
-#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest"
-#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest"
-# NOTE: The following crio docker-gc items are tech preview and likely shouldn't be used
-# unless you know what you are doing!!
-# The following two variables are used when openshift_use_crio is True
-# and clean up after builds that pass through docker.
-# Enable docker garbage collection when using cri-o
-#openshift_crio_enable_docker_gc=false
-# Node Selectors to run the garbage collection
-#openshift_crio_docker_gc_node_selector={'runtime': 'cri-o'}
-
-# Items added, as-is, to the end of /etc/sysconfig/docker OPTIONS
-# Default value: "--log-driver=journald"
-#openshift_docker_options="-l warn --ipv6=false"
-
-# Specify exact version of Docker to configure or upgrade to.
-# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
-# docker_version="1.12.1"
-
-# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True.
-# Uncomment below to disable; for example if your kernel does not support the
-# Docker overlay/overlay2 storage drivers with SELinux enabled.
-#openshift_docker_selinux_enabled=False
-
-# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
-# docker_upgrade=False
-
-# Specify exact version of etcd to configure or upgrade to.
-# etcd_version="3.1.0"
-# Enable etcd debug logging, defaults to false
-# etcd_debug=true
-# Set etcd log levels by package
-# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG"
-
-# Upgrade Hooks
-#
-# Hooks are available to run custom tasks at various points during a cluster
-# upgrade. Each hook should point to a file with Ansible tasks defined. Absolute
-# paths are suggested; otherwise the path will be treated as relative to the file
-# where the hook is actually used.
-#
-# Tasks to run before each master is upgraded.
-# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml
-#
-# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible
-# upgrade steps, but before we restart system/services.
-# openshift_master_upgrade_hook=/usr/share/custom/master.yml
-#
-# Tasks to run after each master is upgraded and system/services have been restarted.
-# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml
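-#
-# As a minimal sketch (assumed content, not part of the original example),
-# a hook file is an ordinary Ansible task list, e.g. pre_master.yml:
-#   ---
-#   - name: Note which master is about to be upgraded
-#     debug:
-#       msg: "Pre-upgrade hook running on {{ inventory_hostname }}"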
-
-# Alternate image format string, useful if you've got your own registry mirror
-# Configure this setting just on node or master
-#oreg_url_master=example.com/openshift3/ose-${component}:${version}
-#oreg_url_node=example.com/openshift3/ose-${component}:${version}
-# For setting the configuration globally
-#oreg_url=example.com/openshift3/ose-${component}:${version}
-# If oreg_url points to a registry other than registry.access.redhat.com we can
-# modify image streams to point at that registry by setting the following to true
-#openshift_examples_modify_imagestreams=true
-
-# If oreg_url points to a registry requiring authentication, provide the following:
-#oreg_auth_user=some_user
-#oreg_auth_password='my-pass'
-# NOTE: oreg_url must be defined by the user for oreg_auth_* to have any effect.
-# oreg_auth_password should be generated from running docker login.
-# To update registry auth credentials, uncomment the following:
-#oreg_auth_credentials_replace=True
-
-# OpenShift repository configuration
-#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
-#openshift_repos_enable_testing=false
-
-# htpasswd auth
-openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
-# Defining htpasswd users
-#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
-# or
-#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
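-#
-# One illustrative way to produce the pre-hashed passwords above (an
-# assumption, not part of the original example; any htpasswd-compatible
-# hash works):
-#   htpasswd -nb user1 'secret'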
-
-# Allow all auth
-#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
-
-# LDAP auth
-#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
-#
-# Configure LDAP CA certificate
-# Specify either the ASCII contents of the certificate or the path to
-# the local file that will be copied to the remote host. CA
-# certificate contents will be copied to master systems and saved
-# within /etc/origin/master/ with a filename matching the "ca" key set
-# within the LDAPPasswordIdentityProvider.
-#
-#openshift_master_ldap_ca=<ca text>
-# or
-#openshift_master_ldap_ca_file=<path to local ca file to use>
-
-# OpenID auth
-#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}]
-#
-# Configure OpenID CA certificate
-# Specify either the ASCII contents of the certificate or the path to
-# the local file that will be copied to the remote host. CA
-# certificate contents will be copied to master systems and saved
-# within /etc/origin/master/ with a filename matching the "ca" key set
-# within the OpenIDIdentityProvider.
-#
-#openshift_master_openid_ca=<ca text>
-# or
-#openshift_master_openid_ca_file=<path to local ca file to use>
-
-# Request header auth
-#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}]
-#
-# Configure request header CA certificate
-# Specify either the ASCII contents of the certificate or the path to
-# the local file that will be copied to the remote host. CA
-# certificate contents will be copied to master systems and saved
-# within /etc/origin/master/ with a filename matching the "clientCA"
-# key set within the RequestHeaderIdentityProvider.
-#
-#openshift_master_request_header_ca=<ca text>
-# or
-#openshift_master_request_header_ca_file=<path to local ca file to use>
-
-# CloudForms Management Engine (ManageIQ) App Install
-#
-# Enables installation of MIQ server. Recommended for dedicated
-# clusters only. See roles/openshift_management/README.md for instructions
-# and requirements.
-#openshift_management_install_management=False
-
-# Cloud Provider Configuration
-#
-# Note: You may make use of environment variables rather than store
-# sensitive configuration within the ansible inventory.
-# For example:
-#openshift_cloudprovider_aws_access_key="{{ lookup('env','AWS_ACCESS_KEY_ID') }}"
-#openshift_cloudprovider_aws_secret_key="{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}"
-#
-# AWS
-#openshift_cloudprovider_kind=aws
-# Note: IAM profiles may be used instead of storing API credentials on disk.
-#openshift_cloudprovider_aws_access_key=aws_access_key_id
-#openshift_cloudprovider_aws_secret_key=aws_secret_access_key
-#
-# Openstack
-#openshift_cloudprovider_kind=openstack
-#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/
-#openshift_cloudprovider_openstack_username=username
-#openshift_cloudprovider_openstack_password=password
-#openshift_cloudprovider_openstack_domain_id=domain_id
-#openshift_cloudprovider_openstack_domain_name=domain_name
-#openshift_cloudprovider_openstack_tenant_id=tenant_id
-#openshift_cloudprovider_openstack_tenant_name=tenant_name
-#openshift_cloudprovider_openstack_region=region
-#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id
-#
-# GCE
-#openshift_cloudprovider_kind=gce
-
-# Project Configuration
-#osm_project_request_message=''
-#osm_project_request_template=''
-#osm_mcs_allocator_range='s0:/2'
-#osm_mcs_labels_per_project=5
-#osm_uid_allocator_range='1000000000-1999999999/10000'
-
-# Configure additional projects
-#openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}}
-
-# Enable cockpit
-#osm_use_cockpit=true
-#
-# Set cockpit plugins
-#osm_cockpit_plugins=['cockpit-kubernetes']
-
-# Native high availability (default cluster method)
-# If no lb group is defined, the installer assumes that a load balancer has
-# been preconfigured. For installation the value of
-# openshift_master_cluster_hostname must resolve to the load balancer
-# or to one or all of the masters defined in the inventory if no load
-# balancer is present.
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-
-# Configure controller arguments
-#osm_controller_args={'resource-quota-sync-period': ['10s']}
-
-# Configure api server arguments
-#osm_api_server_args={'max-requests-inflight': ['400']}
-
-# default subdomain to use for exposed routes
-#openshift_master_default_subdomain=apps.test.example.com
-
-# additional cors origins
-#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
-
-# default project node selector
-#osm_default_node_selector='region=primary'
-
-# Override the default pod eviction timeout
-#openshift_master_pod_eviction_timeout=5m
-
-# Override the default oauth tokenConfig settings:
-# openshift_master_access_token_max_seconds=86400
-# openshift_master_auth_token_max_seconds=500
-
-# Override master servingInfo.maxRequestsInFlight
-#openshift_master_max_requests_inflight=500
-
-# Override master and node servingInfo.minTLSVersion and .cipherSuites
-# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12
-# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants
-#openshift_master_min_tls_version=VersionTLS12
-#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
-#
-#openshift_node_min_tls_version=VersionTLS12
-#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
-
-# default storage plugin dependencies to install, by default the ceph and
-# glusterfs plugin dependencies will be installed, if available.
-#osn_storage_plugin_deps=['ceph','glusterfs','iscsi']
-
-# OpenShift Router Options
-#
-# An OpenShift router will be created during install if there are
-# nodes present with labels matching the default router selector,
-# "region=infra". Set openshift_node_labels per node as needed in
-# order to label nodes.
-#
-# Example:
-# [nodes]
-# node.example.com openshift_node_labels="{'region': 'infra'}"
-#
-# Router selector (optional)
-# Router will only be created if nodes matching this label are present.
-# Default value: 'region=infra'
-#openshift_hosted_router_selector='region=infra'
-#
-# Router replicas (optional)
-# Unless specified, openshift-ansible will calculate the replica count
-# based on the number of nodes matching the openshift router selector.
-#openshift_hosted_router_replicas=2
-#
-# Router force subdomain (optional)
-# A router path format to force on all routes used by this router
-# (will ignore the route host value)
-#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com'
-#
-# Router certificate (optional)
-# Provide local certificate paths which will be configured as the
-# router's default certificate.
-#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
-#
-# Manage the OpenShift Router (optional)
-#openshift_hosted_manage_router=true
-#
-# Router sharding support has been added and can be achieved by supplying the correct
-# data to the inventory. The variable to house the data is openshift_hosted_routers
-# and is in the form of a list. If no data is passed then a default router will be
-# created. There are multiple combinations of router sharding. The one described
-# below supports routers on separate nodes.
-#
-#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}]
-
-# OpenShift Registry Console Options
-# Override the console image prefix:
-# origin default is "cockpit/", enterprise default is "openshift3/"
-#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/
-# origin default is "kubernetes", enterprise default is "registry-console"
-#openshift_cockpit_deployer_basename=my-console
-# Override image version, defaults to latest for origin, vX.Y product version for enterprise
-#openshift_cockpit_deployer_version=1.4.1
-
-# Openshift Registry Options
-#
-# An OpenShift registry will be created during install if there are
-# nodes present with labels matching the default registry selector,
-# "region=infra". Set openshift_node_labels per node as needed in
-# order to label nodes.
-#
-# Example:
-# [nodes]
-# node.example.com openshift_node_labels="{'region': 'infra'}"
-#
-# Registry selector (optional)
-# Registry will only be created if nodes matching this label are present.
-# Default value: 'region=infra'
-#openshift_hosted_registry_selector='region=infra'
-#
-# Registry replicas (optional)
-# Unless specified, openshift-ansible will calculate the replica count
-# based on the number of nodes matching the openshift registry selector.
-#openshift_hosted_registry_replicas=2
-#
-# Validity of the auto-generated certificate in days (optional)
-#openshift_hosted_registry_cert_expire_days=730
-#
-# Manage the OpenShift Registry (optional)
-#openshift_hosted_manage_registry=true
-
-# Registry Storage Options
-#
-# NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/registry". "exports" is
-# the name of the export served by the nfs server. "registry" is
-# the name of a directory inside of "/exports".
-#openshift_hosted_registry_storage_kind=nfs
-#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
-# nfs_directory must conform to DNS-1123: it must consist of lower case
-# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character
-#openshift_hosted_registry_storage_nfs_directory=/exports
-#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
-#openshift_hosted_registry_storage_volume_name=registry
-#openshift_hosted_registry_storage_volume_size=10Gi
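-#
-# With the example values above, the [nfs] host would export
-# "/exports/registry" with options "*(rw,root_squash)"; illustratively,
-# its exports configuration would contain a line like:
-#   /exports/registry *(rw,root_squash)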
-#
-# External NFS Host
-# NFS volume must already exist with path "nfs_directory/volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/registry". "exports" is
-# the name of the export served by the nfs server. "registry" is
-# the name of a directory inside of "/exports".
-#openshift_hosted_registry_storage_kind=nfs
-#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
-#openshift_hosted_registry_storage_host=nfs.example.com
-# nfs_directory must conform to DNS-1123: it must consist of lower case
-# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character
-#openshift_hosted_registry_storage_nfs_directory=/exports
-#openshift_hosted_registry_storage_volume_name=registry
-#openshift_hosted_registry_storage_volume_size=10Gi
-#
-# Openstack
-# Volume must already exist.
-#openshift_hosted_registry_storage_kind=openstack
-#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_registry_storage_openstack_filesystem=ext4
-#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
-#openshift_hosted_registry_storage_volume_size=10Gi
-#
-# AWS S3
-# S3 bucket must already exist.
-#openshift_hosted_registry_storage_kind=object
-#openshift_hosted_registry_storage_provider=s3
-#openshift_hosted_registry_storage_s3_encrypt=false
-#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id
-#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
-#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
-#openshift_hosted_registry_storage_s3_bucket=bucket_name
-#openshift_hosted_registry_storage_s3_region=bucket_region
-#openshift_hosted_registry_storage_s3_chunksize=26214400
-#openshift_hosted_registry_storage_s3_rootdirectory=/registry
-#openshift_hosted_registry_pullthrough=true
-#openshift_hosted_registry_acceptschema2=true
-#openshift_hosted_registry_enforcequota=true
-#
-# Any S3 service (Minio, ExoScale, ...): Basically the same as above
-# but with regionendpoint configured
-# S3 bucket must already exist.
-#openshift_hosted_registry_storage_kind=object
-#openshift_hosted_registry_storage_provider=s3
-#openshift_hosted_registry_storage_s3_accesskey=access_key_id
-#openshift_hosted_registry_storage_s3_secretkey=secret_access_key
-#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/
-#openshift_hosted_registry_storage_s3_bucket=bucket_name
-#openshift_hosted_registry_storage_s3_region=bucket_region
-#openshift_hosted_registry_storage_s3_chunksize=26214400
-#openshift_hosted_registry_storage_s3_rootdirectory=/registry
-#openshift_hosted_registry_pullthrough=true
-#openshift_hosted_registry_acceptschema2=true
-#openshift_hosted_registry_enforcequota=true
-#
-# Additional CloudFront Options. When using CloudFront all three
-# of the following variables must be defined.
-#openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/
-#openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem
-#openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid
-
-# Metrics deployment
-# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
-#
-# By default metrics are not automatically deployed; set this to enable them
-#openshift_metrics_install_metrics=true
-#
-# Storage Options
-# If openshift_metrics_storage_kind is unset then metrics will be stored
-# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
-# Storage options A & B currently support only one cassandra pod which is
-# generally enough for up to 1000 pods. Additional volumes can be created
-# manually after the fact and metrics scaled per the docs.
-#
-# Option A - NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/metrics". "exports" is
-# the name of the export served by the nfs server. "metrics" is
-# the name of a directory inside of "/exports".
-#openshift_metrics_storage_kind=nfs
-#openshift_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_metrics_storage_nfs_directory=/exports
-#openshift_metrics_storage_nfs_options='*(rw,root_squash)'
-#openshift_metrics_storage_volume_name=metrics
-#openshift_metrics_storage_volume_size=10Gi
-#openshift_metrics_storage_labels={'storage': 'metrics'}
-#
-# Option B - External NFS Host
-# NFS volume must already exist with path "nfs_directory/volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/metrics". "exports" is
-# the name of the export served by the nfs server. "metrics" is
-# the name of a directory inside of "/exports".
-#openshift_metrics_storage_kind=nfs
-#openshift_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_metrics_storage_host=nfs.example.com
-#openshift_metrics_storage_nfs_directory=/exports
-#openshift_metrics_storage_volume_name=metrics
-#openshift_metrics_storage_volume_size=10Gi
-#openshift_metrics_storage_labels={'storage': 'metrics'}
-#
-# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
-# your cloud platform, use this.
-#openshift_metrics_storage_kind=dynamic
-#
-# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
-# list of options please see roles/openshift_metrics/README.md
-#
-# Override metricsPublicURL in the master config for cluster metrics
-# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics
-# Currently, you may only alter the hostname portion of the url; altering the
-# `/hawkular/metrics` path will break installation of metrics.
-#openshift_metrics_hawkular_hostname=hawkular-metrics.example.com
-# Configure the prefix and version for the component images
-#openshift_metrics_image_prefix=docker.io/openshift/origin-
-#openshift_metrics_image_version=v3.7
-# when openshift_deployment_type=='openshift-enterprise'
-#openshift_metrics_image_prefix=registry.access.redhat.com/openshift3/
-#openshift_metrics_image_version=v3.7
-#
-# StorageClass
-# openshift_storageclass_name=gp2
-# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'}
-#
-
-# Logging deployment
-#
-# Currently logging deployment is disabled by default; enable it by setting this
-#openshift_logging_install_logging=true
-#
-# Logging storage config
-# Option A - NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/logging". "exports" is
-# the name of the export served by the nfs server. "logging" is
-# the name of a directory inside of "/exports".
-#openshift_logging_storage_kind=nfs
-#openshift_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_logging_storage_nfs_directory=/exports
-#openshift_logging_storage_nfs_options='*(rw,root_squash)'
-#openshift_logging_storage_volume_name=logging
-#openshift_logging_storage_volume_size=10Gi
-#openshift_logging_storage_labels={'storage': 'logging'}
-#
-# Option B - External NFS Host
-# NFS volume must already exist with path "nfs_directory/volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/logging". "exports" is
-# the name of the export served by the nfs server. "logging" is
-# the name of a directory inside of "/exports".
-#openshift_logging_storage_kind=nfs
-#openshift_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_logging_storage_host=nfs.example.com
-#openshift_logging_storage_nfs_directory=/exports
-#openshift_logging_storage_volume_name=logging
-#openshift_logging_storage_volume_size=10Gi
-#openshift_logging_storage_labels={'storage': 'logging'}
-#
-# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
-# your cloud platform, use this.
-#openshift_logging_storage_kind=dynamic
-#
-# Option D - none -- Logging will use emptydir volumes which are destroyed when
-# pods are deleted
-#
-# Other Logging Options -- Common items you may wish to reconfigure, for the complete
-# list of options please see roles/openshift_logging/README.md
-#
-# Configure loggingPublicURL in the master config for aggregate logging, defaults
-# to kibana.{{ openshift_master_default_subdomain }}
-#openshift_logging_kibana_hostname=logging.apps.example.com
-# Configure the number of Elasticsearch nodes; unless you're using dynamic
-# provisioning, this value must be 1
-#openshift_logging_es_cluster_size=1
-# Configure the prefix and version for the component images
-#openshift_logging_image_prefix=docker.io/openshift/origin-
-#openshift_logging_image_version=v3.7.0
-# when openshift_deployment_type=='openshift-enterprise'
-#openshift_logging_image_prefix=registry.access.redhat.com/openshift3/
-#openshift_logging_image_version=3.7.0
-
-# Prometheus deployment
-#
-# Currently prometheus deployment is disabled by default; enable it by setting this
-#openshift_hosted_prometheus_deploy=true
-#
-# Prometheus storage config
-# Option A - NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/prometheus"
-#openshift_prometheus_storage_kind=nfs
-#openshift_prometheus_storage_access_modes=['ReadWriteOnce']
-#openshift_prometheus_storage_nfs_directory=/exports
-#openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
-#openshift_prometheus_storage_volume_name=prometheus
-#openshift_prometheus_storage_volume_size=10Gi
-#openshift_prometheus_storage_labels={'storage': 'prometheus'}
-#openshift_prometheus_storage_type='pvc'
-# For prometheus-alertmanager
-#openshift_prometheus_alertmanager_storage_kind=nfs
-#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
-#openshift_prometheus_alertmanager_storage_nfs_directory=/exports
-#openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
-#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
-#openshift_prometheus_alertmanager_storage_volume_size=10Gi
-#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
-#openshift_prometheus_alertmanager_storage_type='pvc'
-# For prometheus-alertbuffer
-#openshift_prometheus_alertbuffer_storage_kind=nfs
-#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
-#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports
-#openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
-#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
-#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
-#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
-#openshift_prometheus_alertbuffer_storage_type='pvc'
-#
-# Option B - External NFS Host
-# NFS volume must already exist with path "nfs_directory/volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/prometheus"
-#openshift_prometheus_storage_kind=nfs
-#openshift_prometheus_storage_access_modes=['ReadWriteOnce']
-#openshift_prometheus_storage_host=nfs.example.com
-#openshift_prometheus_storage_nfs_directory=/exports
-#openshift_prometheus_storage_volume_name=prometheus
-#openshift_prometheus_storage_volume_size=10Gi
-#openshift_prometheus_storage_labels={'storage': 'prometheus'}
-#openshift_prometheus_storage_type='pvc'
-# For prometheus-alertmanager
-#openshift_prometheus_alertmanager_storage_kind=nfs
-#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
-#openshift_prometheus_alertmanager_storage_host=nfs.example.com
-#openshift_prometheus_alertmanager_storage_nfs_directory=/exports
-#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
-#openshift_prometheus_alertmanager_storage_volume_size=10Gi
-#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
-#openshift_prometheus_alertmanager_storage_type='pvc'
-# For prometheus-alertbuffer
-#openshift_prometheus_alertbuffer_storage_kind=nfs
-#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
-#openshift_prometheus_alertbuffer_storage_host=nfs.example.com
-#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports
-#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
-#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
-#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
-#openshift_prometheus_alertbuffer_storage_type='pvc'
-#
-# Option C - none -- Prometheus, alertmanager and alertbuffer will use emptydir volumes
-# which are destroyed when pods are deleted
-
-# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
-# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
-
-# Disable the OpenShift SDN plugin
-# openshift_use_openshift_sdn=False
-
-# Configure SDN cluster network and kubernetes service CIDR blocks. These
-# network blocks should be private and should not conflict with network blocks
-# in your infrastructure that pods may require access to. This cannot be changed
-# after deployment.
-#
-# WARNING: Do not pick subnets that overlap with the default Docker bridge subnet of
-# 172.17.0.0/16. Your installation will fail and/or your configuration change will
-# cause the Pod SDN or Cluster SDN to fail.
-#
-# WORKAROUND: If you must use an overlapping subnet, you can configure a non-conflicting
-# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS
-# environment variable located in /etc/sysconfig/docker-network.
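-# For example (illustrative), /etc/sysconfig/docker-network would then
-# contain:
-#   DOCKER_NETWORK_OPTIONS='--bip=192.168.2.1/24'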
-# When upgrading or scaling up the following must match what's in your master config!
-# Inventory: master yaml field
-# osm_cluster_network_cidr: clusterNetworkCIDR
-# openshift_portal_net: serviceNetworkCIDR
-# When installing, osm_cluster_network_cidr and openshift_portal_net must be set.
-# Sane examples are provided below.
-#osm_cluster_network_cidr=10.128.0.0/14
-#openshift_portal_net=172.30.0.0/16
-
-# ExternalIPNetworkCIDRs controls what values are acceptable for the
-# service external IP field. If empty, no externalIP may be set. It
-# may contain a list of CIDRs which are checked for access. If a CIDR
-# is prefixed with !, IPs in that CIDR will be rejected. Rejections
-# will be applied first, then the IP checked against one of the
-# allowed CIDRs. You should ensure this range does not overlap with
-# your nodes, pods, or service CIDRs for security reasons.
-#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
-
-# IngressIPNetworkCIDR controls the range to assign ingress IPs from for
-# services of type LoadBalancer on bare metal. If empty, ingress IPs will not
-# be assigned. It may contain a single CIDR from which IPs will be allocated. For
-# security reasons, you should ensure that this range does not overlap with
-# the CIDRs reserved for external IPs, nodes, pods, or services.
-#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
-
-# Configure the number of bits to allocate to each host's subnet, e.g. 9
-# would mean a /23 network on the host.
-# When upgrading or scaling up the following must match what's in your master config!
-# Inventory: master yaml field
-# osm_host_subnet_length: hostSubnetLength
-# When installing, osm_host_subnet_length must be set. A sane example is provided below.
-#osm_host_subnet_length=9
-
-# Configure master API and console ports.
-#openshift_master_api_port=8443
-#openshift_master_console_port=8443
-
-# set exact RPM version (include - prefix)
-#openshift_pkg_version=-3.6.0
-# you may also specify version and release, e.g.:
-#openshift_pkg_version=-3.7.0-0.126.0.git.0.9351aae.el7
-
-# Configure custom ca certificate
-#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'}
-#
-# NOTE: The CA certificate will not be replaced on existing clusters.
-# This option may only be specified when creating a new cluster or
-# when redeploying cluster certificates with the redeploy-certificates
-# playbook.
-
-# Configure custom named certificates (SNI certificates)
-#
-# https://docs.openshift.org/latest/install_config/certificate_customization.html
-# https://docs.openshift.com/enterprise/latest/install_config/certificate_customization.html
-#
-# NOTE: openshift_master_named_certificates is cached on masters and is an
-# additive fact, meaning that each run with a different set of certificates
-# will add the newly provided certificates to the cached set of certificates.
-#
-# An optional CA may be specified for each named certificate. CAs will
-# be added to the OpenShift CA bundle which allows for the named
-# certificate to be served for internal cluster communication.
-#
-# If you would like openshift_master_named_certificates to be overwritten with
-# the provided value, specify openshift_master_overwrite_named_certificates.
-#openshift_master_overwrite_named_certificates=true
-#
-# Provide local certificate paths which will be deployed to masters
-#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]
-#
-# Detected names may be overridden by specifying the "names" key
-#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}]
-
-# Session options
-#openshift_master_session_name=ssn
-#openshift_master_session_max_seconds=3600
-
-# An authentication and encryption secret will be generated if secrets
-# are not provided. If provided, openshift_master_session_auth_secrets
-# and openshift_master_session_encryption_secrets must be of equal length.
-#
-# Signing secrets, used to authenticate sessions using
-# HMAC. Recommended to use secrets with 32 or 64 bytes.
-#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
-#
-# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
-# characters long, to select AES-128, AES-192, or AES-256.
-#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
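-#
-# One illustrative way to generate suitable random secrets (an assumption;
-# any source of random strings works):
-#   openssl rand -hex 32   # 64-character signing secret
-#   openssl rand -hex 16   # 32-character encryption secret (AES-256)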
-
-# configure how often node iptables rules are refreshed
-#openshift_node_iptables_sync_period=5s
-
-# Configure nodeIP in the node config
-# This is needed in cases where node traffic is desired to go over an
-# interface other than the default network interface.
-#openshift_set_node_ip=True
-
-# Configure dnsIP in the node config
-#openshift_dns_ip=172.30.0.1
-
-# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
-#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']}
-
-# Configure logrotate scripts
-# See: https://github.com/nickhammond/ansible-logrotate
-#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
-
-# openshift-ansible will wait indefinitely for your input when it detects that the
-# value of openshift_hostname resolves to an IP address not bound to any local
-# interfaces. This misconfiguration is problematic for any pod leveraging host
-# networking and liveness or readiness probes.
-# Setting this variable to true will override that check.
-#openshift_override_hostname_check=true
-
-# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail
-# in versions >= 3.6
-#openshift_use_dnsmasq=False
-
-# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf
-# This is useful for POC environments where DNS may not actually be available yet,
-# or for setting options like 'strict-order' to alter the dnsmasq configuration.
-#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf
-
-# Global Proxy Configuration
-# These options configure the HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment
-# variables for docker and master services.
-#
-# Hosts in the openshift_no_proxy list will NOT use any globally
-# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains
-# (.example.com), hosts (example.com), and IP addresses.
-#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
-#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
-#openshift_no_proxy='.hosts.example.com,some-host.com'
-#
-# Most environments don't require a proxy between OpenShift masters, nodes, and
-# etcd hosts, so those hostnames are automatically added to the openshift_no_proxy
-# list. If all of your hosts share a common domain you may wish to disable this
-# and specify that domain above instead.
-#
-# For example, with hosts whose FQDNs are m1.ex.com, n1.ex.com, and
-# n2.ex.com, one would simply add '.ex.com' to the openshift_no_proxy
-# variable (above) and set this value to False.
-#openshift_generate_no_proxy_hosts=True
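-#
-# A concrete sketch of the FQDN example above (hypothetical '.ex.com' domain):
-#openshift_no_proxy='.ex.com'
-#openshift_generate_no_proxy_hosts=False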
-#
-# These options configure the BuildDefaults admission controller which injects
-# configuration into Builds. Proxy related values will default to the global proxy
-# config values. You only need to set these if they differ from the global proxy settings.
-# See BuildDefaults documentation at
-# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
-#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_no_proxy=mycorp.com
-#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_git_no_proxy=mycorp.com
-#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
-#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'}
-#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'}
-#openshift_builddefaults_resources_requests_cpu=100m
-#openshift_builddefaults_resources_requests_memory=256Mi
-#openshift_builddefaults_resources_limits_cpu=1000m
-#openshift_builddefaults_resources_limits_memory=512Mi
-
-# Or you may optionally define your own build defaults configuration serialized as json
-#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}'
-
-# These options configure the BuildOverrides admission controller which injects
-# configuration into Builds.
-# See BuildOverrides documentation at
-# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
-#openshift_buildoverrides_force_pull=true
-#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
-#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
-#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}
-
-# Or you may optionally define your own build overrides configuration serialized as json
-#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
-
-# Enable service catalog
-#openshift_enable_service_catalog=true
-
-# Enable template service broker (requires service catalog to be enabled, above)
-#template_service_broker_install=true
-
-# Force a specific prefix (i.e. registry) to use when pulling the service catalog image
-# NOTE: The registry, all the way up to the start of the image name, must be provided.
-# Two examples are provided below.
-#openshift_service_catalog_image_prefix=docker.io/openshift/origin-
-#openshift_service_catalog_image_prefix=registry.access.redhat.com/openshift3/ose-
-# Force a specific image version to use when pulling the service catalog image
-#openshift_service_catalog_image_version=v3.7
-
-# Configure one or more namespaces whose templates will be served by the TSB
-#openshift_template_service_broker_namespaces=['openshift']
-
-# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
-#openshift_master_dynamic_provisioning_enabled=False
-
-# Admission plugin config
-#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}}
-
-# Configure usage of openshift_clock role.
-#openshift_clock_enabled=true
-
-# OpenShift Per-Service Environment Variables
-# Environment variables are added to /etc/sysconfig files for
-# each OpenShift service: node, master (api and controllers).
-# API and controllers environment variables are merged in single
-# master environments.
-#openshift_master_api_env_vars={"ENABLE_HTTP2": "true"}
-#openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
-#openshift_node_env_vars={"ENABLE_HTTP2": "true"}
-
-# Enable API service auditing
-#openshift_master_audit_config={"enabled": true}
-#
-# If you want a more advanced setup for the audit log you can
-# use this line.
-# The directory in "auditFilePath" will be created if it does
-# not exist.
-#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
-
-# Enable origin repos that point at the CentOS PaaS SIG; defaults to true and is
-# only used when deployment_type=origin
-#openshift_enable_origin_repo=false
-
-# Validity of the auto-generated OpenShift certificates in days.
-# See also openshift_hosted_registry_cert_expire_days above.
-#
-#openshift_ca_cert_expire_days=1825
-#openshift_node_cert_expire_days=730
-#openshift_master_cert_expire_days=730
-
-# Validity of the auto-generated external etcd certificates in days.
-# Controls validity for etcd CA, peer, server and client certificates.
-#
-#etcd_ca_default_days=1825
-#
-# ServiceAccountConfig:LimitSecretReferences rejects pods that reference secrets their service accounts do not reference
-# openshift_master_saconfig_limitsecretreferences=false
-
-# Upgrade Control
-#
-# By default nodes are upgraded serially, one at a time, and all failures
-# are fatal. There is one set of variables for normal nodes and one set for
-# nodes that are part of the control plane, as the number of hosts may be
-# different in those two groups.
-#openshift_upgrade_nodes_serial=1
-#openshift_upgrade_nodes_max_fail_percentage=0
-#openshift_upgrade_control_plane_nodes_serial=1
-#openshift_upgrade_control_plane_nodes_max_fail_percentage=0
-#
-# You can specify the number of nodes to upgrade at once. We do not currently
-# attempt to verify that you have capacity to drain this many nodes at once
-# so please be careful when specifying these values. You should also verify that
-# the expected number of nodes are all schedulable and ready before starting an
-# upgrade. If it's not possible to drain the requested nodes the upgrade will
-# stall indefinitely until the drain is successful.
-#
-# If you're upgrading more than one node at a time you can specify the maximum
-# percentage of failure within the batch before the upgrade is aborted. Any
-# nodes that do fail are ignored for the rest of the playbook run, and you should
-# take care to investigate the failure and return the node to service so that
-# your cluster returns to full capacity.
-#
-# The upgrade aborts only when the failure percentage exceeds the configured
-# value. With two failures out of four nodes (50%), this would fail
-# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
-# whereas this would not
-# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
-#
-# Multiple data migrations take place and, if they fail, they will fail the
-# upgrade. You may wish to disable these or make them non-fatal.
-#
-# openshift_upgrade_pre_storage_migration_enabled=true
-# openshift_upgrade_pre_storage_migration_fatal=true
-# openshift_upgrade_post_storage_migration_enabled=true
-# openshift_upgrade_post_storage_migration_fatal=false
-
-######################################################################
-# CloudForms/ManageIQ (CFME/MIQ) Configuration
-
-# See the readme for full descriptions and getting started
-# instructions: ../../roles/openshift_management/README.md or go directly to
-# their definitions: ../../roles/openshift_management/defaults/main.yml
-# ../../roles/openshift_management/vars/main.yml
-#
-# Namespace for the CFME project
-#openshift_management_project: openshift-management
-
-# Namespace/project description
-#openshift_management_project_description: CloudForms Management Engine
-
-# Choose 'miq-template' for a podified database install
-# Choose 'miq-template-ext-db' for an external database install
-#
-# If you are using the miq-template-ext-db template then you must add
-# the required database parameters to the
-# openshift_management_template_parameters variable.
-#openshift_management_app_template: miq-template
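-#
-# A hypothetical external-database sketch (the parameter names and values
-# below are assumptions; consult the .parameters list in
-# files/miq-template-ext-db.yaml for the authoritative keys):
-#openshift_management_app_template: miq-template-ext-db
-#openshift_management_template_parameters: {'DATABASE_IP': '10.0.0.5', 'DATABASE_USER': 'miq', 'DATABASE_PASSWORD': 'changeme'}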
-
-# Allowed options: nfs, nfs_external, preconfigured, cloudprovider.
-#openshift_management_storage_class: nfs
-
-# [OPTIONAL] - If you are using an EXTERNAL NFS server, such as a
-# NetApp appliance, then you must set the hostname here. Leave the
-# value as 'false' if you are not using external NFS.
-#openshift_management_storage_nfs_external_hostname: false
-
-# [OPTIONAL] - If you are using external NFS then you must set the base
-# path to the exports location here.
-#
-# Additionally: EXTERNAL NFS REQUIRES that YOU CREATE the NFS exports
-# that will back the application PV and optionally the database
-# PV. Export paths are defined relative to
-# {{ openshift_management_storage_nfs_base_dir }}
-#
-# LOCAL NFS NOTE:
-#
-# You may also change this value if you want to change the default
-# path used for local NFS exports.
-#openshift_management_storage_nfs_base_dir: /exports
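-#
-# For example (hypothetical export names): with the default base dir, the
-# application PV might be backed by /exports/miq-app and the database PV
-# by /exports/miq-db.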
-
-# LOCAL NFS NOTE:
-#
-# You may override the automatically selected LOCAL NFS server by
-# setting this variable. Useful for testing specific task files.
-#openshift_management_storage_nfs_local_hostname: false
-
-# These are the default values for the username and password of the
-# management app. Changing these values in your inventory will not
-# change your username or password. You should only need to change
-# these values in your inventory if you already changed the actual
-# username and password AND are trying to use integration scripts.
-#
-# For example, adding this cluster as a container provider,
-# playbooks/byo/openshift-management/add_container_provider.yml
-#openshift_management_username: admin
-#openshift_management_password: smartvm
-
-# A hash of parameters you want to override or set in the
-# miq-template.yaml or miq-template-ext-db.yaml templates. Set this in
-# your inventory file as a simple hash. Acceptable values are defined
-# under the .parameters list in files/miq-template{-ext-db}.yaml
-# Example:
-#
-# openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'}
-#openshift_management_template_parameters: {}
-
-# Firewall configuration
-# You can open additional firewall ports by defining them as a list of service
-# names and ports/port ranges for either masters or nodes.
-#openshift_master_open_ports=[{"service":"svc1","port":"11/tcp"}]
-#openshift_node_open_ports=[{"service":"svc2","port":"12-13/tcp"},{"service":"svc3","port":"14/udp"}]
-
-# host group for masters
-[masters]
-ose3-master[1:3]-ansible.test.example.com
-
-[etcd]
-ose3-etcd[1:3]-ansible.test.example.com
-
-# NOTE: Containerized load balancer hosts are not yet supported; if using a global
-# containerized=true host variable, it must be set to false for these hosts.
-[lb]
-ose3-lb-ansible.test.example.com containerized=false
-
-# NOTE: Currently we require that masters be part of the SDN, which requires that they also be nodes.
-# However, in order to ensure that your masters are not burdened with running pods, you should
-# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
-[nodes]
-ose3-master[1:3]-ansible.test.example.com
-ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
-
-[nfs]
-ose3-nfs-ansible.test.example.com
diff --git a/inventory/byo/hosts.openstack b/inventory/byo/hosts.openstack
deleted file mode 100644
index c648078c4..000000000
--- a/inventory/byo/hosts.openstack
+++ /dev/null
@@ -1,37 +0,0 @@
-# This is an example of a bring your own (byo) host inventory
-
-# Create an OSEv3 group that contains the masters and nodes groups
-[OSEv3:children]
-masters
-nodes
-etcd
-lb
-
-# Set variables common for all OSEv3 hosts
-[OSEv3:vars]
-ansible_ssh_user=cloud-user
-ansible_become=yes
-
-# Debug level for all OpenShift components (Defaults to 2)
-debug_level=2
-
-openshift_deployment_type=openshift-enterprise
-
-openshift_additional_repos=[{'id': 'ose-3.1', 'name': 'ose-3.1', 'baseurl': 'http://pulp.dist.prod.ext.phx2.redhat.com/content/dist/rhel/server/7/7Server/x86_64/ose/3.1/os', 'enabled': 1, 'gpgcheck': 0}]
-
-openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '{{ openshift.common.config_base }}/htpasswd'}]
-
-#openshift_pkg_version=-3.0.0.0
-
-[masters]
-jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}"
-
-[etcd]
-jdetiber-etcd.usersys.redhat.com
-
-[lb]
-#ose3-lb-ansible.test.example.com
-
-[nodes]
-jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}"
-jdetiber-node[1:2].usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" openshift_node_labels="{'region': 'primary', 'zone': 'default'}"