Diffstat (limited to 'inventory')
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.external.example             |  56
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.mixed.example                 |  59
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.native.example                |  46
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.registry-only.example (renamed from inventory/byo/hosts.byo.native-glusterfs.example) |  19
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.storage-and-registry.example  |  63
-rw-r--r--  inventory/byo/hosts.origin.example                              |  35
-rw-r--r--  inventory/byo/hosts.ose.example                                 |  35
7 files changed, 298 insertions(+), 15 deletions(-)
diff --git a/inventory/byo/hosts.byo.glusterfs.external.example b/inventory/byo/hosts.byo.glusterfs.external.example
new file mode 100644
index 000000000..5a284ce97
--- /dev/null
+++ b/inventory/byo/hosts.byo.glusterfs.external.example
@@ -0,0 +1,56 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# that uses an external, stand-alone GlusterFS cluster for its storage.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage, which will use that storage to create a
+# volume that will provide backend storage for a hosted Docker registry.
+#
+# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# deploy GlusterFS storage on an existing cluster. With this playbook, the
+# registry backend volume will be created but the administrator must then
+# either deploy a hosted registry or change an existing hosted registry to use
+# that volume.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+openshift_deployment_type=origin
+# Specify that we want to use an external GlusterFS cluster
+openshift_storage_glusterfs_is_native=False
+# Specify the IP address or hostname of the external heketi service
+openshift_storage_glusterfs_heketi_url=172.0.0.1
+
+[masters]
+master
+
+[nodes]
+master openshift_schedulable=False
+node0 openshift_schedulable=True
+node1 openshift_schedulable=True
+node2 openshift_schedulable=True
+
+# Specify the glusterfs group, which contains the nodes of the external
+# GlusterFS cluster. At a minimum, each node must have "glusterfs_ip" (or
+# "glusterfs_hostname") and "glusterfs_devices" variables defined.
+#
+# The first variable indicates the IP address or hostname of the external
+# GlusterFS node, which must be reachable by the external heketi service.
+#
+# The second variable is a list of block devices the node will have access to
+# that are intended solely for use as GlusterFS storage. These block devices
+# must be bare (e.g. have no data, not be marked as LVM PVs), and will be
+# formatted.
+[glusterfs]
+node0.local glusterfs_ip='172.0.0.10' glusterfs_devices='[ "/dev/vdb" ]'
+node1.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdb", "/dev/vdc" ]'
+node2.local glusterfs_ip='172.0.0.12' glusterfs_devices='[ "/dev/vdd" ]'
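
A quick way to sanity-check that the external heketi service named in openshift_storage_glusterfs_heketi_url is reachable before running a playbook (the :8080 port and the admin credentials are assumptions based on heketi's defaults, not values from this inventory):

    # heketi answers a plain HTTP health check on /hello
    curl http://172.0.0.1:8080/hello

    # with heketi-cli installed, list the clusters this heketi instance manages
    heketi-cli --server http://172.0.0.1:8080 --user admin --secret ADMIN_KEY cluster list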
diff --git a/inventory/byo/hosts.byo.glusterfs.mixed.example b/inventory/byo/hosts.byo.glusterfs.mixed.example
new file mode 100644
index 000000000..d16df6470
--- /dev/null
+++ b/inventory/byo/hosts.byo.glusterfs.mixed.example
@@ -0,0 +1,59 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# that uses an external GlusterFS cluster together with a natively hosted,
+# containerized heketi service.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage, which will use that storage to create a
+# volume that will provide backend storage for a hosted Docker registry.
+#
+# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# deploy GlusterFS storage on an existing cluster. With this playbook, the
+# registry backend volume will be created but the administrator must then
+# either deploy a hosted registry or change an existing hosted registry to use
+# that volume.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+openshift_deployment_type=origin
+# Specify that we want to use an external GlusterFS cluster and a native
+# heketi service
+openshift_storage_glusterfs_is_native=False
+openshift_storage_glusterfs_heketi_is_native=True
+# Specify that heketi will use SSH to communicate with the GlusterFS nodes,
+# and the private key file it will use for authentication
+openshift_storage_glusterfs_heketi_executor=ssh
+openshift_storage_glusterfs_heketi_ssh_keyfile=/root/id_rsa
+
+[masters]
+master
+
+[nodes]
+master openshift_schedulable=False
+node0 openshift_schedulable=True
+node1 openshift_schedulable=True
+node2 openshift_schedulable=True
+
+# Specify the glusterfs group, which contains the nodes of the external
+# GlusterFS cluster. At a minimum, each node must have "glusterfs_ip" (or
+# "glusterfs_hostname") and "glusterfs_devices" variables defined.
+#
+# The first variable indicates the IP address or hostname of the external
+# GlusterFS node, which must be reachable by the external heketi service.
+#
+# The second variable is a list of block devices the node will have access to
+# that are intended solely for use as GlusterFS storage. These block devices
+# must be bare (e.g. have no data, not be marked as LVM PVs), and will be
+# formatted.
+[glusterfs]
+node0.local glusterfs_ip='172.0.0.10' glusterfs_devices='[ "/dev/vdb" ]'
+node1.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdb", "/dev/vdc" ]'
+node2.local glusterfs_ip='172.0.0.12' glusterfs_devices='[ "/dev/vdd" ]'
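
With the SSH executor configured above, heketi needs passwordless root SSH to every node in the [glusterfs] group. A minimal sketch of preparing the key at the path given in openshift_storage_glusterfs_heketi_ssh_keyfile (hostnames are the ones from this inventory):

    # generate an unencrypted key pair at the configured keyfile path
    ssh-keygen -t rsa -f /root/id_rsa -N ''

    # authorize it for root on each external GlusterFS node
    for h in node0.local node1.local node2.local; do
        ssh-copy-id -i /root/id_rsa.pub root@"$h"
    done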
diff --git a/inventory/byo/hosts.byo.glusterfs.native.example b/inventory/byo/hosts.byo.glusterfs.native.example
new file mode 100644
index 000000000..c1a1f6f84
--- /dev/null
+++ b/inventory/byo/hosts.byo.glusterfs.native.example
@@ -0,0 +1,46 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# with natively hosted, containerized GlusterFS storage for applications. It
+# will also automatically create a StorageClass for this purpose.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage.
+#
+# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# deploy GlusterFS storage on an existing cluster.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+openshift_deployment_type=origin
+
+[masters]
+master
+
+[nodes]
+master openshift_schedulable=False
+node0 openshift_schedulable=True
+node1 openshift_schedulable=True
+node2 openshift_schedulable=True
+
+# Specify the glusterfs group, which contains the nodes that will host
+# GlusterFS storage pods. At a minimum, each node must have a
+# "glusterfs_devices" variable defined. This variable is a list of block
+# devices the node will have access to that are intended solely for use as
+# GlusterFS storage. These block devices must be bare (e.g. have no data, not
+# be marked as LVM PVs), and will be formatted.
+[glusterfs]
+node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
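
Once this playbook run completes, a StorageClass backed by the new GlusterFS cluster should exist. A sketch of verifying it; the class name glusterfs-storage is an assumption based on the role's defaults, not a value set in this inventory:

    # list storage classes, then inspect the GlusterFS-backed one;
    # its provisioner should be kubernetes.io/glusterfs
    oc get storageclass
    oc get storageclass glusterfs-storage -o yaml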
diff --git a/inventory/byo/hosts.byo.native-glusterfs.example b/inventory/byo/hosts.byo.glusterfs.registry-only.example
index dc847a5b2..31a85ee42 100644
--- a/inventory/byo/hosts.byo.native-glusterfs.example
+++ b/inventory/byo/hosts.byo.glusterfs.registry-only.example
@@ -1,11 +1,12 @@
# This is an example of a bring your own (byo) host inventory for a cluster
-# with natively hosted, containerized GlusterFS storage.
+# with natively hosted, containerized GlusterFS storage for exclusive use
+# as storage for a natively hosted Docker registry.
#
# This inventory may be used with the byo/config.yml playbook to deploy a new
# cluster with GlusterFS storage, which will use that storage to create a
# volume that will provide backend storage for a hosted Docker registry.
#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with byo/openshift-glusterfs/registry.yml to
# deploy GlusterFS storage on an existing cluster. With this playbook, the
# registry backend volume will be created but the administrator must then
# either deploy a hosted registry or change an existing hosted registry to use
@@ -20,7 +21,7 @@
masters
nodes
# Specify there will be GlusterFS nodes
-glusterfs
+glusterfs_registry
[OSEv3:vars]
ansible_ssh_user=root
@@ -29,15 +30,15 @@ openshift_deployment_type=origin
openshift_hosted_registry_storage_kind=glusterfs
[masters]
-master node=True storage=True master=True
+master
[nodes]
-master node=True storage=True master=True openshift_schedulable=False
+master openshift_schedulable=False
# A hosted registry, by default, will only be deployed on nodes labeled
# "region=infra".
-node0 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node1 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node2 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node0 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node1 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node2 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
# Specify the glusterfs_registry group, which contains the nodes that will host
# GlusterFS storage pods. At a minimum, each node must have a
@@ -45,7 +46,7 @@ node2 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedula
# devices the node will have access to that are intended solely for use as
# GlusterFS storage. These block devices must be bare (e.g. have no data, not
# be marked as LVM PVs), and will be formatted.
-[glusterfs]
+[glusterfs_registry]
node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
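
Because the devices in glusterfs_devices must be bare, it is worth checking them on each node before running the playbook. A sketch using standard tools (device names are the ones from this inventory; wipefs -a is destructive, so only use it on devices you are sure should be wiped):

    # confirm the device carries no partitions or filesystem signatures
    lsblk /dev/vdb
    wipefs /dev/vdb          # with no options this only reports signatures

    # confirm it is not already an LVM physical volume
    pvs | grep vdb || echo "/dev/vdb is not an LVM PV"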
diff --git a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
new file mode 100644
index 000000000..54bd89ddc
--- /dev/null
+++ b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
@@ -0,0 +1,63 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# with natively hosted, containerized GlusterFS storage for both general
+# application use and a natively hosted Docker registry. It will also create a
+# StorageClass for the general storage.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage.
+#
+# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# deploy GlusterFS storage on an existing cluster. With this playbook, the
+# registry backend volume will be created but the administrator must then
+# either deploy a hosted registry or change an existing hosted registry to use
+# that volume.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs
+glusterfs_registry
+
+[OSEv3:vars]
+ansible_ssh_user=root
+openshift_deployment_type=origin
+# Specify that we want to use GlusterFS storage for a hosted registry
+openshift_hosted_registry_storage_kind=glusterfs
+
+[masters]
+master
+
+[nodes]
+master openshift_schedulable=False
+# It is recommended not to use a single cluster for both general and registry
+# storage, so two three-node clusters will be required.
+node0 openshift_schedulable=True
+node1 openshift_schedulable=True
+node2 openshift_schedulable=True
+# A hosted registry, by default, will only be deployed on nodes labeled
+# "region=infra".
+node3 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node4 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node5 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+
+# Specify the glusterfs and glusterfs_registry groups, which contain the
+# nodes that will host GlusterFS storage pods. At a minimum, each node must
+# have a "glusterfs_devices" variable defined. This variable is a list of
+# block devices the node will have access to that are intended solely for use
+# as GlusterFS storage. These block devices must be bare (e.g. have no data,
+# not be marked as LVM PVs), and will be formatted.
+[glusterfs]
+node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+
+[glusterfs_registry]
+node3 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node4 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node5 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
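
With an inventory like this saved to a file, the two deployment paths described in the header comments would be invoked roughly as follows (playbook paths are the ones this repository documents; the inventory filename is whatever you saved):

    # deploy a new cluster, including GlusterFS storage and the registry
    ansible-playbook -i hosts.byo.glusterfs.storage-and-registry.example \
        playbooks/byo/config.yml

    # or add GlusterFS storage to an existing cluster
    ansible-playbook -i hosts.byo.glusterfs.storage-and-registry.example \
        playbooks/byo/openshift-glusterfs/config.yml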
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index ab6cfdad4..f09c3d255 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -120,6 +120,11 @@ openshift_release=v3.6
# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
# docker_version="1.12.1"
+# Specify whether to run the Docker daemon with SELinux enabled in containers. Default is True.
+# Uncomment below to disable; for example, if your kernel does not support the
+# Docker overlay/overlay2 storage drivers with SELinux enabled.
+#openshift_docker_selinux_enabled=False
+
# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
# docker_upgrade=False
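
The new openshift_docker_selinux_enabled setting governs whether the Docker daemon is started with --selinux-enabled. A quick way to inspect the rendered options on a configured node (/etc/sysconfig/docker is the usual location on RHEL-family hosts; this is a post-install check, not part of the install):

    # show the daemon options the installer rendered; with the default (True),
    # --selinux-enabled should appear among them
    grep OPTIONS /etc/sysconfig/docker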
@@ -158,8 +163,9 @@ openshift_release=v3.6
# modify image streams to point at that registry by setting the following to true
#openshift_examples_modify_imagestreams=true
-# Origin copr repo
+# OpenShift repository configuration
#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+#openshift_repos_enable_testing=false
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
@@ -475,6 +481,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# S3 bucket must already exist.
#openshift_hosted_registry_storage_kind=object
#openshift_hosted_registry_storage_provider=s3
+#openshift_hosted_registry_storage_s3_encrypt=false
+#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id
#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
#openshift_hosted_registry_storage_s3_bucket=bucket_name
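
The two new S3 options pair together: encrypt enables server-side encryption for the registry's bucket, and kmskeyid selects a KMS key so SSE-KMS is used instead of the default SSE-S3. A sketch of the pair in use (the key ARN is illustrative only):

    openshift_hosted_registry_storage_s3_encrypt=true
    openshift_hosted_registry_storage_s3_kmskeyid=arn:aws:kms:us-east-1:000000000000:key/EXAMPLE-KEY-ID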
@@ -559,6 +567,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Configure the prefix and version for the component images
#openshift_hosted_metrics_deployer_prefix=docker.io/openshift/origin-
#openshift_hosted_metrics_deployer_version=3.6.0
+#
+# StorageClass
+# openshift_storageclass_name=gp2
+# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'}
+#
# Logging deployment
#
@@ -721,7 +734,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_dns_ip=172.30.0.1
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
-#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
+#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
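
For context on the image-gc values above: the kubelet begins garbage-collecting unused images when image disk usage crosses the high threshold and keeps deleting until usage falls below the low threshold, so the example line translates into these kubelet flags:

    --image-gc-high-threshold=85   # start deleting unused images at 85% disk usage
    --image-gc-low-threshold=80    # stop once usage is back under 80%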
@@ -747,6 +760,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Global Proxy Configuration
# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
# variables for docker and master services.
+#
+# Hosts in the openshift_no_proxy list will NOT use any globally
+# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains
+# (.example.com), hosts (example.com), and IP addresses.
#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
#openshift_no_proxy='.hosts.example.com,some-host.com'
@@ -754,7 +771,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Most environments don't require a proxy between openshift masters, nodes, and
# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.
# If all of your hosts share a common domain you may wish to disable this and
-# specify that domain above.
+# specify that domain above instead.
+#
+# For example, if your hosts have FQDNs m1.ex.com, n1.ex.com, and
+# n2.ex.com, you would simply add '.ex.com' to the openshift_no_proxy
+# variable (above) and set this value to False.
#openshift_generate_no_proxy_hosts=True
#
# These options configure the BuildDefaults admission controller which injects
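
Putting the example above into concrete inventory form (.ex.com standing in for your real shared domain):

    # every host shares the ex.com domain, so one suffix covers them all
    openshift_no_proxy='.ex.com'
    openshift_generate_no_proxy_hosts=False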
@@ -869,6 +890,14 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
# whereas this would not
# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
+#
+# Multiple data migrations take place during an upgrade, and if they fail they
+# will fail the upgrade. You may wish to disable these or make them non-fatal.
+#
+# openshift_upgrade_pre_storage_migration_enabled=true
+# openshift_upgrade_pre_storage_migration_fatal=true
+# openshift_upgrade_post_storage_migration_enabled=true
+# openshift_upgrade_post_storage_migration_fatal=false
# host group for masters
[masters]
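
The migration switches above can also be flipped per-run with -e instead of being edited into the inventory; a sketch, assuming the v3_6 upgrade playbook path used in this repository:

    ansible-playbook -i hosts \
        playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml \
        -e openshift_upgrade_post_storage_migration_fatal=false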
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index f797ae5c8..c4b5da5b8 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -116,6 +116,11 @@ openshift_release=v3.6
# Default value: "--log-driver=journald"
#openshift_docker_options="-l warn --ipv6=false"
+# Specify whether to run the Docker daemon with SELinux enabled in containers. Default is True.
+# Uncomment below to disable; for example, if your kernel does not support the
+# Docker overlay/overlay2 storage drivers with SELinux enabled.
+#openshift_docker_selinux_enabled=False
+
# Specify exact version of Docker to configure or upgrade to.
# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
# docker_version="1.12.1"
@@ -157,8 +162,9 @@ openshift_release=v3.6
# modify image streams to point at that registry by setting the following to true
#openshift_examples_modify_imagestreams=true
-# Additional yum repos to install
+# OpenShift repository configuration
#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+#openshift_repos_enable_testing=false
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
@@ -475,6 +481,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# S3 bucket must already exist.
#openshift_hosted_registry_storage_kind=object
#openshift_hosted_registry_storage_provider=s3
+#openshift_hosted_registry_storage_s3_encrypt=false
+#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id
#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
#openshift_hosted_registry_storage_s3_bucket=bucket_name
@@ -559,6 +567,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Configure the prefix and version for the component images
#openshift_hosted_metrics_deployer_prefix=registry.example.com:8888/openshift3/
#openshift_hosted_metrics_deployer_version=3.6.0
+#
+# StorageClass
+# openshift_storageclass_name=gp2
+# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'}
+#
# Logging deployment
#
@@ -721,7 +734,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_dns_ip=172.30.0.1
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
-#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
+#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
@@ -747,6 +760,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Global Proxy Configuration
# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
# variables for docker and master services.
+#
+# Hosts in the openshift_no_proxy list will NOT use any globally
+# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains
+# (.example.com), hosts (example.com), and IP addresses.
#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
#openshift_no_proxy='.hosts.example.com,some-host.com'
@@ -754,7 +771,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Most environments don't require a proxy between openshift masters, nodes, and
# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.
# If all of your hosts share a common domain you may wish to disable this and
-# specify that domain above.
+# specify that domain above instead.
+#
+# For example, if your hosts have FQDNs m1.ex.com, n1.ex.com, and
+# n2.ex.com, you would simply add '.ex.com' to the openshift_no_proxy
+# variable (above) and set this value to False.
#openshift_generate_no_proxy_hosts=True
#
# These options configure the BuildDefaults admission controller which injects
@@ -865,6 +886,14 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
# whereas this would not
# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
+#
+# Multiple data migrations take place during an upgrade, and if they fail they
+# will fail the upgrade. You may wish to disable these or make them non-fatal.
+#
+# openshift_upgrade_pre_storage_migration_enabled=true
+# openshift_upgrade_pre_storage_migration_fatal=true
+# openshift_upgrade_post_storage_migration_enabled=true
+# openshift_upgrade_post_storage_migration_fatal=false
# host group for masters
[masters]