-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  files/origin-components/template-service-broker-registration.yaml | 4
-rw-r--r--  filter_plugins/oo_filters.py | 324
-rw-r--r--  inventory/byo/hosts.example | 74
-rw-r--r--  inventory/byo/hosts.origin.example | 900
-rw-r--r--  openshift-ansible.spec | 131
-rw-r--r--  playbooks/aws/BUILD_AMI.md | 21
-rw-r--r--  playbooks/aws/PREREQUISITES.md | 40
-rw-r--r--  playbooks/aws/README.md | 140
-rwxr-xr-x  playbooks/aws/openshift-cluster/accept.yml | 6
-rw-r--r--  playbooks/aws/openshift-cluster/build_ami.yml | 9
-rw-r--r--  playbooks/aws/openshift-cluster/prerequisites.yml | 8
-rw-r--r--  playbooks/aws/openshift-cluster/provisioning_vars.example.yml | 28
-rw-r--r--  playbooks/aws/provisioning-inventory.example.ini | 25
-rw-r--r--  playbooks/aws/provisioning_vars.yml.example | 120
-rw-r--r--  playbooks/byo/openshift-etcd/embedded2external.yml | 6
-rw-r--r--  playbooks/byo/openshift-management/config.yml | 2
-rw-r--r--  playbooks/byo/openshift-management/uninstall.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/create_persistent_volumes.yml | 9
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/openshift_logging.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/openshift_management.yml | 25
-rw-r--r--  playbooks/common/openshift-cluster/openshift_metrics.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/service_catalog.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/std_include.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 25
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 40
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml | 19
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 14
-rw-r--r--  playbooks/common/openshift-etcd/certificates.yml | 29
-rw-r--r--  playbooks/common/openshift-etcd/config.yml | 6
-rw-r--r--  playbooks/common/openshift-etcd/embedded2external.yml | 172
-rw-r--r--  playbooks/common/openshift-etcd/master_etcd_certificates.yml | 14
-rw-r--r--  playbooks/common/openshift-etcd/migrate.yml | 24
-rw-r--r--  playbooks/common/openshift-etcd/scaleup.yml | 4
-rw-r--r--  playbooks/common/openshift-etcd/server_certificates.yml | 15
-rw-r--r--  playbooks/common/openshift-glusterfs/config.yml | 6
-rw-r--r--  playbooks/common/openshift-loadbalancer/config.yml | 17
-rw-r--r--  playbooks/common/openshift-management/config.yml | 20
-rw-r--r--  playbooks/common/openshift-master/additional_config.yml | 8
-rw-r--r--  playbooks/common/openshift-master/config.yml | 13
-rw-r--r--  playbooks/common/openshift-nfs/config.yml | 6
-rw-r--r--  playbooks/common/openshift-node/additional_config.yml | 14
-rw-r--r--  playbooks/common/openshift-node/config.yml | 6
-rw-r--r--  playbooks/common/openshift-node/configure_nodes.yml | 1
-rw-r--r--  playbooks/common/openshift-node/image_prep.yml | 6
-rw-r--r--  roles/ansible_service_broker/tasks/install.yml | 26
-rw-r--r--  roles/docker/defaults/main.yml | 1
-rw-r--r--  roles/docker/tasks/package_docker.yml | 20
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml | 6
-rw-r--r--  roles/etcd/defaults/main.yaml | 3
-rw-r--r--  roles/etcd/tasks/auxiliary/clean_data.yml | 2
-rw-r--r--  roles/etcd/tasks/auxiliary/disable_etcd.yml | 5
-rw-r--r--  roles/etcd/tasks/auxiliary/force_new_cluster.yml | 31
-rw-r--r--  roles/etcd/tasks/backup.archive.yml | 3
-rw-r--r--  roles/etcd/tasks/backup.copy.yml | 3
-rw-r--r--  roles/etcd/tasks/backup.fetch.yml | 3
-rw-r--r--  roles/etcd/tasks/backup.force_new_cluster.yml | 12
-rw-r--r--  roles/etcd/tasks/backup.unarchive.yml | 3
-rw-r--r--  roles/etcd/tasks/backup/archive.yml | 5
-rw-r--r--  roles/etcd/tasks/backup/backup.yml | 18
-rw-r--r--  roles/etcd/tasks/backup/copy.yml | 5
-rw-r--r--  roles/etcd/tasks/backup/fetch.yml | 8
-rw-r--r--  roles/etcd/tasks/backup/unarchive.yml | 14
-rw-r--r--  roles/etcd/tasks/backup/vars.yml | 18
-rw-r--r--  roles/etcd/tasks/backup_master_etcd_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/certificates/backup_master_etcd_certificates.yml | 7
-rw-r--r--  roles/etcd/tasks/check_cluster_health.yml | 2
-rw-r--r--  roles/etcd/tasks/disable_etcd.yml | 2
-rw-r--r--  roles/etcd/tasks/fetch_backup.yml | 8
-rw-r--r--  roles/etcd/tasks/system_container.yml | 1
-rw-r--r--  roles/etcd/templates/etcd.conf.j2 | 4
-rw-r--r--  roles/installer_checkpoint/README.md | 3
-rw-r--r--  roles/installer_checkpoint/callback_plugins/installer_checkpoint.py | 2
-rw-r--r--  roles/kuryr/README.md | 38
-rw-r--r--  roles/kuryr/defaults/main.yaml | 72
-rw-r--r--  roles/kuryr/meta/main.yml | 17
-rw-r--r--  roles/kuryr/tasks/master.yaml | 52
-rw-r--r--  roles/kuryr/tasks/node.yaml | 48
-rw-r--r--  roles/kuryr/tasks/serviceaccount.yaml | 31
-rw-r--r--  roles/kuryr/templates/cni-daemonset.yaml.j2 | 53
-rw-r--r--  roles/kuryr/templates/configmap.yaml.j2 | 343
-rw-r--r--  roles/kuryr/templates/controller-deployment.yaml.j2 | 40
-rw-r--r--  roles/lib_openshift/library/oc_adm_csr.py | 16
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_csr.py | 16
-rw-r--r--  roles/openshift_aws/README.md | 77
-rw-r--r--  roles/openshift_aws/defaults/main.yml | 22
-rw-r--r--  roles/openshift_aws/tasks/launch_config.yml | 23
-rw-r--r--  roles/openshift_aws/tasks/provision_instance.yml | 8
-rw-r--r--  roles/openshift_aws/tasks/scale_group.yml | 2
-rw-r--r--  roles/openshift_aws/tasks/seal_ami.yml | 7
-rw-r--r--  roles/openshift_aws/templates/user_data.j2 | 26
-rw-r--r--  roles/openshift_excluder/tasks/install.yml | 31
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 78
-rw-r--r--  roles/openshift_gcp/templates/provision.j2.sh | 2
-rw-r--r--  roles/openshift_hosted_facts/tasks/main.yml | 2
-rw-r--r--  roles/openshift_logging/README.md | 3
-rw-r--r--  roles/openshift_logging_elasticsearch/defaults/main.yml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 178
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 4
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/default_images.yml | 3
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml | 3
-rw-r--r--  roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 | 4
-rw-r--r--  roles/openshift_logging_fluentd/defaults/main.yml | 4
-rw-r--r--  roles/openshift_logging_fluentd/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_logging_fluentd/templates/fluentd.j2 | 34
-rw-r--r--  roles/openshift_master/defaults/main.yml | 99
-rw-r--r--  roles/openshift_master/meta/main.yml | 1
-rw-r--r--  roles/openshift_master/tasks/bootstrap.yml | 63
-rw-r--r--  roles/openshift_master/tasks/check_master_api_is_ready.yml | 14
-rw-r--r--  roles/openshift_master/tasks/configure_external_etcd.yml | 17
-rw-r--r--  roles/openshift_master/tasks/main.yml | 18
-rw-r--r--  roles/openshift_master/tasks/registry_auth.yml | 10
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 35
-rw-r--r--  roles/openshift_master/tasks/upgrade_facts.yml | 33
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master.j2 | 2
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 11
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 | 2
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 | 2
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 1
-rw-r--r--  roles/openshift_metrics/tasks/install_cassandra.yaml | 1
-rw-r--r--  roles/openshift_node/defaults/main.yml | 13
-rw-r--r--  roles/openshift_node/files/bootstrap.yml | 63
-rw-r--r--  roles/openshift_node/handlers/main.yml | 11
-rw-r--r--  roles/openshift_node/tasks/aws.yml | 21
-rw-r--r--  roles/openshift_node/tasks/bootstrap.yml | 57
-rw-r--r--  roles/openshift_node/tasks/config.yml | 68
-rw-r--r--  roles/openshift_node/tasks/config/configure-node-settings.yml | 2
-rw-r--r--  roles/openshift_node/tasks/config/install-node-docker-service-file.yml | 8
-rw-r--r--  roles/openshift_node/tasks/install.yml | 6
-rw-r--r--  roles/openshift_node/tasks/main.yml | 9
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 6
-rw-r--r--  roles/openshift_node/templates/node.service.j2 | 6
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 6
-rw-r--r--  roles/openshift_node_facts/tasks/main.yml | 1
-rw-r--r--  roles/openshift_node_upgrade/README.md | 1
-rw-r--r--  roles/openshift_node_upgrade/defaults/main.yml | 2
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml | 2
-rw-r--r--  roles/openshift_node_upgrade/tasks/systemd_units.yml | 2
-rw-r--r--  roles/openshift_prometheus/defaults/main.yaml | 34
-rw-r--r--  roles/openshift_prometheus/files/openshift_prometheus.exports | 3
-rw-r--r--  roles/openshift_prometheus/tasks/create_pvs.yaml | 36
-rw-r--r--  roles/openshift_prometheus/tasks/install_prometheus.yaml | 9
-rw-r--r--  roles/openshift_prometheus/tasks/nfs.yaml | 44
-rw-r--r--  roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2 | 15
-rw-r--r--  roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2 | 15
-rw-r--r--  roles/openshift_prometheus/templates/prom-pv-server.yml.j2 | 15
-rw-r--r--  roles/openshift_prometheus/templates/prometheus_deployment.j2 | 2
-rw-r--r--  roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml | 110
-rw-r--r--  roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml | 16
-rw-r--r--  roles/openshift_service_catalog/tasks/generate_certs.yml | 17
-rw-r--r--  roles/openshift_service_catalog/tasks/install.yml | 8
-rw-r--r--  roles/openshift_service_catalog/tasks/remove.yml | 4
-rw-r--r--  roles/openshift_service_catalog/templates/api_server.j2 | 4
-rw-r--r--  roles/openshift_service_catalog/templates/controller_manager.j2 | 19
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml | 8
-rw-r--r--  roles/openshift_storage_nfs/tasks/main.yml | 3
-rw-r--r--  roles/openshift_storage_nfs/templates/exports.j2 | 3
-rw-r--r--  roles/template_service_broker/defaults/main.yml | 1
-rw-r--r--  roles/template_service_broker/tasks/install.yml | 20
-rw-r--r--  roles/tuned/defaults/main.yml | 3
-rw-r--r--  roles/tuned/meta/main.yml | 13
-rw-r--r--  roles/tuned/tasks/main.yml (renamed from roles/openshift_node/tasks/tuned.yml) | 2
-rw-r--r--  roles/tuned/templates/openshift-control-plane/tuned.conf (renamed from roles/openshift_node/templates/tuned/openshift-control-plane/tuned.conf) | 0
-rw-r--r--  roles/tuned/templates/openshift-node/tuned.conf (renamed from roles/openshift_node/templates/tuned/openshift-node/tuned.conf) | 0
-rw-r--r--  roles/tuned/templates/openshift/tuned.conf (renamed from roles/openshift_node/templates/tuned/openshift/tuned.conf) | 0
-rw-r--r--  roles/tuned/templates/recommend.conf (renamed from roles/openshift_node/templates/tuned/recommend.conf) | 9
177 files changed, 3842 insertions, 1111 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 608b430ce..fb7a6e0b4 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.7.0-0.147.0 ./
+3.7.0-0.169.0 ./
diff --git a/files/origin-components/template-service-broker-registration.yaml b/files/origin-components/template-service-broker-registration.yaml
index 2086978f0..95fb72924 100644
--- a/files/origin-components/template-service-broker-registration.yaml
+++ b/files/origin-components/template-service-broker-registration.yaml
@@ -9,8 +9,8 @@ parameters:
required: true
objects:
# register the tsb with the service catalog
-- apiVersion: servicecatalog.k8s.io/v1alpha1
- kind: ServiceBroker
+- apiVersion: servicecatalog.k8s.io/v1beta1
+ kind: ClusterServiceBroker
metadata:
name: template-service-broker
spec:
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 83a05370a..2fbd23450 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -710,8 +710,8 @@ def oo_openshift_env(hostvars):
return facts
-# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements
-def oo_component_persistent_volumes(hostvars, groups, component):
+# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements, too-many-locals
+def oo_component_persistent_volumes(hostvars, groups, component, subcomponent=None):
""" Generate list of persistent volumes based on oo_openshift_env
storage options set in host variables for a specific component.
"""
@@ -723,84 +723,90 @@ def oo_component_persistent_volumes(hostvars, groups, component):
persistent_volume = None
if component in hostvars['openshift']:
- if 'storage' in hostvars['openshift'][component]:
- params = hostvars['openshift'][component]['storage']
+ if subcomponent is not None:
+ storage_component = hostvars['openshift'][component][subcomponent]
+ else:
+ storage_component = hostvars['openshift'][component]
+
+ if 'storage' in storage_component:
+ params = storage_component['storage']
kind = params['kind']
- create_pv = params['create_pv']
- if kind is not None and create_pv:
- if kind == 'nfs':
- host = params['host']
- if host is None:
- if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
- host = groups['oo_nfs_to_config'][0]
+ if 'create_pv' in params:
+ create_pv = params['create_pv']
+ if kind is not None and create_pv:
+ if kind == 'nfs':
+ host = params['host']
+ if host is None:
+ if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
+ host = groups['oo_nfs_to_config'][0]
+ else:
+ raise errors.AnsibleFilterError("|failed no storage host detected")
+ directory = params['nfs']['directory']
+ volume = params['volume']['name']
+ path = directory + '/' + volume
+ size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
else:
- raise errors.AnsibleFilterError("|failed no storage host detected")
- directory = params['nfs']['directory']
- volume = params['volume']['name']
- path = directory + '/' + volume
- size = params['volume']['size']
- if 'labels' in params:
- labels = params['labels']
- else:
- labels = dict()
- access_modes = params['access']['modes']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- labels=labels,
- access_modes=access_modes,
- storage=dict(
- nfs=dict(
- server=host,
- path=path)))
-
- elif kind == 'openstack':
- volume = params['volume']['name']
- size = params['volume']['size']
- if 'labels' in params:
- labels = params['labels']
- else:
- labels = dict()
- access_modes = params['access']['modes']
- filesystem = params['openstack']['filesystem']
- volume_id = params['openstack']['volumeID']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- labels=labels,
- access_modes=access_modes,
- storage=dict(
- cinder=dict(
- fsType=filesystem,
- volumeID=volume_id)))
-
- elif kind == 'glusterfs':
- volume = params['volume']['name']
- size = params['volume']['size']
- if 'labels' in params:
- labels = params['labels']
- else:
- labels = dict()
- access_modes = params['access']['modes']
- endpoints = params['glusterfs']['endpoints']
- path = params['glusterfs']['path']
- read_only = params['glusterfs']['readOnly']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- labels=labels,
- access_modes=access_modes,
- storage=dict(
- glusterfs=dict(
- endpoints=endpoints,
- path=path,
- readOnly=read_only)))
-
- elif not (kind == 'object' or kind == 'dynamic'):
- msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
- kind,
- component)
- raise errors.AnsibleFilterError(msg)
+ labels = dict()
+ access_modes = params['access']['modes']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ nfs=dict(
+ server=host,
+ path=path)))
+
+ elif kind == 'openstack':
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
+ access_modes = params['access']['modes']
+ filesystem = params['openstack']['filesystem']
+ volume_id = params['openstack']['volumeID']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ cinder=dict(
+ fsType=filesystem,
+ volumeID=volume_id)))
+
+ elif kind == 'glusterfs':
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
+ access_modes = params['access']['modes']
+ endpoints = params['glusterfs']['endpoints']
+ path = params['glusterfs']['path']
+ read_only = params['glusterfs']['readOnly']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ glusterfs=dict(
+ endpoints=endpoints,
+ path=path,
+ readOnly=read_only)))
+
+ elif not (kind == 'object' or kind == 'dynamic'):
+ msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
+ kind,
+ component)
+ raise errors.AnsibleFilterError(msg)
return persistent_volume
@@ -820,85 +826,10 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
persistent_volumes = []
if 'hosted' in hostvars['openshift']:
for component in hostvars['openshift']['hosted']:
- if 'storage' in hostvars['openshift']['hosted'][component]:
- params = hostvars['openshift']['hosted'][component]['storage']
- kind = params['kind']
- if 'create_pv' in params:
- create_pv = params['create_pv']
- if kind is not None and create_pv:
- if kind == 'nfs':
- host = params['host']
- if host is None:
- if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
- host = groups['oo_nfs_to_config'][0]
- else:
- raise errors.AnsibleFilterError("|failed no storage host detected")
- directory = params['nfs']['directory']
- volume = params['volume']['name']
- path = directory + '/' + volume
- size = params['volume']['size']
- if 'labels' in params:
- labels = params['labels']
- else:
- labels = dict()
- access_modes = params['access']['modes']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- labels=labels,
- access_modes=access_modes,
- storage=dict(
- nfs=dict(
- server=host,
- path=path)))
- persistent_volumes.append(persistent_volume)
- elif kind == 'openstack':
- volume = params['volume']['name']
- size = params['volume']['size']
- if 'labels' in params:
- labels = params['labels']
- else:
- labels = dict()
- access_modes = params['access']['modes']
- filesystem = params['openstack']['filesystem']
- volume_id = params['openstack']['volumeID']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- labels=labels,
- access_modes=access_modes,
- storage=dict(
- cinder=dict(
- fsType=filesystem,
- volumeID=volume_id)))
- persistent_volumes.append(persistent_volume)
- elif kind == 'glusterfs':
- volume = params['volume']['name']
- size = params['volume']['size']
- if 'labels' in params:
- labels = params['labels']
- else:
- labels = dict()
- access_modes = params['access']['modes']
- endpoints = params['glusterfs']['endpoints']
- path = params['glusterfs']['path']
- read_only = params['glusterfs']['readOnly']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- labels=labels,
- access_modes=access_modes,
- storage=dict(
- glusterfs=dict(
- endpoints=endpoints,
- path=path,
- readOnly=read_only)))
- persistent_volumes.append(persistent_volume)
- elif not (kind == 'object' or kind == 'dynamic'):
- msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
- kind,
- component)
- raise errors.AnsibleFilterError(msg)
+ persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'hosted', component)
+ if persistent_volume is not None:
+ persistent_volumes.append(persistent_volume)
+
if 'logging' in hostvars['openshift']:
persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'logging')
if persistent_volume is not None:
@@ -911,10 +842,22 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'metrics')
if persistent_volume is not None:
persistent_volumes.append(persistent_volume)
+ if 'prometheus' in hostvars['openshift']:
+ persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus')
+ if persistent_volume is not None:
+ persistent_volumes.append(persistent_volume)
+ if 'alertmanager' in hostvars['openshift']['prometheus']:
+ persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus', 'alertmanager')
+ if persistent_volume is not None:
+ persistent_volumes.append(persistent_volume)
+ if 'alertbuffer' in hostvars['openshift']['prometheus']:
+ persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus', 'alertbuffer')
+ if persistent_volume is not None:
+ persistent_volumes.append(persistent_volume)
return persistent_volumes
-def oo_component_pv_claims(hostvars, component):
+def oo_component_pv_claims(hostvars, component, subcomponent=None):
""" Generate list of persistent volume claims based on oo_openshift_env
storage options set in host variables for a specific component.
"""
@@ -922,20 +865,27 @@ def oo_component_pv_claims(hostvars, component):
raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
if component in hostvars['openshift']:
- if 'storage' in hostvars['openshift'][component]:
- params = hostvars['openshift'][component]['storage']
+ if subcomponent is not None:
+ storage_component = hostvars['openshift'][component][subcomponent]
+ else:
+ storage_component = hostvars['openshift'][component]
+
+ if 'storage' in storage_component:
+ params = storage_component['storage']
kind = params['kind']
- create_pv = params['create_pv']
- create_pvc = params['create_pvc']
- if kind not in [None, 'object'] and create_pv and create_pvc:
- volume = params['volume']['name']
- size = params['volume']['size']
- access_modes = params['access']['modes']
- persistent_volume_claim = dict(
- name="{0}-claim".format(volume),
- capacity=size,
- access_modes=access_modes)
- return persistent_volume_claim
+ if 'create_pv' in params:
+ if 'create_pvc' in params:
+ create_pv = params['create_pv']
+ create_pvc = params['create_pvc']
+ if kind not in [None, 'object'] and create_pv and create_pvc:
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ access_modes = params['access']['modes']
+ persistent_volume_claim = dict(
+ name="{0}-claim".format(volume),
+ capacity=size,
+ access_modes=access_modes)
+ return persistent_volume_claim
return None
@@ -952,22 +902,10 @@ def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):
persistent_volume_claims = []
if 'hosted' in hostvars['openshift']:
for component in hostvars['openshift']['hosted']:
- if 'storage' in hostvars['openshift']['hosted'][component]:
- params = hostvars['openshift']['hosted'][component]['storage']
- kind = params['kind']
- if 'create_pv' in params:
- if 'create_pvc' in params:
- create_pv = params['create_pv']
- create_pvc = params['create_pvc']
- if kind not in [None, 'object'] and create_pv and create_pvc:
- volume = params['volume']['name']
- size = params['volume']['size']
- access_modes = params['access']['modes']
- persistent_volume_claim = dict(
- name="{0}-claim".format(volume),
- capacity=size,
- access_modes=access_modes)
- persistent_volume_claims.append(persistent_volume_claim)
+ persistent_volume_claim = oo_component_pv_claims(hostvars, 'hosted', component)
+ if persistent_volume_claim is not None:
+ persistent_volume_claims.append(persistent_volume_claim)
+
if 'logging' in hostvars['openshift']:
persistent_volume_claim = oo_component_pv_claims(hostvars, 'logging')
if persistent_volume_claim is not None:
@@ -980,6 +918,18 @@ def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):
persistent_volume_claim = oo_component_pv_claims(hostvars, 'metrics')
if persistent_volume_claim is not None:
persistent_volume_claims.append(persistent_volume_claim)
+ if 'prometheus' in hostvars['openshift']:
+ persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus')
+ if persistent_volume_claim is not None:
+ persistent_volume_claims.append(persistent_volume_claim)
+ if 'alertmanager' in hostvars['openshift']['prometheus']:
+ persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus', 'alertmanager')
+ if persistent_volume_claim is not None:
+ persistent_volume_claims.append(persistent_volume_claim)
+ if 'alertbuffer' in hostvars['openshift']['prometheus']:
+ persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus', 'alertbuffer')
+ if persistent_volume_claim is not None:
+ persistent_volume_claims.append(persistent_volume_claim)
return persistent_volume_claims
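Illustrative aside, not part of the patch: a minimal sketch of how the consolidated filter above could be exercised outside Ansible, assuming the repository root is on PYTHONPATH and ansible is installed so that filter_plugins.oo_filters imports cleanly; the host data below is invented for the example.

# Illustrative only -- not part of this commit. Assumes the openshift-ansible
# repo root is on PYTHONPATH and ansible is installed; the host data is made up.
from filter_plugins.oo_filters import oo_persistent_volumes

hostvars = {
    'openshift': {
        'prometheus': {
            # top-level prometheus storage; host is resolved from the [nfs] group
            'storage': {
                'kind': 'nfs',
                'create_pv': True,
                'host': None,
                'nfs': {'directory': '/exports'},
                'volume': {'name': 'prometheus', 'size': '10Gi'},
                'access': {'modes': ['ReadWriteOnce']},
            },
            # alertmanager is handled via the new subcomponent argument
            'alertmanager': {
                'storage': {
                    'kind': 'nfs',
                    'create_pv': True,
                    'host': 'nfs.example.com',
                    'nfs': {'directory': '/exports'},
                    'volume': {'name': 'prometheus-alertmanager', 'size': '10Gi'},
                    'access': {'modes': ['ReadWriteOnce']},
                },
            },
        },
    },
}
groups = {'oo_nfs_to_config': ['nfs.example.com']}

# One PV dict is emitted per component/subcomponent whose storage sets create_pv.
for pv in oo_persistent_volumes(hostvars, groups):
    print(pv['name'], pv['storage'])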
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index 0b6050891..499a9d8e7 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -615,6 +615,71 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_logging_image_prefix=registry.access.redhat.com/openshift3/
#openshift_logging_image_version=3.7.0
+# Prometheus deployment
+#
+# Currently prometheus deployment is disabled by default, enable it by setting this
+#openshift_hosted_prometheus_deploy=true
+#
+# Prometheus storage config
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/prometheus"
+#openshift_prometheus_storage_kind=nfs
+#openshift_prometheus_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_storage_nfs_directory=/exports
+#openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
+#openshift_prometheus_storage_volume_name=prometheus
+#openshift_prometheus_storage_volume_size=10Gi
+#openshift_prometheus_storage_labels={'storage': 'prometheus'}
+# For prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_kind=nfs
+#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertmanager_storage_nfs_directory=/exports
+#openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
+#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_volume_size=10Gi
+#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
+# For prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_kind=nfs
+#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports
+#openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
+#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
+#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
+#
+# Option B - External NFS Host
+# NFS volume must already exist with path "nfs_directory/_volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/prometheus"
+#openshift_prometheus_storage_kind=nfs
+#openshift_prometheus_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_storage_host=nfs.example.com
+#openshift_prometheus_storage_nfs_directory=/exports
+#openshift_prometheus_storage_volume_name=prometheus
+#openshift_prometheus_storage_volume_size=10Gi
+#openshift_prometheus_storage_labels={'storage': 'prometheus'}
+# For prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_kind=nfs
+#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertmanager_storage_host=nfs.example.com
+#openshift_prometheus_alertmanager_storage_nfs_directory=/exports
+#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_volume_size=10Gi
+#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
+# For prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_kind=nfs
+#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertbuffer_storage_host=nfs.example.com
+#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports
+#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
+#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
+#
+# Option C - none -- Prometheus, alertmanager and alertbuffer will use emptydir volumes
+# which are destroyed when pods are deleted
+
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -811,8 +876,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Or you may optionally define your own build overrides configuration serialized as json
#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
-# Enable template service broker by specifying one of more namespaces whose
-# templates will be served by the broker
+# Enable service catalog
+#openshift_enable_service_catalog=true
+
+# Enable template service broker (requires service catalog to be enabled, above)
+#template_service_broker_install=true
+
+# Configure one or more namespaces whose templates will be served by the TSB
#openshift_template_service_broker_namespaces=['openshift']
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
new file mode 100644
index 000000000..9d811fcab
--- /dev/null
+++ b/inventory/byo/hosts.origin.example
@@ -0,0 +1,900 @@
+# This is an example of a bring your own (byo) host inventory
+
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+etcd
+lb
+nfs
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# Enable unsupported configurations, things that will yield a partially
+# functioning cluster but would not be supported for production use
+#openshift_enable_unsupported_configurations=false
+
+# SSH user, this user should allow ssh based auth without requiring a
+# password. If using ssh key based auth, then the key should be managed by an
+# ssh agent.
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_become must be set to true and the
+# user must be configured for passwordless sudo
+#ansible_become=yes
+
+# Debug level for all OpenShift components (Defaults to 2)
+debug_level=2
+
+# Specify the deployment type. Valid values are origin and openshift-enterprise.
+openshift_deployment_type=origin
+
+# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
+# rely on the version running on the first master. Works best for containerized installs where we can usually
+# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
+# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
+# release.
+openshift_release=v3.7
+
+# Specify an exact container image tag to install or configure.
+# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
+# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
+#openshift_image_tag=v3.7.0
+
+# Specify an exact rpm version to install or configure.
+# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
+# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
+#openshift_pkg_version=-3.7.0
+
+# This enables all the system containers except for docker:
+#openshift_use_system_containers=False
+#
+# But you can choose separately each component that must be a
+# system container:
+#
+#openshift_use_openvswitch_system_container=False
+#openshift_use_node_system_container=False
+#openshift_use_master_system_container=False
+#openshift_use_etcd_system_container=False
+#
+# In either case, system_images_registry must be specified to be able to find the system images
+#system_images_registry="docker.io"
+
+# Install the openshift examples
+#openshift_install_examples=true
+
+# Configure logoutURL in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
+#openshift_master_logout_url=http://example.com
+
+# Configure extensionScripts in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
+#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js']
+
+# Configure extensionStylesheets in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
+#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css']
+
+# Configure extensions in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
+#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}]
+
+# Configure extensions in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
+#openshift_master_oauth_template=/path/to/login-template.html
+
+# Configure imagePolicyConfig in the master config
+# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
+#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
+
+# Configure master API rate limits for external clients
+#openshift_master_external_ratelimit_qps=200
+#openshift_master_external_ratelimit_burst=400
+# Configure master API rate limits for loopback clients
+#openshift_master_loopback_ratelimit_qps=300
+#openshift_master_loopback_ratelimit_burst=600
+
+# Docker Configuration
+# Add additional, insecure, and blocked registries to global docker configuration
+# For enterprise deployment types we ensure that registry.access.redhat.com is
+# included if you do not include it
+#openshift_docker_additional_registries=registry.example.com
+#openshift_docker_insecure_registries=registry.example.com
+#openshift_docker_blocked_registries=registry.hacker.com
+# Disable pushing to dockerhub
+#openshift_docker_disable_push_dockerhub=True
+# Use Docker inside a System Container. Note that this is a tech preview and should
+# not be used to upgrade!
+# The following options for docker are ignored:
+# - docker_version
+# - docker_upgrade
+# The following options must not be used
+# - openshift_docker_options
+#openshift_docker_use_system_container=False
+# Instead of using docker, replace it with cri-o
+# NOTE: This uses openshift_docker_systemcontainer_image_registry_override as its override
+# just as container-engine does.
+#openshift_use_crio=False
+# Force the registry to use for the docker/crio system container. By default the registry
+# will be built off of the deployment type and ansible_distribution. Only
+# use this option if you are sure you know what you are doing!
+#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest"
+#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest"
+# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
+# Default value: "--log-driver=journald"
+#openshift_docker_options="-l warn --ipv6=false"
+
+# Specify exact version of Docker to configure or upgrade to.
+# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
+# docker_version="1.12.1"
+
+# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True.
+# Uncomment below to disable; for example if your kernel does not support the
+# Docker overlay/overlay2 storage drivers with SELinux enabled.
+#openshift_docker_selinux_enabled=False
+
+# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
+# docker_upgrade=False
+
+# Specify exact version of etcd to configure or upgrade to.
+# etcd_version="3.1.0"
+# Enable etcd debug logging, defaults to false
+# etcd_debug=true
+# Set etcd log levels by package
+# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG"
+
+# Upgrade Hooks
+#
+# Hooks are available to run custom tasks at various points during a cluster
+# upgrade. Each hook should point to a file with Ansible tasks defined. Suggest using
+# absolute paths, if not the path will be treated as relative to the file where the
+# hook is actually used.
+#
+# Tasks to run before each master is upgraded.
+# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml
+#
+# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible
+# upgrade steps, but before we restart system/services.
+# openshift_master_upgrade_hook=/usr/share/custom/master.yml
+#
+# Tasks to run after each master is upgraded and system/services have been restarted.
+# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml
+
+
+# Alternate image format string, useful if you've got your own registry mirror
+# Configure this setting just on node or master
+#oreg_url_master=example.com/openshift3/ose-${component}:${version}
+#oreg_url_node=example.com/openshift3/ose-${component}:${version}
+# For setting the configuration globally
+#oreg_url=example.com/openshift3/ose-${component}:${version}
+# If oreg_url points to a registry other than registry.access.redhat.com we can
+# modify image streams to point at that registry by setting the following to true
+#openshift_examples_modify_imagestreams=true
+
+# OpenShift repository configuration
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+#openshift_repos_enable_testing=false
+
+# htpasswd auth
+openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
+# Defining htpasswd users
+#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
+# or
+#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
+
+# Allow all auth
+#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
+
+# LDAP auth
+#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
+#
+# Configure LDAP CA certificate
+# Specify either the ASCII contents of the certificate or the path to
+# the local file that will be copied to the remote host. CA
+# certificate contents will be copied to master systems and saved
+# within /etc/origin/master/ with a filename matching the "ca" key set
+# within the LDAPPasswordIdentityProvider.
+#
+#openshift_master_ldap_ca=<ca text>
+# or
+#openshift_master_ldap_ca_file=<path to local ca file to use>
+
+# OpenID auth
+#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}]
+#
+# Configure OpenID CA certificate
+# Specify either the ASCII contents of the certificate or the path to
+# the local file that will be copied to the remote host. CA
+# certificate contents will be copied to master systems and saved
+# within /etc/origin/master/ with a filename matching the "ca" key set
+# within the OpenIDIdentityProvider.
+#
+#openshift_master_openid_ca=<ca text>
+# or
+#openshift_master_openid_ca_file=<path to local ca file to use>
+
+# Request header auth
+#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}]
+#
+# Configure request header CA certificate
+# Specify either the ASCII contents of the certificate or the path to
+# the local file that will be copied to the remote host. CA
+# certificate contents will be copied to master systems and saved
+# within /etc/origin/master/ with a filename matching the "clientCA"
+# key set within the RequestHeaderIdentityProvider.
+#
+#openshift_master_request_header_ca=<ca text>
+# or
+#openshift_master_request_header_ca_file=<path to local ca file to use>
+
+# CloudForms Management Engine (ManageIQ) App Install
+#
+# Enables installation of MIQ server. Recommended for dedicated
+# clusters only. See roles/openshift_cfme/README.md for instructions
+# and requirements.
+#openshift_cfme_install_app=False
+
+# Cloud Provider Configuration
+#
+# Note: You may make use of environment variables rather than store
+# sensitive configuration within the ansible inventory.
+# For example:
+#openshift_cloudprovider_aws_access_key="{{ lookup('env','AWS_ACCESS_KEY_ID') }}"
+#openshift_cloudprovider_aws_secret_key="{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}"
+#
+# AWS
+#openshift_cloudprovider_kind=aws
+# Note: IAM profiles may be used instead of storing API credentials on disk.
+#openshift_cloudprovider_aws_access_key=aws_access_key_id
+#openshift_cloudprovider_aws_secret_key=aws_secret_access_key
+#
+# Openstack
+#openshift_cloudprovider_kind=openstack
+#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/
+#openshift_cloudprovider_openstack_username=username
+#openshift_cloudprovider_openstack_password=password
+#openshift_cloudprovider_openstack_domain_id=domain_id
+#openshift_cloudprovider_openstack_domain_name=domain_name
+#openshift_cloudprovider_openstack_tenant_id=tenant_id
+#openshift_cloudprovider_openstack_tenant_name=tenant_name
+#openshift_cloudprovider_openstack_region=region
+#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id
+#
+# GCE
+#openshift_cloudprovider_kind=gce
+
+# Project Configuration
+#osm_project_request_message=''
+#osm_project_request_template=''
+#osm_mcs_allocator_range='s0:/2'
+#osm_mcs_labels_per_project=5
+#osm_uid_allocator_range='1000000000-1999999999/10000'
+
+# Configure additional projects
+#openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}}
+
+# Enable cockpit
+#osm_use_cockpit=true
+#
+# Set cockpit plugins
+#osm_cockpit_plugins=['cockpit-kubernetes']
+
+# Native high availability cluster method with optional load balancer.
+# If no lb group is defined, the installer assumes that a load balancer has
+# been preconfigured. For installation the value of
+# openshift_master_cluster_hostname must resolve to the load balancer
+# or to one or all of the masters defined in the inventory if no load
+# balancer is present.
+#openshift_master_cluster_method=native
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Pacemaker high availability cluster method.
+# Pacemaker HA environment must be able to self provision the
+# configured VIP. For installation openshift_master_cluster_hostname
+# must resolve to the configured VIP.
+#openshift_master_cluster_method=pacemaker
+#openshift_master_cluster_password=openshift_cluster
+#openshift_master_cluster_vip=192.168.133.25
+#openshift_master_cluster_public_vip=192.168.133.25
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Override the default controller lease ttl
+#osm_controller_lease_ttl=30
+
+# Configure controller arguments
+#osm_controller_args={'resource-quota-sync-period': ['10s']}
+
+# Configure api server arguments
+#osm_api_server_args={'max-requests-inflight': ['400']}
+
+# default subdomain to use for exposed routes
+#openshift_master_default_subdomain=apps.test.example.com
+
+# additional cors origins
+#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
+
+# default project node selector
+#osm_default_node_selector='region=primary'
+
+# Override the default pod eviction timeout
+#openshift_master_pod_eviction_timeout=5m
+
+# Override the default oauth tokenConfig settings:
+# openshift_master_access_token_max_seconds=86400
+# openshift_master_auth_token_max_seconds=500
+
+# Override master servingInfo.maxRequestsInFlight
+#openshift_master_max_requests_inflight=500
+
+# Override master and node servingInfo.minTLSVersion and .cipherSuites
+# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12
+# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants
+#openshift_master_min_tls_version=VersionTLS12
+#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
+#
+#openshift_node_min_tls_version=VersionTLS12
+#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
+
+# default storage plugin dependencies to install, by default the ceph and
+# glusterfs plugin dependencies will be installed, if available.
+#osn_storage_plugin_deps=['ceph','glusterfs','iscsi']
+
+# OpenShift Router Options
+#
+# An OpenShift router will be created during install if there are
+# nodes present with labels matching the default router selector,
+# "region=infra". Set openshift_node_labels per node as needed in
+# order to label nodes.
+#
+# Example:
+# [nodes]
+# node.example.com openshift_node_labels="{'region': 'infra'}"
+#
+# Router selector (optional)
+# Router will only be created if nodes matching this label are present.
+# Default value: 'region=infra'
+#openshift_hosted_router_selector='region=infra'
+#
+# Router replicas (optional)
+# Unless specified, openshift-ansible will calculate the replica count
+# based on the number of nodes matching the openshift router selector.
+#openshift_hosted_router_replicas=2
+#
+# Router force subdomain (optional)
+# A router path format to force on all routes used by this router
+# (will ignore the route host value)
+#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com'
+#
+# Router certificate (optional)
+# Provide local certificate paths which will be configured as the
+# router's default certificate.
+#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
+#
+# Manage the OpenShift Router
+#openshift_hosted_manage_router=true
+#
+# Router sharding support has been added and can be achieved by supplying the correct
+# data to the inventory. The variable to house the data is openshift_hosted_routers
+# and is in the form of a list. If no data is passed then a default router will be
+# created. There are multiple combinations of router sharding. The one described
+# below supports routers on separate nodes.
+#
+#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}]
+
+# OpenShift Registry Console Options
+# Override the console image prefix for enterprise deployments, not used in origin
+# default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console"
+#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/
+# Override image version, defaults to latest for origin, matches the product version for enterprise
+#openshift_cockpit_deployer_version=1.4.1
+
+# Openshift Registry Options
+#
+# An OpenShift registry will be created during install if there are
+# nodes present with labels matching the default registry selector,
+# "region=infra". Set openshift_node_labels per node as needed in
+# order to label nodes.
+#
+# Example:
+# [nodes]
+# node.example.com openshift_node_labels="{'region': 'infra'}"
+#
+# Registry selector (optional)
+# Registry will only be created if nodes matching this label are present.
+# Default value: 'region=infra'
+#openshift_hosted_registry_selector='region=infra'
+#
+# Registry replicas (optional)
+# Unless specified, openshift-ansible will calculate the replica count
+# based on the number of nodes matching the openshift registry selector.
+#openshift_hosted_registry_replicas=2
+#
+# Validity of the auto-generated certificate in days (optional)
+#openshift_hosted_registry_cert_expire_days=730
+#
+# Manage the OpenShift Registry
+#openshift_hosted_manage_registry=true
+
+# Registry Storage Options
+#
+# NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/registry"
+#openshift_hosted_registry_storage_kind=nfs
+#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
+#openshift_hosted_registry_storage_nfs_directory=/exports
+#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
+#openshift_hosted_registry_storage_volume_name=registry
+#openshift_hosted_registry_storage_volume_size=10Gi
+#
+# External NFS Host
+# NFS volume must already exist with path "nfs_directory/_volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/registry"
+#openshift_hosted_registry_storage_kind=nfs
+#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
+#openshift_hosted_registry_storage_host=nfs.example.com
+#openshift_hosted_registry_storage_nfs_directory=/exports
+#openshift_hosted_registry_storage_volume_name=registry
+#openshift_hosted_registry_storage_volume_size=10Gi
+#
+# Openstack
+# Volume must already exist.
+#openshift_hosted_registry_storage_kind=openstack
+#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_registry_storage_openstack_filesystem=ext4
+#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
+#openshift_hosted_registry_storage_volume_size=10Gi
+#
+# AWS S3
+# S3 bucket must already exist.
+#openshift_hosted_registry_storage_kind=object
+#openshift_hosted_registry_storage_provider=s3
+#openshift_hosted_registry_storage_s3_encrypt=false
+#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id
+#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
+#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
+#openshift_hosted_registry_storage_s3_bucket=bucket_name
+#openshift_hosted_registry_storage_s3_region=bucket_region
+#openshift_hosted_registry_storage_s3_chunksize=26214400
+#openshift_hosted_registry_storage_s3_rootdirectory=/registry
+#openshift_hosted_registry_pullthrough=true
+#openshift_hosted_registry_acceptschema2=true
+#openshift_hosted_registry_enforcequota=true
+#
+# Any S3 service (Minio, ExoScale, ...): Basically the same as above
+# but with regionendpoint configured
+# S3 bucket must already exist.
+#openshift_hosted_registry_storage_kind=object
+#openshift_hosted_registry_storage_provider=s3
+#openshift_hosted_registry_storage_s3_accesskey=access_key_id
+#openshift_hosted_registry_storage_s3_secretkey=secret_access_key
+#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/
+#openshift_hosted_registry_storage_s3_bucket=bucket_name
+#openshift_hosted_registry_storage_s3_region=bucket_region
+#openshift_hosted_registry_storage_s3_chunksize=26214400
+#openshift_hosted_registry_storage_s3_rootdirectory=/registry
+#openshift_hosted_registry_pullthrough=true
+#openshift_hosted_registry_acceptschema2=true
+#openshift_hosted_registry_enforcequota=true
+#
+# Additional CloudFront Options. When using CloudFront all three
+# of the following variables must be defined.
+#openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/
+#openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem
+#openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid
+
+# Metrics deployment
+# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
+#
+# By default metrics are not automatically deployed, set this to enable them
+#openshift_metrics_install_metrics=true
+#
+# Storage Options
+# If openshift_metrics_storage_kind is unset then metrics will be stored
+# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
+# Storage options A & B currently support only one cassandra pod which is
+# generally enough for up to 1000 pods. Additional volumes can be created
+# manually after the fact and metrics scaled per the docs.
+#
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/metrics"
+#openshift_metrics_storage_kind=nfs
+#openshift_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_metrics_storage_nfs_directory=/exports
+#openshift_metrics_storage_nfs_options='*(rw,root_squash)'
+#openshift_metrics_storage_volume_name=metrics
+#openshift_metrics_storage_volume_size=10Gi
+#openshift_metrics_storage_labels={'storage': 'metrics'}
+#
+# Option B - External NFS Host
+# NFS volume must already exist with path "nfs_directory/_volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/metrics"
+#openshift_metrics_storage_kind=nfs
+#openshift_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_metrics_storage_host=nfs.example.com
+#openshift_metrics_storage_nfs_directory=/exports
+#openshift_metrics_storage_volume_name=metrics
+#openshift_metrics_storage_volume_size=10Gi
+#openshift_metrics_storage_labels={'storage': 'metrics'}
+#
+# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
+# your cloud platform use this.
+#openshift_metrics_storage_kind=dynamic
+#
+# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
+# list of options please see roles/openshift_metrics/README.md
+#
+# Override metricsPublicURL in the master config for cluster metrics
+# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics
+# Currently, you may only alter the hostname portion of the URL; altering the
+# `/hawkular/metrics` path will break installation of metrics.
+#openshift_metrics_hawkular_hostname=https://hawkular-metrics.example.com/hawkular/metrics
+# Configure the prefix and version for the component images
+#openshift_metrics_image_prefix=docker.io/openshift/origin-
+#openshift_metrics_image_version=v3.7.0
+#
+# StorageClass
+# openshift_storageclass_name=gp2
+# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'}
+#
+
+# Logging deployment
+#
+# Currently logging deployment is disabled by default; enable it by setting this
+#openshift_logging_install_logging=true
+#
+# Logging storage config
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/logging"
+#openshift_logging_storage_kind=nfs
+#openshift_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_logging_storage_nfs_directory=/exports
+#openshift_logging_storage_nfs_options='*(rw,root_squash)'
+#openshift_logging_storage_volume_name=logging
+#openshift_logging_storage_volume_size=10Gi
+#openshift_logging_storage_labels={'storage': 'logging'}
+#
+# Option B - External NFS Host
+# NFS volume must already exist with path "nfs_directory/volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/logging"
+#openshift_logging_storage_kind=nfs
+#openshift_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_logging_storage_host=nfs.example.com
+#openshift_logging_storage_nfs_directory=/exports
+#openshift_logging_storage_volume_name=logging
+#openshift_logging_storage_volume_size=10Gi
+#openshift_logging_storage_labels={'storage': 'logging'}
+#
+# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
+# your cloud platform use this.
+#openshift_logging_storage_kind=dynamic
+#
+# Option D - none -- Logging will use emptydir volumes which are destroyed when
+# pods are deleted
+#
+# Other Logging Options -- Common items you may wish to reconfigure, for the complete
+# list of options please see roles/openshift_logging/README.md
+#
+# Configure loggingPublicURL in the master config for aggregate logging, defaults
+# to kibana.{{ openshift_master_default_subdomain }}
+#openshift_logging_kibana_hostname=logging.apps.example.com
+# Configure the number of elasticsearch nodes; unless you're using dynamic provisioning
+# this value must be 1
+#openshift_logging_es_cluster_size=1
+# Configure the prefix and version for the component images
+#openshift_logging_image_prefix=docker.io/openshift/origin-
+#openshift_logging_image_version=v3.7.0
+
+# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
+# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
+
+# Disable the OpenShift SDN plugin
+# openshift_use_openshift_sdn=False
+
+# Configure SDN cluster network and kubernetes service CIDR blocks. These
+# network blocks should be private and should not conflict with network blocks
+# in your infrastructure that pods may require access to. This cannot be changed
+# after deployment.
+#
+# WARNING: Do not pick subnets that overlap with the default Docker bridge subnet of
+# 172.17.0.0/16. Your installation will fail and/or your configuration change will
+# cause the Pod SDN or Cluster SDN to fail.
+#
+# WORKAROUND: If you must use an overlapping subnet, you can configure a non-conflicting
+# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS
+# environment variable located in /etc/sysconfig/docker-network.
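+# As a sketch, the relevant line in /etc/sysconfig/docker-network would then
+# look something like:
+#   DOCKER_NETWORK_OPTIONS='--bip=192.168.2.1/24'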
+# When upgrading or scaling up the following must match what's in your master config!
+# Inventory: master yaml field
+# osm_cluster_network_cidr: clusterNetworkCIDR
+# openshift_portal_net: serviceNetworkCIDR
+# When installing, osm_cluster_network_cidr and openshift_portal_net must be set.
+# Sane examples are provided below.
+#osm_cluster_network_cidr=10.128.0.0/14
+#openshift_portal_net=172.30.0.0/16
+
+# ExternalIPNetworkCIDRs controls what values are acceptable for the
+# service external IP field. If empty, no externalIP may be set. It
+# may contain a list of CIDRs which are checked for access. If a CIDR
+# is prefixed with !, IPs in that CIDR will be rejected. Rejections
+# will be applied first, then the IP checked against one of the
+# allowed CIDRs. You should ensure this range does not overlap with
+# your nodes, pods, or service CIDRs for security reasons.
+#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
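+# As a further sketch, using the '!' rejection prefix described above to reject
+# a private range while allowing all other external IPs:
+#openshift_master_external_ip_network_cidrs=['!172.16.0.0/12','0.0.0.0/0']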
+
+# IngressIPNetworkCIDR controls the range to assign ingress IPs from for
+# services of type LoadBalancer on bare metal. If empty, ingress IPs will not
+# be assigned. It may contain a single CIDR that will be allocated from. For
+# security reasons, you should ensure that this range does not overlap with
+# the CIDRs reserved for external IPs, nodes, pods, or services.
+#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
+
+# Configure number of bits to allocate to each host’s subnet e.g. 9
+# would mean a /23 network on the host.
+# When upgrading or scaling up the following must match what's in your master config!
+# Inventory: master yaml field
+# osm_host_subnet_length: hostSubnetLength
+# When installing, osm_host_subnet_length must be set. A sane example is provided below.
+#osm_host_subnet_length=9
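+# (The host subnet prefix is 32 minus osm_host_subnet_length, so 9 yields the
+# /23 mentioned above.)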
+
+# Configure master API and console ports.
+#openshift_master_api_port=8443
+#openshift_master_console_port=8443
+
+# set RPM version for debugging purposes
+#openshift_pkg_version=-1.1
+
+# Configure custom ca certificate
+#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'}
+#
+# NOTE: CA certificate will not be replaced with existing clusters.
+# This option may only be specified when creating a new cluster or
+# when redeploying cluster certificates with the redeploy-certificates
+# playbook.
+
+# Configure custom named certificates (SNI certificates)
+#
+# https://docs.openshift.org/latest/install_config/certificate_customization.html
+#
+# NOTE: openshift_master_named_certificates is cached on masters and is an
+# additive fact, meaning that each run with a different set of certificates
+# will add the newly provided certificates to the cached set of certificates.
+#
+# An optional CA may be specified for each named certificate. CAs will
+# be added to the OpenShift CA bundle which allows for the named
+# certificate to be served for internal cluster communication.
+#
+# If you would like openshift_master_named_certificates to be overwritten with
+# the provided value, specify openshift_master_overwrite_named_certificates.
+#openshift_master_overwrite_named_certificates=true
+#
+# Provide local certificate paths which will be deployed to masters
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]
+#
+# Detected names may be overridden by specifying the "names" key
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}]
+
+# Session options
+#openshift_master_session_name=ssn
+#openshift_master_session_max_seconds=3600
+
+# An authentication and encryption secret will be generated if secrets
+# are not provided. If provided, openshift_master_session_auth_secrets
+# and openshift_master_session_encryption_secrets must be of equal length.
+#
+# Signing secrets, used to authenticate sessions using
+# HMAC. Recommended to use secrets with 32 or 64 bytes.
+#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+#
+# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
+# characters long, to select AES-128, AES-192, or AES-256.
+#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+
+# configure how often node iptables rules are refreshed
+#openshift_node_iptables_sync_period=5s
+
+# Configure nodeIP in the node config
+# This is needed in cases where node traffic is desired to go over an
+# interface other than the default network interface.
+#openshift_set_node_ip=True
+
+# Force setting of system hostname when configuring OpenShift
+# This works around issues related to installations that do not have valid dns
+# entries for the interfaces attached to the host.
+#openshift_set_hostname=True
+
+# Configure dnsIP in the node config
+#openshift_dns_ip=172.30.0.1
+
+# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
+#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']}
+
+# Configure logrotate scripts
+# See: https://github.com/nickhammond/ansible-logrotate
+#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
+
+# openshift-ansible will wait indefinitely for your input when it detects that the
+# value of openshift_hostname resolves to an IP address not bound to any local
+# interfaces. This mis-configuration is problematic for any pod leveraging host
+# networking and liveness or readiness probes.
+# Setting this variable to true will override that check.
+#openshift_override_hostname_check=true
+
+# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail
+# in versions >= 3.6
+#openshift_use_dnsmasq=False
+
+# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf
+# This is useful for POC environments where DNS may not actually be available yet or to set
+# options like 'strict-order' to alter dnsmasq configuration.
+#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf
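+# A hypothetical ose-dnsmasq.conf for such a POC might contain, for example:
+#   strict-order
+#   server=/example.com/192.168.1.1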
+
+# Global Proxy Configuration
+# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
+# variables for docker and master services.
+#
+# Hosts in the openshift_no_proxy list will NOT use any globally
+# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains
+# (.example.com), hosts (example.com), and IP addresses.
+#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
+#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
+#openshift_no_proxy='.hosts.example.com,some-host.com'
+#
+# Most environments don't require a proxy between openshift masters, nodes, and
+# etcd hosts, so those hostnames are automatically added to the openshift_no_proxy list.
+# If all of your hosts share a common domain you may wish to disable this and
+# specify that domain above instead.
+#
+# For example, having hosts with FQDNs: m1.ex.com, n1.ex.com, and
+# n2.ex.com, one would simply add '.ex.com' to the openshift_no_proxy
+# variable (above) and set this value to False
+#openshift_generate_no_proxy_hosts=True
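+# A sketch of that FQDN example would therefore be:
+#openshift_no_proxy='.ex.com'
+#openshift_generate_no_proxy_hosts=False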
+#
+# These options configure the BuildDefaults admission controller which injects
+# configuration into Builds. Proxy related values will default to the global proxy
+# config values. You only need to set these if they differ from the global proxy settings.
+# See BuildDefaults documentation at
+# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_no_proxy=mycorp.com
+#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_git_no_proxy=mycorp.com
+#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
+#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'}
+#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'}
+#openshift_builddefaults_resources_requests_cpu=100m
+#openshift_builddefaults_resources_requests_memory=256Mi
+#openshift_builddefaults_resources_limits_cpu=1000m
+#openshift_builddefaults_resources_limits_memory=512Mi
+
+# Or you may optionally define your own build defaults configuration serialized as json
+#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}'
+
+# These options configure the BuildOverrides admission controller which injects
+# configuration into Builds.
+# See BuildOverrides documentation at
+# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+#openshift_buildoverrides_force_pull=true
+#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
+#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
+#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}
+
+# Or you may optionally define your own build overrides configuration serialized as json
+#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
+
+# Enable template service broker by specifying one or more namespaces whose
+# templates will be served by the broker
+#openshift_template_service_broker_namespaces=['openshift']
+
+# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
+#openshift_master_dynamic_provisioning_enabled=False
+
+# Admission plugin config
+#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}}
+
+# Configure usage of openshift_clock role.
+#openshift_clock_enabled=true
+
+# OpenShift Per-Service Environment Variables
+# Environment variables are added to /etc/sysconfig files for
+# each OpenShift service: node, master (api and controllers).
+# API and controllers environment variables are merged in single
+# master environments.
+#openshift_master_api_env_vars={"ENABLE_HTTP2": "true"}
+#openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
+#openshift_node_env_vars={"ENABLE_HTTP2": "true"}
+
+# Enable API service auditing, available as of 1.3
+#openshift_master_audit_config={"enabled": true}
+#
+# In case you want more advanced setup for the auditlog you can
+# use this line.
+# The directory in "auditFilePath" will be created if it does
+# not exist.
+#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
+
+# Enable origin repos that point at the CentOS PaaS SIG, defaults to true, only used
+# by deployment_type=origin
+#openshift_enable_origin_repo=false
+
+# Validity of the auto-generated OpenShift certificates in days.
+# See also openshift_hosted_registry_cert_expire_days above.
+#
+#openshift_ca_cert_expire_days=1825
+#openshift_node_cert_expire_days=730
+#openshift_master_cert_expire_days=730
+
+# Validity of the auto-generated external etcd certificates in days.
+# Controls validity for etcd CA, peer, server and client certificates.
+#
+#etcd_ca_default_days=1825
+#
+# ServiceAccountConfig:LimitSecretReferences rejects pods that reference secrets their service accounts do not reference
+# openshift_master_saconfig_limitsecretreferences=false
+
+# Upgrade Control
+#
+# By default nodes are upgraded in a serial manner, one at a time, and all
+# failures are fatal. There is one set of variables for normal nodes and one
+# set for nodes that are part of the control plane, as the number of hosts may
+# be different in those two groups.
+#openshift_upgrade_nodes_serial=1
+#openshift_upgrade_nodes_max_fail_percentage=0
+#openshift_upgrade_control_plane_nodes_serial=1
+#openshift_upgrade_control_plane_nodes_max_fail_percentage=0
+#
+# You can specify the number of nodes to upgrade at once. We do not currently
+# attempt to verify that you have capacity to drain this many nodes at once
+# so please be careful when specifying these values. You should also verify that
+# the expected number of nodes are all schedulable and ready before starting an
+# upgrade. If it's not possible to drain the requested nodes the upgrade will
+# stall indefinitely until the drain is successful.
+#
+# If you're upgrading more than one node at a time you can specify the maximum
+# percentage of failures within the batch before the upgrade is aborted. Any
+# nodes that do fail are ignored for the rest of the playbook run; you should
+# take care to investigate each failure and return the node to service so that
+# your cluster is left fully functional.
+#
+# The failure percentage must exceed this value before the upgrade aborts;
+# this would fail on two failures
+# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
+# whereas this would not
+# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
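+# (With serial=4, two failed nodes is 2/4 = 50%: that exceeds 49%, so the first
+# example aborts, while it does not exceed 50%, so the second continues.)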
+#
+# Multiple data migrations take place during the upgrade and, if they fail, they
+# will fail the upgrade. You may wish to disable these or make them non-fatal.
+#
+# openshift_upgrade_pre_storage_migration_enabled=true
+# openshift_upgrade_pre_storage_migration_fatal=true
+# openshift_upgrade_post_storage_migration_enabled=true
+# openshift_upgrade_post_storage_migration_fatal=false
+
+# host group for masters
+[masters]
+ose3-master[1:3]-ansible.test.example.com
+
+[etcd]
+ose3-etcd[1:3]-ansible.test.example.com
+
+# NOTE: Containerized load balancer hosts are not yet supported; if you are using
+# a global containerized=true host variable you must set it to false here.
+[lb]
+ose3-lb-ansible.test.example.com containerized=false
+
+# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
+# However, in order to ensure that your masters are not burdened with running pods you should
+# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
+[nodes]
+ose3-master[1:3]-ansible.test.example.com
+ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 2ae7d48a3..24523f7c8 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.7.0
-Release: 0.147.0%{?dist}
+Release: 0.169.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -64,6 +64,9 @@ rm -f %{buildroot}%{python_sitelib}/openshift_ansible/gce
mkdir -p docs/example-inventories
cp inventory/byo/* docs/example-inventories/
+# openshift-ansible-files install
+cp -rp files %{buildroot}%{_datadir}/ansible/%{name}/
+
# openshift-ansible-playbooks install
cp -rp playbooks %{buildroot}%{_datadir}/ansible/%{name}/
# remove contiv plabooks
@@ -122,6 +125,7 @@ popd
%doc README*
%license LICENSE
%dir %{_datadir}/ansible/%{name}
+%{_datadir}/ansible/%{name}/files
%{_datadir}/ansible/%{name}/library
%ghost %{_datadir}/ansible/%{name}/playbooks/common/openshift-master/library.rpmmoved
@@ -276,6 +280,131 @@ Atomic OpenShift Utilities includes
%changelog
+* Fri Oct 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.169.0
+- Initial Kuryr support (mdulko@redhat.com)
+- Indentation errors (dymurray@redhat.com)
+- Bug 1503233 - Add liveness and readiness probe checks to ASB deploymentconfig
+ (dymurray@redhat.com)
+
+* Fri Oct 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.168.0
+-
+
+* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.167.0
+-
+
+* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.166.0
+-
+
+* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.165.0
+-
+
+* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.164.0
+- Change to service-signer.crt for template_service_broker CA_BUNDLE
+ (staebler@redhat.com)
+- Use service-signer.crt for ca_bundle passed to clusterservicebroker
+ (staebler@redhat.com)
+- Rename ServiceBroker to ClusterServiceBroker for ansible_service_broker task.
+ (staebler@redhat.com)
+- Add apiserver.crt to service-catalog controller-manager deployment.
+ (staebler@redhat.com)
+- Remove redundant faulty role binding ifrom
+ kubeservicecatalog_roles_bindings.yml (staebler@redhat.com)
+- Update service catalog playbook for service-catalog rc1 (staebler@redhat.com)
+
+* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.163.0
+- set use_manageiq as default (efreiber@redhat.com)
+
+* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.162.0
+- Wait longer for stable GCP instances (ccoleman@redhat.com)
+- Remove unneeded master config updates during upgrades (mgugino@redhat.com)
+
+* Wed Oct 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.161.0
+-
+
+* Wed Oct 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.160.0
+- Fix pvc selector default to be empty dict instead of string
+ (zgalor@redhat.com)
+- Fix typo in setting prom-proxy memory limit (zgalor@redhat.com)
+- Do not remove files for bootstrap if resolv or dns. (kwoodson@redhat.com)
+- Fix missing docker option signature-verification (mgugino@redhat.com)
+- Fix prometheus role nfs (zgalor@redhat.com)
+
+* Wed Oct 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.159.0
+- Updating openshift-ansible.spec file to include files dir
+ (sdodson@redhat.com)
+- Bug 1501768: fix eventrouter nodeSelector padding (jwozniak@redhat.com)
+- Reverting proxy image version to v1.0.0 to pass CI (ewolinet@redhat.com)
+- Making travis happy (ewolinet@redhat.com)
+- cri-o: error out when node is a Docker container (gscrivan@redhat.com)
+- Rewire openshift_template_service_broker_namespaces configurable
+ (jminter@redhat.com)
+- Ensure controllerConfig.serviceServingCert is correctly set during upgrade.
+ (abutcher@redhat.com)
+- Updating pattern for elasticsearch_proxy images (ewolinet@redhat.com)
+- Updating ES proxy image prefix and version to match other components
+ (ewolinet@redhat.com)
+- Add ability to set node and master imageConfig to latest (mgugino@redhat.com)
+- Restart all controllers to force reconfiguration during upgrade
+ (sdodson@redhat.com)
+
+* Tue Oct 17 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.158.0
+- Refactor openshift-management entry point (rteague@redhat.com)
+- Add switch to enable/disable container engine's audit log being stored in ES.
+ (jkarasek@redhat.com)
+
+* Mon Oct 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.157.0
+- data migration of embedded etcd not allowed (jchaloup@redhat.com)
+- GlusterFS: remove topology reference from deploy-heketi (jarrpa@redhat.com)
+
+* Mon Oct 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.156.0
+- set initial etcd cluster properly during system container scale up
+ (jchaloup@redhat.com)
+
+* Sun Oct 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.155.0
+-
+
+* Sat Oct 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.154.0
+-
+
+* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.153.0
+- default groups.oo_new_etcd_to_config to an empty list (jchaloup@redhat.com)
+
+* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.152.0
+-
+
+* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.151.0
+- updated dynamic provision section for openshift metrics to support storage
+ class name (elvirkuric@gmail.com)
+
+* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.150.0
+- Ensure upgrade playbook exits on health check failures (rteague@redhat.com)
+- Ensure docker is installed for containerized load balancers
+ (mgugino@redhat.com)
+- Fix containerized node service unit placement order (mgugino@redhat.com)
+- Provisioning Documentation Updates (mgugino@redhat.com)
+
+* Thu Oct 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.149.0
+- Fix broken debug_level (mgugino@redhat.com)
+- Ensure host was reached for proper conditional validation
+ (rteague@redhat.com)
+- Ensure docker service status actually changes (mgugino@redhat.com)
+- Display warnings at the end of the control plane upgrade (sdodson@redhat.com)
+- Force reconciliation of role for 3.6 (simo@redhat.com)
+- Remove etcd health check (sdodson@redhat.com)
+- migrate embedded etcd to external etcd (jchaloup@redhat.com)
+
+* Wed Oct 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.148.0
+- Bug 1490647 - logging-fluentd deployed with openshift_logging_use_mux=false
+ fails to start due to missing (nhosoi@redhat.com)
+- Fix typo in inventory example (rteague@redhat.com)
+- Separate tuned daemon setup into a role. (jmencak@redhat.com)
+- crio, docker: expect openshift_release to have 'v' (gscrivan@redhat.com)
+- rebase on master (maxamillion@fedoraproject.org)
+- Add fedora compatibility (maxamillion@fedoraproject.org)
+- Allow checkpoint status to work across all groups (rteague@redhat.com)
+- Add valid search when search does not exist on resolv.conf
+ (nakayamakenjiro@gmail.com)
+
* Tue Oct 10 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.147.0
- Add PartOf to docker systemd service unit. (mgugino@redhat.com)
- crio: use systemd manager (gscrivan@redhat.com)
diff --git a/playbooks/aws/BUILD_AMI.md b/playbooks/aws/BUILD_AMI.md
new file mode 100644
index 000000000..468264a9a
--- /dev/null
+++ b/playbooks/aws/BUILD_AMI.md
@@ -0,0 +1,21 @@
+# Build AMI
+
+Building a custom AMI for your openshift cluster with these plays involves the
+following steps:
+
+1. Create an instance, using a specified ssh key.
+2. Run openshift-ansible setup roles to ensure packages and services are correctly configured.
+3. Create the AMI.
+4. If encryption is desired
+ - A KMS key is created with the name of $clusterid
+ - An encrypted AMI will be produced with $clusterid KMS key
+5. Terminate the instance used to configure the AMI.
+
+More AMI specific options can be found in ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml). When creating an encrypted AMI please specify use_encryption:
+```
+# openshift_aws_ami_encrypt: True # defaults to false
+```
+
+**Note**: This will take the recently created AMI and encrypt it for later use. If encryption is not desired then set the value to false (it defaults to false). The AMI id will be fetched and used according to its most recent creation date.
diff --git a/playbooks/aws/PREREQUISITES.md b/playbooks/aws/PREREQUISITES.md
new file mode 100644
index 000000000..4f428dcc3
--- /dev/null
+++ b/playbooks/aws/PREREQUISITES.md
@@ -0,0 +1,40 @@
+# Prerequisites
+
+When seeking to deploy a working openshift cluster using these plays, a few
+items must be in place.
+
+These are:
+
+1) a VPC
+2) a security group to build the AMI in
+3) ssh keys to log into instances
+
+These items can be provisioned ahead of time, or you can utilize the plays here
+to create these items.
+
+If you wish to provision these items yourself, or you already have these items
+provisioned and wish to utilize existing components, please refer to
+provisioning_vars.yml.example.
+
+If you wish to have these items created for you, continue with this document.
+
+# Running prerequisites.yml
+
+Warning: Running these plays will provision items in your AWS account (if not
+present), and you may incur billing charges. These plays are not suitable
+for the free-tier.
+
+## Step 1:
+Ensure you have specified all the necessary provisioning variables. See
+provisioning_vars.yml.example and README.md for more information.
+
+## Step 2:
+```
+$ ansible-playbook -i inventory.yml prerequisites.yml -e @provisioning_vars.yml
+```
+
+This will create a VPC, a security group, and an SSH key. These plays are idempotent,
+and multiple runs should result in no additional provisioning of these components.
+
+You can also verify that you will successfully utilize existing components with
+these plays.
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index 816cb35b4..fbab61189 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -8,6 +8,13 @@ With recent desire for provisioning from customers and developers alike, the AWS
deploy highly scalable Openshift clusters utilizing AWS auto scale groups and
custom AMIs.
+To speed up the provisioning of medium and large clusters, openshift-node
+instances are created using a pre-built AMI. A list of pre-built AMIs will
+be available soon.
+
+If the deployer wishes to build their own AMI for provisioning, instructions
+to do so are provided here.
+
### Where do I start?
Before any provisioning may occur, AWS account credentials must be present in the environment. This can be done in two ways:
@@ -31,8 +38,13 @@ Before any provisioning may occur, AWS account credentials must be present in th
### Let's Provision!
-The newly added playbooks are the following:
-- build_ami.yml - Builds a custom AMI. This currently requires the user to supply a valid AMI with access to repositories that contain openshift repositories.
+Warning: Running these plays will provision items in your AWS account (if not
+present), and you may incur billing charges. These plays are not suitable
+for the free-tier.
+
+#### High-level overview
+- prerequisites.yml - Provision VPC, Security Groups, SSH keys, if needed. See PREREQUISITES.md for more information.
+- build_ami.yml - Builds a custom AMI. See BUILD_AMI.md for more information.
- provision.yml - Create a vpc, elbs, security groups, launch config, asg's, etc.
- install.yml - Calls the openshift-ansible installer on the newly created instances
- provision_nodes.yml - Creates the infra and compute node scale groups
@@ -41,82 +53,38 @@ The newly added playbooks are the following:
The current expected work flow should be to provide an AMI with access to Openshift repositories. There should be a repository specified in the `openshift_additional_repos` parameter of the inventory file. The next expectation is a minimal set of values in the `provisioning_vars.yml` file to configure the desired settings for cluster instances. These settings are AWS specific and should be tailored to the consumer's AWS custom account settings.
+Values specified in provisioning_vars.yml may instead be specified in your inventory group_vars
+under the appropriate groups. Most variables can exist in the 'all' group.
+
```yaml
---
-# when creating an AMI set this to True
-# when installing a cluster set this to False
-openshift_node_bootstrap: True
-
-# specify a clusterid
-# openshift_aws_clusterid: default
-
-# specify a region
-# openshift_aws_region: us-east-1
-
-# must specify a base_ami when building an AMI
-# openshift_aws_base_ami: # base image for AMI to build from
-# specify when using a custom AMI
-# openshift_aws_ami:
-
-# when creating an encrypted AMI please specify use_encryption
-# openshift_aws_ami_encrypt: False
-
-# custom certificates are required for the ELB
-# openshift_aws_iam_cert_path: '/path/to/cert/wildcard.<clusterid>.<domain>.com.crt'
-# openshift_aws_iam_cert_key_path: '/path/to/key/wildcard.<clusterid>.<domain>.com.key'
-# openshift_aws_iam_cert_chain_path: '/path/to/ca_cert_file/ca.crt'
-
-# This is required for any ec2 instances
-# openshift_aws_ssh_key_name: myuser_key
-
-# This will ensure these users are created
-#openshift_aws_users:
-#- key_name: myuser_key
-# username: myuser
-# pub_key: |
-# ssh-rsa AAAA
+# Minimum mandatory provisioning variables. See provisioning_vars.yml.example.
+# for more information.
+openshift_deployment_type: # 'origin' or 'openshift-enterprise'
+openshift_release: # example: v3.7
+openshift_pkg_version: # example: -3.7.0
+openshift_aws_ssh_key_name: # example: myuser_key
+openshift_aws_base_ami: # example: ami-12345678
+openshift_aws_iam_cert_path: # example: '/path/to/wildcard.<clusterid>.example.com.crt'
+openshift_aws_iam_key_path: # example: '/path/to/wildcard.<clusterid>.example.com.key'
```
If customization is required for the instances, scale groups, or any other configurable option please see the ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml) for variables and overrides. These overrides can be placed in the `provisioning_vars.yml`, `inventory`, or `group_vars`.
-In order to create the bootstrap-able AMI we need to create an openshift-ansible inventory file. This file enables us to create the AMI using the openshift-ansible node roles. The exception here is that there will be no hosts specified by the inventory file. Here is an example:
-
-```ini
-[OSEv3:children]
-masters
-nodes
-etcd
-
-[OSEv3:vars]
-################################################################################
-# Ensure these variables are set for bootstrap
-################################################################################
-# openshift_deployment_type is required for installation
-openshift_deployment_type=origin
+In order to create the bootstrap-able AMI we need to create a basic openshift-ansible inventory. This enables us to create the AMI using the openshift-ansible node roles. This inventory should not include any hosts, but certain variables should be defined in the appropriate groups, just as when deploying a cluster
+using the normal openshift-ansible method. See provisioning-inventory.example.ini for an example.
-# required when building an AMI. This will
-# be dependent on the version provided by the yum repository
-openshift_pkg_version=-3.6.0
-
-openshift_master_bootstrap_enabled=True
-
-openshift_hosted_router_wait=False
-openshift_hosted_registry_wait=False
-
-# Repository for installation
-openshift_additional_repos=[{'name': 'openshift-repo', 'id': 'openshift-repo', 'baseurl': 'https://mirror.openshift.com/enterprise/enterprise-3.6/latest/x86_64/os/', 'enabled': 'yes', 'gpgcheck': 0, 'sslverify': 'no', 'sslclientcert': '/var/lib/yum/client-cert.pem', 'sslclientkey': '/var/lib/yum/client-key.pem', 'gpgkey': 'https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-release https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-beta https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-openshifthosted'}]
-
-################################################################################
-# cluster specific settings maybe be placed here
+There are more examples of cluster inventory settings [`here`](../../inventory/byo/).
-[masters]
+#### Step 0 (optional)
-[etcd]
+You may provision a VPC, Security Group, and SSH keypair to build the AMI.
-[nodes]
+```
+$ ansible-playbook -i inventory.yml prerequisites.yml -e @provisioning_vars.yml
```
-There are more examples of cluster inventory settings [`here`](../../inventory/byo/).
+See PREREQUISITES.md for more information.
#### Step 1
@@ -126,24 +94,6 @@ Once the `inventory` and the `provisioning_vars.yml` file has been updated with
$ ansible-playbook -i inventory.yml build_ami.yml -e @provisioning_vars.yml
```
-1. This script will build a VPC. Default name will be clusterid if not specified.
-2. Create an ssh key required for the instance.
-3. Create a security group.
-4. Create an instance using the key from step 2 or a specified key.
-5. Run openshift-ansible setup roles to ensure packages and services are correctly configured.
-6. Create the AMI.
-7. If encryption is desired
- - A KMS key is created with the name of $clusterid
- - An encrypted AMI will be produced with $clusterid KMS key
-8. Terminate the instance used to configure the AMI.
-
-More AMI specific options can be found in ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml). When creating an encrypted AMI please specify use_encryption:
-```
-# openshift_aws_ami_encrypt: True # defaults to false
-```
-
-**Note**: This will ensure to take the recently created AMI and encrypt it to be used later. If encryption is not desired then set the value to false (defaults to false). The AMI id will be fetched and used according to its most recent creation date.
-
#### Step 2
Now that we have created an AMI for our Openshift installation, there are two ways to use the AMI.
@@ -167,16 +117,14 @@ $ ansible-playbook provision.yml -e @provisioning_vars.yml
```
This playbook runs through the following steps:
-1. Ensures a VPC is created.
-2. Ensures a SSH key exists.
-3. Creates an s3 bucket for the registry named $clusterid-docker-registry
-4. Create master security groups.
-5. Create a master launch config.
-6. Create the master auto scaling groups.
-7. If certificates are desired for ELB, they will be uploaded.
-8. Create internal and external master ELBs.
-9. Add newly created masters to the correct groups.
-10. Set a couple of important facts for the masters.
+1. Creates an s3 bucket for the registry named $clusterid-docker-registry
+2. Create master security groups.
+3. Create a master launch config.
+4. Create the master auto scaling groups.
+5. If certificates are desired for ELB, they will be uploaded.
+6. Create internal and external master ELBs.
+7. Add newly created masters to the correct groups.
+8. Set a couple of important facts for the masters.
At this point we have successfully created the infrastructure including the master nodes.
@@ -195,13 +143,13 @@ Once this playbook completes, the cluster masters should be installed and config
#### Step 5
-Now that we have a cluster deployed it will be more interesting to create some node types. This can be done easily with the following playbook:
+Now that we have the cluster masters deployed, we need to deploy our infrastructure and compute nodes:
```
$ ansible-playbook provision_nodes.yml -e @provisioning_vars.yml
```
-Once this playbook completes, it should create the compute and infra node scale groups. These nodes will attempt to register themselves to the cluster. These requests must be approved by an administrator.
+Once this playbook completes, it should create the compute and infra node scale groups. These nodes will attempt to register themselves to the cluster. These requests must be approved by an administrator in Step 6.
#### Step 6
diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml
index ffc367f9f..c2c8bea50 100755
--- a/playbooks/aws/openshift-cluster/accept.yml
+++ b/playbooks/aws/openshift-cluster/accept.yml
@@ -42,12 +42,12 @@
until: "'instances' in instancesout and instancesout.instances|length > 0"
- debug:
- msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list | regex_replace('.ec2.internal') }}"
+ msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
- name: approve nodes
oc_adm_csr:
#approve_all: True
- nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list | regex_replace('.ec2.internal') }}"
- timeout: 0
+ nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
+ timeout: 60
register: nodeout
delegate_to: "{{ mastersout.instances[0].public_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml
index 1ab1e8041..ee281929a 100644
--- a/playbooks/aws/openshift-cluster/build_ami.yml
+++ b/playbooks/aws/openshift-cluster/build_ami.yml
@@ -17,14 +17,6 @@
- name: openshift_aws_region
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
-- include: provision_vpc.yml
-
-- include: provision_ssh_keypair.yml
-
-- include: provision_sec_group.yml
- vars:
- openshift_aws_node_group_type: compute
-
- include: provision_instance.yml
vars:
openshift_aws_node_group_type: compute
@@ -35,6 +27,7 @@
- name: set the user to perform installation
set_fact:
ansible_ssh_user: "{{ openshift_aws_build_ami_ssh_user | default('root') }}"
+ openshift_node_bootstrap: True
# This is the part that installs all of the software and configs for the instance
# to become a node.
diff --git a/playbooks/aws/openshift-cluster/prerequisites.yml b/playbooks/aws/openshift-cluster/prerequisites.yml
new file mode 100644
index 000000000..df77fe3bc
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/prerequisites.yml
@@ -0,0 +1,8 @@
+---
+- include: provision_vpc.yml
+
+- include: provision_ssh_keypair.yml
+
+- include: provision_sec_group.yml
+ vars:
+ openshift_aws_node_group_type: compute
diff --git a/playbooks/aws/openshift-cluster/provisioning_vars.example.yml b/playbooks/aws/openshift-cluster/provisioning_vars.example.yml
deleted file mode 100644
index 28eb9c993..000000000
--- a/playbooks/aws/openshift-cluster/provisioning_vars.example.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-# when creating an AMI set this option to True
-# when installing the cluster, set this to False
-openshift_node_bootstrap: True
-
-# specify a clusterid
-#openshift_aws_clusterid: default
-
-# must specify a base_ami when building an AMI
-#openshift_aws_base_ami:
-
-# when creating an encrypted AMI please specify use_encryption
-#openshift_aws_ami_encrypt: False
-
-# custom certificates are required for the ELB
-#openshift_aws_iam_cert_path: '/path/to/wildcard.<clusterid>.example.com.crt'
-#openshift_aws_iam_key_path: '/path/to/wildcard.<clusterid>.example.com.key'
-#openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt'
-
-# This is required for any ec2 instances
-#openshift_aws_ssh_key_name: myuser_key
-
-# This will ensure these users are created
-#openshift_aws_users:
-#- key_name: myuser_key
-# username: myuser
-# pub_key: |
-# ssh-rsa AAAA
diff --git a/playbooks/aws/provisioning-inventory.example.ini b/playbooks/aws/provisioning-inventory.example.ini
new file mode 100644
index 000000000..238a7eb2f
--- /dev/null
+++ b/playbooks/aws/provisioning-inventory.example.ini
@@ -0,0 +1,25 @@
+[OSEv3:children]
+masters
+nodes
+etcd
+
+[OSEv3:vars]
+################################################################################
+# Ensure these variables are set for bootstrap
+################################################################################
+# openshift_deployment_type is required for installation
+openshift_deployment_type=origin
+
+openshift_master_bootstrap_enabled=True
+
+openshift_hosted_router_wait=False
+openshift_hosted_registry_wait=False
+
+################################################################################
+# cluster specific settings may be placed here
+
+[masters]
+
+[etcd]
+
+[nodes]
diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example
new file mode 100644
index 000000000..aa91363ae
--- /dev/null
+++ b/playbooks/aws/provisioning_vars.yml.example
@@ -0,0 +1,120 @@
+---
+# Variables that are commented in this file are optional; uncommented variables
+# are mandatory.
+
+# Default values for each variable are provided, as applicable.
+# Example values for mandatory variables are provided as a comment at the end
+# of the line.
+
+# ------------------------ #
+# Common/Cluster Variables #
+# ------------------------ #
+# Variables in this section affect all areas of the cluster
+
+# Deployment type must be specified.
+openshift_deployment_type: # 'origin' or 'openshift-enterprise'
+
+# openshift_release must be specified. Use whatever version of openshift
+# that is supported by openshift-ansible that you wish.
+openshift_release: # v3.7
+
+# This will be dependent on the version provided by the yum repository
+openshift_pkg_version: # -3.7.0
+
+# specify a clusterid
+# This value is also used as the default value for many other components.
+#openshift_aws_clusterid: default
+
+# AWS region
+# This value will instruct the plays where all items should be created.
+# Multi-region deployments are not supported using these plays at this time.
+#openshift_aws_region: us-east-1
+
+#openshift_aws_create_launch_config: true
+#openshift_aws_create_scale_group: true
+
+# --- #
+# VPC #
+# --- #
+
+# openshift_aws_create_vpc defaults to true. If you don't wish to provision
+# a vpc, set this to false.
+#openshift_aws_create_vpc: true
+
+# Name of the vpc. Needs to be set if using a pre-existing vpc.
+#openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
+
+# Name of the subnet in the vpc to use. Needs to be set if using a pre-existing
+# vpc + subnet.
+#openshift_aws_subnet_name:
+
+# -------------- #
+# Security Group #
+# -------------- #
+
+# openshift_aws_create_security_groups defaults to true. If you wish to use
+# an existing security group, set this to false.
+#openshift_aws_create_security_groups: true
+
+# openshift_aws_build_ami_group is the name of the security group to build the
+# ami in. This defaults to the value of openshift_aws_clusterid.
+#openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"
+
+# openshift_aws_launch_config_security_groups specifies the security groups to
+# apply to the launch config. The launch config security groups will be what
+# the cluster actually is deployed in.
+#openshift_aws_launch_config_security_groups: see roles/openshift_aws/defaults.yml
+
+# openshift_aws_node_security_groups are created when
+# openshift_aws_create_security_groups is set to true.
+#openshift_aws_node_security_groups: see roles/openshift_aws/defaults.yml
+
+# -------- #
+# ssh keys #
+# -------- #
+
+# Specify the key pair name here to connect to the provisioned instances. This
+# can be an existing key, or it can be one of the keys specified in
+# openshift_aws_users
+openshift_aws_ssh_key_name: # myuser_key
+
+# This will ensure these user and public keys are created.
+#openshift_aws_users:
+#- key_name: myuser_key
+# username: myuser
+# pub_key: |
+# ssh-rsa AAAA
+
+# When building the AMI, specify the user to ssh to the instance as.
+# openshift_aws_build_ami_ssh_user: root
+
+# --------- #
+# AMI Build #
+# --------- #
+# Variables in this section apply to building a node AMI for use in your
+# openshift cluster.
+
+# must specify a base_ami when building an AMI
+openshift_aws_base_ami: # ami-12345678
+
+# when creating an encrypted AMI please specify use_encryption
+#openshift_aws_ami_encrypt: False
+
+# -- #
+# S3 #
+# -- #
+
+# Create an s3 bucket.
+#openshift_aws_create_s3: True
+
+# --- #
+# ELB #
+# --- #
+
+# openshift_aws_elb_name will be the base-name of the ELBs.
+#openshift_aws_elb_name: "{{ openshift_aws_clusterid }}"
+
+# custom certificates are required for the ELB
+openshift_aws_iam_cert_path: # '/path/to/wildcard.<clusterid>.example.com.crt'
+openshift_aws_iam_key_path: # '/path/to/wildcard.<clusterid>.example.com.key'
+#openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt'
diff --git a/playbooks/byo/openshift-etcd/embedded2external.yml b/playbooks/byo/openshift-etcd/embedded2external.yml
new file mode 100644
index 000000000..6690a7624
--- /dev/null
+++ b/playbooks/byo/openshift-etcd/embedded2external.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-etcd/embedded2external.yml
diff --git a/playbooks/byo/openshift-management/config.yml b/playbooks/byo/openshift-management/config.yml
index 33a555cc1..e8795ef85 100644
--- a/playbooks/byo/openshift-management/config.yml
+++ b/playbooks/byo/openshift-management/config.yml
@@ -1,7 +1,5 @@
---
- include: ../openshift-cluster/initialize_groups.yml
- tags:
- - always
- include: ../../common/openshift-cluster/evaluate_groups.yml
diff --git a/playbooks/byo/openshift-management/uninstall.yml b/playbooks/byo/openshift-management/uninstall.yml
index ebd6fb261..a1fb1cdc4 100644
--- a/playbooks/byo/openshift-management/uninstall.yml
+++ b/playbooks/byo/openshift-management/uninstall.yml
@@ -1,6 +1,4 @@
---
# - include: ../openshift-cluster/initialize_groups.yml
-# tags:
-# - always
- include: ../../common/openshift-management/uninstall.yml
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index b399ea995..395eb51f1 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -8,7 +8,10 @@
vars:
- r_openshift_health_checker_playbook_context: install
post_tasks:
- - action: openshift_health_check
+
+ - name: Verify Requirements - EL
+ when: ansible_distribution != "Fedora"
+ action: openshift_health_check
args:
checks:
- disk_availability
@@ -17,6 +20,12 @@
- package_version
- docker_image_availability
- docker_storage
+ - name: Verify Requirements - Fedora
+ when: ansible_distribution == "Fedora"
+ action: openshift_health_check
+ args:
+ checks:
+ - docker_image_availability
- include: ../openshift-etcd/config.yml
@@ -46,7 +55,7 @@
- include: service_catalog.yml
when: openshift_enable_service_catalog | default(false) | bool
-- include: openshift_management.yml
+- include: ../openshift-management/config.yml
when: openshift_management_install_management | default(false) | bool
- name: Print deprecated variable warning message if necessary
diff --git a/playbooks/common/openshift-cluster/create_persistent_volumes.yml b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
index 8a60a30b8..ec6f2c52c 100644
--- a/playbooks/common/openshift-cluster/create_persistent_volumes.yml
+++ b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
@@ -1,4 +1,13 @@
---
+- name: Create persistent volumes
+ hosts: oo_first_master
+ vars:
+ persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
+ persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
+ tasks:
+ - debug: var=persistent_volumes
+ - debug: var=persistent_volume_claims
+
- name: Create Hosted Resources - persistent volumes
hosts: oo_first_master
vars:
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index e55b2f964..78b552279 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -51,7 +51,7 @@
when:
- g_etcd_hosts | default([]) | length not in [3,1]
- not openshift_master_unsupported_embedded_etcd | default(False)
- - not openshift_node_bootstrap | default(False)
+ - not (openshift_node_bootstrap | default(False))
- name: Evaluate oo_all_hosts
add_host:
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 32e5e708a..c1536eb36 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -1,7 +1,6 @@
---
- name: Hosted Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Hosted install 'In Progress'
@@ -26,8 +25,7 @@
when: openshift_hosted_prometheus_deploy | default(False) | bool
- name: Hosted Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Hosted install 'Complete'
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
index 69f50fbcd..529a4c939 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/common/openshift-cluster/openshift_logging.yml
@@ -1,7 +1,6 @@
---
- name: Logging Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Logging install 'In Progress'
@@ -24,8 +23,7 @@
tasks_from: update_master_config
- name: Logging Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Logging install 'Complete'
diff --git a/playbooks/common/openshift-cluster/openshift_management.yml b/playbooks/common/openshift-cluster/openshift_management.yml
deleted file mode 100644
index 6e582920b..000000000
--- a/playbooks/common/openshift-cluster/openshift_management.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: Management Install Checkpoint Start
- hosts: localhost
- connection: local
- gather_facts: false
- tasks:
- - name: Set Management install 'In Progress'
- set_stats:
- data:
- installer_phase_Management: "In Progress"
- aggregate: false
-
-- name: Management
- include: ../openshift-management/config.yml
-
-- name: Management Install Checkpoint End
- hosts: localhost
- connection: local
- gather_facts: false
- tasks:
- - name: Set Management install 'Complete'
- set_stats:
- data:
- installer_phase_Management: "Complete"
- aggregate: false
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
index e369dcd86..9c0bd489b 100644
--- a/playbooks/common/openshift-cluster/openshift_metrics.yml
+++ b/playbooks/common/openshift-cluster/openshift_metrics.yml
@@ -1,7 +1,6 @@
---
- name: Metrics Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Metrics install 'In Progress'
@@ -25,8 +24,7 @@
tasks_from: update_master_config.yaml
- name: Metrics Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Metrics install 'Complete'
diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml
index 95a8f601c..bd964b2ce 100644
--- a/playbooks/common/openshift-cluster/service_catalog.yml
+++ b/playbooks/common/openshift-cluster/service_catalog.yml
@@ -1,7 +1,6 @@
---
- name: Service Catalog Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Service Catalog install 'In Progress'
@@ -20,8 +19,7 @@
first_master: "{{ groups.oo_first_master[0] }}"
- name: Service Catalog Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Service Catalog install 'Complete'
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
index 090ad6445..45b34c8bd 100644
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ b/playbooks/common/openshift-cluster/std_include.yml
@@ -1,7 +1,6 @@
---
- name: Initialization Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
roles:
- installer_checkpoint
@@ -37,8 +36,7 @@
- always
- name: Initialization Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set install initialization 'Complete'
diff --git a/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml b/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml
new file mode 100644
index 000000000..9c9c260fb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml
@@ -0,0 +1,37 @@
+---
+apiVersion: v1
+kind: Role
+metadata:
+ name: shared-resource-viewer
+ namespace: openshift
+rules:
+- apiGroups:
+ - ""
+ - template.openshift.io
+ attributeRestrictions: null
+ resources:
+ - templates
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ - image.openshift.io
+ attributeRestrictions: null
+ resources:
+ - imagestreamimages
+ - imagestreams
+ - imagestreamtags
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ - image.openshift.io
+ attributeRestrictions: null
+ resources:
+ - imagestreams/layers
+ verbs:
+ - get
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index 72de63070..fc1cbf32a 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -30,6 +30,7 @@
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: " {{ groups['oo_nodes_to_config'] }}"
when:
+ - hostvars[item].openshift is defined
- hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
changed_when: false
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 07e521a89..122066955 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -103,9 +103,16 @@
openshift_hosted_templates_import_command: replace
# Check for warnings to be printed at the end of the upgrade:
-- name: Check for warnings
+- name: Clean up and display warnings
hosts: oo_masters_to_config
- tasks:
+ tags:
+ - always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ post_tasks:
# Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond:
- name: grep pluginOrderOverride
command: grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml
@@ -121,12 +128,8 @@
- not grep_plugin_order_override | skipped
- grep_plugin_order_override.rc == 0
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_masters_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ - name: Warn if shared-resource-viewer could not be updated
+ debug:
+ msg: "WARNING the shared-resource-viewer role could not be upgraded to 3.6 spec because it's marked protected, please see https://bugzilla.redhat.com/show_bug.cgi?id=1493213"
+ when:
+ - __shared_resource_viewer_protected | default(false)
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
index ad6325ca0..2a8de50a2 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
@@ -1,12 +1,14 @@
---
-- name: Verify Host Requirements
+- name: OpenShift Health Checks
hosts: oo_all_hosts
+ any_errors_fatal: true
roles:
- openshift_health_checker
vars:
- r_openshift_health_checker_playbook_context: upgrade
post_tasks:
- - action: openshift_health_check
+ - name: Run health checks (upgrade)
+ action: openshift_health_check
args:
checks:
- disk_availability
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index da47491c1..c37a5f9ab 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -31,7 +31,6 @@
role: master
local_facts:
embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level | default(2)) }}"
- name: Upgrade and backup etcd
include: ./etcd/main.yml
@@ -193,6 +192,7 @@
# Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
# restart.
skip_docker_role: True
+ __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
tasks:
- name: Reconcile Cluster Roles
command: >
@@ -231,6 +231,44 @@
- reconcile_jenkins_role_binding_result.rc == 0
when: (not openshift.common.version_gte_3_7 | bool) and (openshift.common.version_gte_3_4_or_1_4 | bool)
+ - when: (openshift.common.version_gte_3_6 | bool) and (not openshift.common.version_gte_3_7 | bool)
+ block:
+ - name: Retrieve shared-resource-viewer
+ oc_obj:
+ state: list
+ kind: role
+ name: "shared-resource-viewer"
+ namespace: "openshift"
+ register: objout
+
+ - name: Determine if shared-resource-viewer is protected
+ set_fact:
+ __shared_resource_viewer_protected: true
+ when:
+ - "'results' in objout"
+ - "'results' in objout['results']"
+ - "'annotations' in objout['results']['results'][0]['metadata']"
+ - "'openshift.io/reconcile-protect' in objout['results']['results'][0]['metadata']['annotations']"
+ - "objout['results']['results'][0]['metadata']['annotations']['openshift.io/reconcile-protect'] == 'true'"
+
+ - copy:
+ src: "{{ item }}"
+ dest: "/tmp/{{ item }}"
+ with_items:
+ - "{{ __master_shared_resource_viewer_file }}"
+ when: __shared_resource_viewer_protected is not defined
+
+ - name: Fixup shared-resource-viewer role
+ oc_obj:
+ state: present
+ kind: role
+ name: "shared-resource-viewer"
+ namespace: "openshift"
+ files:
+ - "/tmp/{{ __master_shared_resource_viewer_file }}"
+ delete_after: true
+ when: __shared_resource_viewer_protected is not defined
+
- name: Reconcile Security Context Constraints
command: >
{{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
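
For operators who want to confirm what the block above tests for, here is a standalone sketch for inspecting the reconcile-protect annotation; the jsonpath expression and the bare oc invocation (admin kubeconfig handling omitted) are assumptions, not part of the playbook:

    - hosts: oo_first_master
      gather_facts: false
      tasks:
        - name: Show the reconcile-protect annotation on shared-resource-viewer
          # Assumes oc is on PATH and already authenticated as a cluster admin
          command: >
            oc get role shared-resource-viewer -n openshift
            -o jsonpath='{.metadata.annotations.openshift\.io/reconcile-protect}'
          register: protect_out
          changed_when: false
        - debug:
            msg: "openshift.io/reconcile-protect is '{{ protect_out.stdout }}'"
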
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
index d69472fad..5e7a66171 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
@@ -41,12 +41,12 @@
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.servicesServingCert.signer.certFile'
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
yaml_value: service-signer.crt
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.servicesServingCert.signer.keyFile'
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
yaml_value: service-signer.key
- modify_yaml:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
index ed89dbe8d..52458e03c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
@@ -1,16 +1,10 @@
---
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
index ed89dbe8d..52458e03c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
@@ -1,16 +1,10 @@
---
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
index ed89dbe8d..db0c8f886 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
@@ -1,16 +1,15 @@
---
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: ca.crt
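
For reference, the modify_yaml keys above address nested paths in master-config.yaml; after this hook runs, the touched stanzas would look roughly like the following (other keys omitted, values as set above):

    controllerConfig:
      serviceServingCert:
        signer:
          certFile: service-signer.crt
          keyFile: service-signer.key
    servingInfo:
      clientCA: ca.crt
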
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
index df59a8782..1d4d1919c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
@@ -1,21 +1,20 @@
---
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
+ yaml_key: 'controllerConfig.election.lockName'
+ yaml_value: 'openshift-master-controllers'
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.election.lockName'
- yaml_value: 'openshift-master-controllers'
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: ca.crt
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index f1ca1edb9..bf3b94682 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -119,8 +119,24 @@
tasks:
- include: ../cleanup_unused_images.yml
+# TODO: Why doesn't this compose using ./upgrade_control_plane rather than
+# ../upgrade_control_plane?
- include: ../upgrade_control_plane.yml
+# All controllers must be stopped at the same time, then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_etcd_to_config
+ gather_facts: no
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+
- include: ../upgrade_nodes.yml
- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 6c4f9671b..b91bea617 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -128,4 +128,18 @@
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
+# All controllers must be stopped at the same time, then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_etcd_to_config
+ gather_facts: no
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+
- include: ../post_control_plane.yml
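
A quick, illustrative follow-up check (not part of the upgrade playbook) to confirm the controller services came back after the stop/start cycle above; it targets the same group the play uses and assumes openshift facts are already set for these hosts:

    - name: Verify master controllers are active again
      hosts: oo_etcd_to_config
      gather_facts: no
      tasks:
        - name: Check controller service state
          command: "systemctl is-active {{ openshift.common.service_type }}-master-controllers"
          register: ctrl_state
          changed_when: false
          failed_when: ctrl_state.stdout.strip() != 'active'
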
diff --git a/playbooks/common/openshift-etcd/certificates.yml b/playbooks/common/openshift-etcd/certificates.yml
index 31a0f50d8..eb6b94f33 100644
--- a/playbooks/common/openshift-etcd/certificates.yml
+++ b/playbooks/common/openshift-etcd/certificates.yml
@@ -1,29 +1,4 @@
---
-- name: Create etcd server certificates for etcd hosts
- hosts: oo_etcd_to_config
- any_errors_fatal: true
- roles:
- - role: openshift_etcd_facts
- post_tasks:
- - include_role:
- name: etcd
- tasks_from: server_certificates
- vars:
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+- include: server_certificates.yml
-- name: Create etcd client certificates for master hosts
- hosts: oo_masters_to_config
- any_errors_fatal: true
- roles:
- - role: openshift_etcd_facts
- - role: openshift_etcd_client_certificates
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
- etcd_cert_prefix: "master.etcd-"
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
+- include: master_etcd_certificates.yml
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 82539dac8..48d46bbb0 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -1,7 +1,6 @@
---
- name: etcd Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set etcd install 'In Progress'
@@ -27,8 +26,7 @@
- role: nickhammond.logrotate
- name: etcd Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set etcd install 'Complete'
diff --git a/playbooks/common/openshift-etcd/embedded2external.yml b/playbooks/common/openshift-etcd/embedded2external.yml
new file mode 100644
index 000000000..9264f3c32
--- /dev/null
+++ b/playbooks/common/openshift-etcd/embedded2external.yml
@@ -0,0 +1,172 @@
+---
+- name: Pre-migrate checks
+ hosts: localhost
+ tasks:
+ # Check there is only one etcd host
+ - assert:
+ that: groups.oo_etcd_to_config | default([]) | length == 1
+ msg: "[etcd] group must contain only one host"
+ # Check there is only one master
+ - assert:
+ that: groups.oo_masters_to_config | default([]) | length == 1
+ msg: "[master] group must contain only one host"
+
+# 1. stop a master
+- name: Prepare masters for etcd data migration
+ hosts: oo_first_master
+ roles:
+ - role: openshift_facts
+ tasks:
+ - name: Check the master API is ready
+ include_role:
+ name: openshift_master
+ tasks_from: check_master_api_is_ready
+ - set_fact:
+ master_service: "{{ openshift.common.service_type + '-master' }}"
+ embedded_etcd_backup_suffix: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ - debug:
+ msg: "master service name: {{ master_service }}"
+ - name: Stop master
+ service:
+ name: "{{ master_service }}"
+ state: stopped
+ # 2. backup embedded etcd
+ # Can't use with_items with include_role: https://github.com/ansible/ansible/issues/21285
+ - include_role:
+ name: etcd
+ tasks_from: backup
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_embedded_etcd: "{{ true }}"
+ r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}"
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.archive
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_embedded_etcd: "{{ true }}"
+ r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}"
+
+# 3. deploy certificates (for etcd and master)
+- include: ca.yml
+
+- include: server_certificates.yml
+
+- name: Backup etcd client certificates for master host
+ hosts: oo_first_master
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup_master_etcd_certificates
+
+- name: Redeploy master etcd certificates
+ include: master_etcd_certificates.yml
+ vars:
+ etcd_certificates_redeploy: "{{ true }}"
+
+# 4. deploy external etcd
+- include: ../openshift-etcd/config.yml
+
+# 5. stop external etcd
+- name: Cleanse etcd
+ hosts: oo_etcd_to_config[0]
+ gather_facts: no
+ pre_tasks:
+ - include_role:
+ name: etcd
+ tasks_from: disable_etcd
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ - include_role:
+ name: etcd
+ tasks_from: clean_data
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+
+# 6. copy the embedded etcd backup to the external host
+# TODO(jchaloup): if the etcd and first master are on the same host, just copy the directory
+- name: Copy embedded etcd backup to the external host
+ hosts: localhost
+ tasks:
+ - name: Create local temp directory for syncing etcd backup
+ local_action: command mktemp -d /tmp/etcd_backup-XXXXXXX
+ register: g_etcd_client_mktemp
+ changed_when: False
+ become: no
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.fetch
+ vars:
+ r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_first_master.0].openshift.common.etcd_runtime }}"
+ etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_embedded_etcd: "{{ true }}"
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+ delegate_to: "{{ groups.oo_first_master[0] }}"
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.copy
+ vars:
+ r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.etcd_runtime }}"
+ etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+ delegate_to: "{{ groups.oo_etcd_to_config[0] }}"
+
+ - debug:
+ msg: "etcd_backup_dest_directory: {{ g_etcd_client_mktemp.stdout }}"
+
+ - name: Delete temporary directory
+ local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent
+ changed_when: False
+ become: no
+
+# 7. force new cluster from the backup
+- name: Force new etcd cluster
+ hosts: oo_etcd_to_config[0]
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup.unarchive
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.force_new_cluster
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+ etcd_peer: "{{ openshift.common.ip }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
+
+# 8. re-configure master to use the external etcd
+- name: Configure master to use external etcd
+ hosts: oo_first_master
+ tasks:
+ - include_role:
+ name: openshift_master
+ tasks_from: configure_external_etcd
+ vars:
+ etcd_peer_url_scheme: "https"
+ etcd_ip: "{{ openshift.common.ip }}"
+ etcd_peer_port: 2379
+
+ # 9. start the master
+ - name: Start master
+ service:
+ name: "{{ master_service }}"
+ state: started
+ register: service_status
+ until: service_status.state is defined and service_status.state == "started"
+ retries: 5
+ delay: 10
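
The pre-migrate asserts above require exactly one master and one etcd host; a minimal illustrative inventory satisfying them, written in YAML inventory form (the repo's own examples use the INI hosts.example layout, and all hostnames here are placeholders), could look like:

    all:
      children:
        masters:
          hosts:
            master1.example.com:
        etcd:
          hosts:
            etcd1.example.com:
        nodes:
          hosts:
            master1.example.com:
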
diff --git a/playbooks/common/openshift-etcd/master_etcd_certificates.yml b/playbooks/common/openshift-etcd/master_etcd_certificates.yml
new file mode 100644
index 000000000..0a25aac57
--- /dev/null
+++ b/playbooks/common/openshift-etcd/master_etcd_certificates.yml
@@ -0,0 +1,14 @@
+---
+- name: Create etcd client certificates for master hosts
+ hosts: oo_masters_to_config
+ any_errors_fatal: true
+ roles:
+ - role: openshift_etcd_facts
+ - role: openshift_etcd_client_certificates
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
+ etcd_cert_prefix: "master.etcd-"
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml
index 2456ad3a8..31362f2f6 100644
--- a/playbooks/common/openshift-etcd/migrate.yml
+++ b/playbooks/common/openshift-etcd/migrate.yml
@@ -1,4 +1,17 @@
---
+- name: Check if the master has embedded etcd
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tags:
+ - always
+ tasks:
+ - fail:
+ msg: "Migration of an embedded etcd is not supported. Please, migrate the embedded etcd into an external etcd first."
+ when:
+ - groups.oo_etcd_to_config | default([]) | length == 0
+
- name: Run pre-checks
hosts: oo_etcd_to_migrate
tasks:
@@ -60,12 +73,11 @@
hosts: oo_etcd_to_migrate
gather_facts: no
pre_tasks:
- - set_fact:
- l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}"
- - name: Disable etcd members
- service:
- name: "{{ l_etcd_service }}"
- state: stopped
+ - include_role:
+ name: etcd
+ tasks_from: disable_etcd
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- name: Migrate data on first etcd
hosts: oo_etcd_to_migrate[0]
diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml
index b5ba2bbba..20061366c 100644
--- a/playbooks/common/openshift-etcd/scaleup.yml
+++ b/playbooks/common/openshift-etcd/scaleup.yml
@@ -46,7 +46,7 @@
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_initial_cluster_state: "existing"
- initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
+ etcd_initial_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
etcd_ca_setup: False
r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- role: nickhammond.logrotate
@@ -71,7 +71,7 @@
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
- | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config']))
+ | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'] | default([]) ))
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
diff --git a/playbooks/common/openshift-etcd/server_certificates.yml b/playbooks/common/openshift-etcd/server_certificates.yml
new file mode 100644
index 000000000..10e06747b
--- /dev/null
+++ b/playbooks/common/openshift-etcd/server_certificates.yml
@@ -0,0 +1,15 @@
+---
+- name: Create etcd server certificates for etcd hosts
+ hosts: oo_etcd_to_config
+ any_errors_fatal: true
+ roles:
+ - role: openshift_etcd_facts
+ post_tasks:
+ - include_role:
+ name: etcd
+ tasks_from: server_certificates
+ vars:
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
index 516618de2..80cda9e21 100644
--- a/playbooks/common/openshift-glusterfs/config.yml
+++ b/playbooks/common/openshift-glusterfs/config.yml
@@ -1,7 +1,6 @@
---
- name: GlusterFS Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set GlusterFS install 'In Progress'
@@ -37,8 +36,7 @@
when: groups.oo_glusterfs_to_config | default([]) | count > 0
- name: GlusterFS Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set GlusterFS install 'Complete'
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml
index ecbb092bc..2a703cb61 100644
--- a/playbooks/common/openshift-loadbalancer/config.yml
+++ b/playbooks/common/openshift-loadbalancer/config.yml
@@ -1,7 +1,6 @@
---
- name: Load Balancer Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set load balancer install 'In Progress'
@@ -10,6 +9,15 @@
installer_phase_loadbalancer: "In Progress"
aggregate: false
+- name: Configure firewall and docker for load balancers
+ hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config
+ vars:
+ openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
+ roles:
+ - role: os_firewall
+ - role: openshift_docker
+ when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool
+
- name: Configure load balancers
hosts: oo_lb_to_config
vars:
@@ -25,12 +33,11 @@
+ openshift_loadbalancer_additional_backends | default([]) }}"
openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
roles:
- - role: os_firewall
- role: openshift_loadbalancer
+ - role: tuned
- name: Load Balancer Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set load balancer install 'Complete'
diff --git a/playbooks/common/openshift-management/config.yml b/playbooks/common/openshift-management/config.yml
index 0aaafe440..908679e81 100644
--- a/playbooks/common/openshift-management/config.yml
+++ b/playbooks/common/openshift-management/config.yml
@@ -1,4 +1,14 @@
---
+- name: Management Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Management install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_management: "In Progress"
+ aggregate: false
+
- name: Setup CFME
hosts: oo_first_master
pre_tasks:
@@ -13,3 +23,13 @@
name: openshift_management
vars:
template_dir: "{{ hostvars[groups.masters.0].r_openshift_management_mktemp.stdout }}"
+
+- name: Management Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Management install 'Complete'
+ set_stats:
+ data:
+ installer_phase_management: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml
index ee76e2ed7..e1472ce38 100644
--- a/playbooks/common/openshift-master/additional_config.yml
+++ b/playbooks/common/openshift-master/additional_config.yml
@@ -1,7 +1,6 @@
---
- name: Master Additional Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Master Additional install 'In Progress'
@@ -26,7 +25,7 @@
- role: openshift_hosted_templates
registry_url: "{{ openshift.master.registry_url }}"
- role: openshift_manageiq
- when: openshift_use_manageiq | default(false) | bool
+ when: openshift_use_manageiq | default(true) | bool
- role: cockpit
when:
- openshift.common.is_atomic
@@ -37,8 +36,7 @@
when: openshift_use_flannel | default(false) | bool
- name: Master Additional Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Master Additional install 'Complete'
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index bc1fee982..b359919ba 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -1,7 +1,6 @@
---
- name: Master Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Master install 'In Progress'
@@ -198,6 +197,7 @@
openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}"
openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}"
openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}"
+ - role: tuned
- role: nuage_ca
when: openshift_use_nuage | default(false) | bool
- role: nuage_common
@@ -206,6 +206,12 @@
when: openshift_use_nuage | default(false) | bool
- role: calico_master
when: openshift_use_calico | default(false) | bool
+ tasks:
+ - include_role:
+ name: kuryr
+ tasks_from: master
+ when: openshift_use_kuryr | default(false) | bool
+
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
@@ -226,8 +232,7 @@
r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Master Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Master install 'Complete'
diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml
index 66303d6f7..ce672daf5 100644
--- a/playbooks/common/openshift-nfs/config.yml
+++ b/playbooks/common/openshift-nfs/config.yml
@@ -1,7 +1,6 @@
---
- name: NFS Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set NFS install 'In Progress'
@@ -17,8 +16,7 @@
- role: openshift_storage_nfs
- name: NFS Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set NFS install 'Complete'
diff --git a/playbooks/common/openshift-node/additional_config.yml b/playbooks/common/openshift-node/additional_config.yml
index fe51ef833..ac757397b 100644
--- a/playbooks/common/openshift-node/additional_config.yml
+++ b/playbooks/common/openshift-node/additional_config.yml
@@ -19,10 +19,14 @@
- group_by:
key: oo_nodes_use_{{ (openshift_use_contiv | default(False)) | ternary('contiv','nothing') }}
changed_when: False
+ # Create group for kuryr nodes
+ - group_by:
+ key: oo_nodes_use_{{ (openshift_use_kuryr | default(False)) | ternary('kuryr','nothing') }}
+ changed_when: False
- include: etcd_client_config.yml
vars:
- openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv"
+ openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv:oo_nodes_use_kuryr"
- name: Additional node config
hosts: oo_nodes_use_flannel
@@ -50,3 +54,11 @@
- role: contiv
contiv_role: netplugin
when: openshift_use_contiv | default(false) | bool
+
+- name: Configure Kuryr node
+ hosts: oo_nodes_use_kuryr
+ tasks:
+ - include_role:
+ name: kuryr
+ tasks_from: node
+ when: openshift_use_kuryr | default(false) | bool
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 700aab48c..4f8f98aef 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -1,7 +1,6 @@
---
- name: Node Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Node install 'In Progress'
@@ -25,8 +24,7 @@
- include: enable_excluders.yml
- name: Node Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Node install 'Complete'
diff --git a/playbooks/common/openshift-node/configure_nodes.yml b/playbooks/common/openshift-node/configure_nodes.yml
index c96e4921c..17259422d 100644
--- a/playbooks/common/openshift-node/configure_nodes.yml
+++ b/playbooks/common/openshift-node/configure_nodes.yml
@@ -13,4 +13,5 @@
roles:
- role: os_firewall
- role: openshift_node
+ - role: tuned
- role: nickhammond.logrotate
diff --git a/playbooks/common/openshift-node/image_prep.yml b/playbooks/common/openshift-node/image_prep.yml
index fc06621ee..00d167c22 100644
--- a/playbooks/common/openshift-node/image_prep.yml
+++ b/playbooks/common/openshift-node/image_prep.yml
@@ -2,13 +2,13 @@
- name: normalize groups
include: ../../byo/openshift-cluster/initialize_groups.yml
-- name: run the std_include
+- name: evaluate the groups
include: ../openshift-cluster/evaluate_groups.yml
-- name: run the std_include
+- name: initialize the facts
include: ../openshift-cluster/initialize_facts.yml
-- name: run the std_include
+- name: initialize the repositories
include: ../openshift-cluster/initialize_openshift_repos.yml
- name: run node config setup
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
index 0f4b71124..9a91927b8 100644
--- a/roles/ansible_service_broker/tasks/install.yml
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -30,8 +30,12 @@
ansible_service_broker_image: "{{ ansible_service_broker_image_prefix }}ansible-service-broker:{{ ansible_service_broker_image_tag }}"
ansible_service_broker_etcd_image: "{{ ansible_service_broker_etcd_image_prefix }}etcd:{{ ansible_service_broker_etcd_image_tag }}"
+- set_fact:
+ openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+ when: openshift_master_config_dir is undefined
+
- slurp:
- src: "{{ ansible_service_broker_certs_dir }}/ca.crt"
+ src: "{{ openshift_master_config_dir }}/service-signer.crt"
register: catalog_ca
@@ -231,6 +235,20 @@
value: /etc/ansible-service-broker/config.yaml
resources: {}
terminationMessagePath: /tmp/termination-log
+ readinessProbe:
+ httpGet:
+ port: 1338
+ path: /healthz
+ scheme: HTTPS
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ livenessProbe:
+ httpGet:
+ port: 1338
+ path: /healthz
+ scheme: HTTPS
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
- image: "{{ ansible_service_broker_etcd_image }}"
name: etcd
@@ -327,12 +345,12 @@
oc_obj:
name: ansible-service-broker
state: present
- kind: ServiceBroker
+ kind: ClusterServiceBroker
content:
path: /tmp/brokerout
data:
- apiVersion: servicecatalog.k8s.io/v1alpha1
- kind: ServiceBroker
+ apiVersion: servicecatalog.k8s.io/v1beta1
+ kind: ClusterServiceBroker
metadata:
name: ansible-service-broker
spec:
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index e36dfa7b9..1c830cb4e 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -1,5 +1,6 @@
---
docker_cli_auth_config_path: '/root/.docker'
+openshift_docker_signature_verification: False
# oreg_url is defined by user input.
oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
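
The new default disables Docker image signature verification; the flag is appended to the Docker OPTIONS line in the package_docker.yml hunk below. Hosts that need verification re-enabled can override the default, for example in group_vars (an illustrative snippet, not an existing file in the repo):

    # group_vars/all.yml
    openshift_docker_signature_verification: True
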
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index dbe0b0d28..7ccab37a5 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -115,11 +115,12 @@
dest: /etc/sysconfig/docker
regexp: '^OPTIONS=.*$'
line: "OPTIONS='\
- {% if ansible_selinux.status | default(None) == 'enabled' and docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %}\
- {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %}\
- {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\
- {% if docker_options is defined %} {{ docker_options }}{% endif %}\
- {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %}'"
+ {% if ansible_selinux.status | default(None) == 'enabled' and docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %} \
+ {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %} \
+ {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \
+ {% if docker_options is defined %} {{ docker_options }}{% endif %} \
+ {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %} \
+ --signature-verification={{ openshift_docker_signature_verification | bool }}'"
when: docker_check.stat.isreg is defined and docker_check.stat.isreg
notify:
- restart docker
@@ -139,6 +140,13 @@
notify:
- restart docker
+# The following task is needed as the systemd module may report a change in
+# state even though docker is already running.
+- name: Detect if docker is already started
+ command: "systemctl show docker -p ActiveState"
+ changed_when: False
+ register: r_docker_already_running_result
+
- name: Start the Docker service
systemd:
name: docker
@@ -151,7 +159,7 @@
delay: 30
- set_fact:
- docker_service_status_changed: "{{ r_docker_package_docker_start_result | changed }}"
+ docker_service_status_changed: "{{ (r_docker_package_docker_start_result | changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}"
- name: Check for credentials file for registry auth
stat:
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index fdc6cd24a..a79600930 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -36,6 +36,12 @@
state: present
when: not openshift.common.is_atomic | bool
+- name: Check we are not using node as a Docker container with CRI-O
+ fail: msg='Cannot use CRI-O with node configured as a Docker container'
+ when:
+ - openshift.common.is_containerized | bool
+ - not openshift.common.is_node_system_container | bool
+
# Used to pull and install the system container
- name: Ensure atomic is installed
package:
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 807b9541a..78f231416 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -70,7 +70,8 @@ etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_
etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
-etcd_peer: 127.0.0.1
+# required role variable
+#etcd_peer: 127.0.0.1
etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}"
etcd_service: "{{ 'etcd_container' if r_etcd_common_etcd_runtime == 'docker' else 'etcd' }}"
diff --git a/roles/etcd/tasks/auxiliary/clean_data.yml b/roles/etcd/tasks/auxiliary/clean_data.yml
index 95a0e7c0a..1ed2db5bc 100644
--- a/roles/etcd/tasks/auxiliary/clean_data.yml
+++ b/roles/etcd/tasks/auxiliary/clean_data.yml
@@ -1,5 +1,5 @@
---
- name: Remove member data
file:
- path: /var/lib/etcd/member
+ path: "{{ etcd_data_dir }}/member"
state: absent
diff --git a/roles/etcd/tasks/auxiliary/disable_etcd.yml b/roles/etcd/tasks/auxiliary/disable_etcd.yml
new file mode 100644
index 000000000..7c6d0409d
--- /dev/null
+++ b/roles/etcd/tasks/auxiliary/disable_etcd.yml
@@ -0,0 +1,5 @@
+---
+- name: Disable etcd members
+ service:
+ name: "{{ etcd_service }}"
+ state: stopped
diff --git a/roles/etcd/tasks/auxiliary/force_new_cluster.yml b/roles/etcd/tasks/auxiliary/force_new_cluster.yml
new file mode 100644
index 000000000..ae8a36130
--- /dev/null
+++ b/roles/etcd/tasks/auxiliary/force_new_cluster.yml
@@ -0,0 +1,31 @@
+---
+- name: Set ETCD_FORCE_NEW_CLUSTER=true on first etcd host
+ lineinfile:
+ line: "ETCD_FORCE_NEW_CLUSTER=true"
+ dest: /etc/etcd/etcd.conf
+ backup: true
+
+- name: Start etcd
+ systemd:
+ name: "{{ etcd_service }}"
+ state: started
+
+- name: Wait for cluster to become healthy after bringing up first member
+ command: >
+ etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} --endpoint https://{{ etcd_peer }}:{{ etcd_client_port }} cluster-health
+ register: l_etcd_migrate_health
+ until: l_etcd_migrate_health.rc == 0
+ retries: 3
+ delay: 30
+
+- name: Unset ETCD_FORCE_NEW_CLUSTER=true on first etcd host
+ lineinfile:
+ line: "ETCD_FORCE_NEW_CLUSTER=true"
+ dest: /etc/etcd/etcd.conf
+ state: absent
+ backup: true
+
+- name: Restart first etcd host
+ systemd:
+ name: "{{ etcd_service }}"
+ state: restarted
diff --git a/roles/etcd/tasks/backup.archive.yml b/roles/etcd/tasks/backup.archive.yml
new file mode 100644
index 000000000..6daa6dc51
--- /dev/null
+++ b/roles/etcd/tasks/backup.archive.yml
@@ -0,0 +1,3 @@
+---
+- include: backup/vars.yml
+- include: backup/archive.yml
diff --git a/roles/etcd/tasks/backup.copy.yml b/roles/etcd/tasks/backup.copy.yml
new file mode 100644
index 000000000..cc540cbca
--- /dev/null
+++ b/roles/etcd/tasks/backup.copy.yml
@@ -0,0 +1,3 @@
+---
+- include: backup/vars.yml
+- include: backup/copy.yml
diff --git a/roles/etcd/tasks/backup.fetch.yml b/roles/etcd/tasks/backup.fetch.yml
new file mode 100644
index 000000000..26ec15043
--- /dev/null
+++ b/roles/etcd/tasks/backup.fetch.yml
@@ -0,0 +1,3 @@
+---
+- include: backup/vars.yml
+- include: backup/fetch.yml
diff --git a/roles/etcd/tasks/backup.force_new_cluster.yml b/roles/etcd/tasks/backup.force_new_cluster.yml
new file mode 100644
index 000000000..24bd0540d
--- /dev/null
+++ b/roles/etcd/tasks/backup.force_new_cluster.yml
@@ -0,0 +1,12 @@
+---
+- include: backup/vars.yml
+
+- name: Move content of etcd backup under the etcd data directory
+ command: >
+ mv "{{ l_etcd_backup_dir }}/member" "{{ l_etcd_data_dir }}"
+
+- name: Set etcd group for the etcd data directory
+ command: >
+ chown -R etcd:etcd "{{ l_etcd_data_dir }}"
+
+- include: auxiliary/force_new_cluster.yml
diff --git a/roles/etcd/tasks/backup.unarchive.yml b/roles/etcd/tasks/backup.unarchive.yml
new file mode 100644
index 000000000..77a637360
--- /dev/null
+++ b/roles/etcd/tasks/backup.unarchive.yml
@@ -0,0 +1,3 @@
+---
+- include: backup/vars.yml
+- include: backup/unarchive.yml
diff --git a/roles/etcd/tasks/backup/archive.yml b/roles/etcd/tasks/backup/archive.yml
new file mode 100644
index 000000000..f6aa68a6e
--- /dev/null
+++ b/roles/etcd/tasks/backup/archive.yml
@@ -0,0 +1,5 @@
+---
+- name: Archive backup
+ archive:
+ path: "{{ l_etcd_backup_dir }}"
+ dest: "{{ l_etcd_backup_dir }}.tgz"
diff --git a/roles/etcd/tasks/backup/backup.yml b/roles/etcd/tasks/backup/backup.yml
index 42d27c081..ec1a1989c 100644
--- a/roles/etcd/tasks/backup/backup.yml
+++ b/roles/etcd/tasks/backup/backup.yml
@@ -1,21 +1,5 @@
---
-# set the etcd backup directory name here in case the tag or sufix consists of dynamic value that changes over time
-# e.g. openshift-backup-{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }} value will change every second so if the date changes
-# right after setting l_etcd_incontainer_backup_dir and before l_etcd_backup_dir facts, the backup directory name is different
-- set_fact:
- l_backup_dir_name: "openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}"
-
-- set_fact:
- l_etcd_data_dir: "{{ etcd_data_dir }}{{ '/etcd.etcd' if r_etcd_common_etcd_runtime == 'runc' else '' }}"
-
-- set_fact:
- l_etcd_incontainer_data_dir: "{{ etcd_data_dir }}"
-
-- set_fact:
- l_etcd_incontainer_backup_dir: "{{ l_etcd_incontainer_data_dir }}/{{ l_backup_dir_name }}"
-
-- set_fact:
- l_etcd_backup_dir: "{{ l_etcd_data_dir }}/{{ l_backup_dir_name }}"
+- include: vars.yml
# TODO: replace shell module with command and update later checks
- name: Check available disk space for etcd backup
diff --git a/roles/etcd/tasks/backup/copy.yml b/roles/etcd/tasks/backup/copy.yml
new file mode 100644
index 000000000..16604bae8
--- /dev/null
+++ b/roles/etcd/tasks/backup/copy.yml
@@ -0,0 +1,5 @@
+---
+- name: Copy etcd backup
+ copy:
+ src: "{{ etcd_backup_sync_directory }}/{{ l_backup_dir_name }}.tgz"
+ dest: "{{ l_etcd_data_dir }}"
diff --git a/roles/etcd/tasks/backup/fetch.yml b/roles/etcd/tasks/backup/fetch.yml
new file mode 100644
index 000000000..610ce1960
--- /dev/null
+++ b/roles/etcd/tasks/backup/fetch.yml
@@ -0,0 +1,8 @@
+---
+- name: Fetch etcd backup
+ fetch:
+ src: "{{ l_etcd_backup_dir }}.tgz"
+ dest: "{{ etcd_backup_sync_directory }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
diff --git a/roles/etcd/tasks/backup/unarchive.yml b/roles/etcd/tasks/backup/unarchive.yml
new file mode 100644
index 000000000..6c75d00a7
--- /dev/null
+++ b/roles/etcd/tasks/backup/unarchive.yml
@@ -0,0 +1,14 @@
+---
+- shell: ls /var/lib/etcd
+ register: output
+
+- debug:
+ msg: "output: {{ output }}"
+
+- name: Unarchive backup
+ # can't use unarchive https://github.com/ansible/ansible/issues/30821
+ # unarchive:
+ # src: "{{ l_etcd_backup_dir }}.tgz"
+ # dest: "{{ l_etcd_backup_dir }}"
+ command: >
+ tar -xf "{{ l_etcd_backup_dir }}.tgz" -C "{{ l_etcd_data_dir }}"
diff --git a/roles/etcd/tasks/backup/vars.yml b/roles/etcd/tasks/backup/vars.yml
new file mode 100644
index 000000000..3c009f557
--- /dev/null
+++ b/roles/etcd/tasks/backup/vars.yml
@@ -0,0 +1,18 @@
+---
+# Set the etcd backup directory name here in case the tag or suffix contains a dynamic value that changes over time.
+# e.g. the openshift-backup-{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }} value changes every second, so if the date changed
+# right after setting the l_etcd_incontainer_backup_dir fact and before the l_etcd_backup_dir fact, the backup directory names would differ
+- set_fact:
+ l_backup_dir_name: "openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}"
+
+- set_fact:
+ l_etcd_data_dir: "{{ etcd_data_dir }}{{ '/etcd.etcd' if r_etcd_common_etcd_runtime == 'runc' else '' }}"
+
+- set_fact:
+ l_etcd_incontainer_data_dir: "{{ etcd_data_dir }}"
+
+- set_fact:
+ l_etcd_incontainer_backup_dir: "{{ l_etcd_incontainer_data_dir }}/{{ l_backup_dir_name }}"
+
+- set_fact:
+ l_etcd_backup_dir: "{{ l_etcd_data_dir }}/{{ l_backup_dir_name }}"
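
To make the composition concrete, a hypothetical run with r_etcd_common_backup_tag=pre-migrate, r_etcd_common_backup_sufix_name=20171005120000, etcd_data_dir=/var/lib/etcd and a non-runc runtime would resolve the facts roughly as follows (all values illustrative):

    l_backup_dir_name: openshift-backup-pre-migrate20171005120000
    l_etcd_data_dir: /var/lib/etcd
    l_etcd_incontainer_data_dir: /var/lib/etcd
    l_etcd_incontainer_backup_dir: /var/lib/etcd/openshift-backup-pre-migrate20171005120000
    l_etcd_backup_dir: /var/lib/etcd/openshift-backup-pre-migrate20171005120000
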
diff --git a/roles/etcd/tasks/backup_master_etcd_certificates.yml b/roles/etcd/tasks/backup_master_etcd_certificates.yml
new file mode 100644
index 000000000..129e1831c
--- /dev/null
+++ b/roles/etcd/tasks/backup_master_etcd_certificates.yml
@@ -0,0 +1,2 @@
+---
+- include: certificates/backup_master_etcd_certificates.yml
diff --git a/roles/etcd/tasks/certificates/backup_master_etcd_certificates.yml b/roles/etcd/tasks/certificates/backup_master_etcd_certificates.yml
new file mode 100644
index 000000000..e65b3e5a2
--- /dev/null
+++ b/roles/etcd/tasks/certificates/backup_master_etcd_certificates.yml
@@ -0,0 +1,7 @@
+---
+- name: Backup master etcd certificates
+ shell: >
+ tar -czvf /etc/origin/master/master-etcd-certificate-backup-{{ ansible_date_time.epoch }}.tgz
+ /etc/origin/master/master.etcd-*
+ args:
+ warn: no
diff --git a/roles/etcd/tasks/check_cluster_health.yml b/roles/etcd/tasks/check_cluster_health.yml
new file mode 100644
index 000000000..75c110972
--- /dev/null
+++ b/roles/etcd/tasks/check_cluster_health.yml
@@ -0,0 +1,2 @@
+---
+- include: migration/check_cluster_health.yml
diff --git a/roles/etcd/tasks/disable_etcd.yml b/roles/etcd/tasks/disable_etcd.yml
new file mode 100644
index 000000000..9202e6e48
--- /dev/null
+++ b/roles/etcd/tasks/disable_etcd.yml
@@ -0,0 +1,2 @@
+---
+- include: auxiliary/disable_etcd.yml
diff --git a/roles/etcd/tasks/fetch_backup.yml b/roles/etcd/tasks/fetch_backup.yml
new file mode 100644
index 000000000..513eed17a
--- /dev/null
+++ b/roles/etcd/tasks/fetch_backup.yml
@@ -0,0 +1,8 @@
+---
+- include: backup/vars.yml
+
+- include: backup/archive.yml
+
+- include: backup/sync_backup.yml
+
+- include: backup/
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index e735bf50a..024479fb4 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -17,6 +17,7 @@
{{ hostvars[host].etcd_hostname }}={{ etcd_peer_url_scheme }}://{{ hostvars[host].etcd_ip }}:{{ etcd_peer_port }},
{%- endif -%}
{% endfor -%}
+ when: etcd_initial_cluster is undefined
- name: Check etcd system container package
command: >
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
index 8462bb4c8..3027a9447 100644
--- a/roles/etcd/templates/etcd.conf.j2
+++ b/roles/etcd/templates/etcd.conf.j2
@@ -29,8 +29,8 @@ ETCD_INITIAL_CLUSTER={{ etcd_hostname}}={{ etcd_initial_advertise_peer_urls }}
ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }}
ETCD_INITIAL_CLUSTER_TOKEN=thirdparty-etcd-cluster-1
{% else %}
-{% if initial_etcd_cluster is defined and initial_etcd_cluster %}
-ETCD_INITIAL_CLUSTER={{ initial_etcd_cluster }}
+{% if etcd_initial_cluster is defined and etcd_initial_cluster %}
+ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster }}
{% else %}
ETCD_INITIAL_CLUSTER={{ initial_cluster() }}
{% endif %}
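
For illustration, when etcd_initial_cluster is supplied (as the scaleup play now does via the renamed variable), the rendered line would look roughly like this, with placeholder hostnames and IPs:

    ETCD_INITIAL_CLUSTER=etcd1.example.com=https://192.0.2.10:2380,etcd2.example.com=https://192.0.2.11:2380
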
diff --git a/roles/installer_checkpoint/README.md b/roles/installer_checkpoint/README.md
index 321acca21..83e00e504 100644
--- a/roles/installer_checkpoint/README.md
+++ b/roles/installer_checkpoint/README.md
@@ -92,8 +92,7 @@ phase/component and then a final play for setting `installer_phase_initialize` to
# common/openshift-cluster/std_include.yml
---
- name: Initialization Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
roles:
- installer_checkpoint
diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
index ac369b882..25f9405af 100644
--- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
+++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
@@ -136,7 +136,7 @@ class CallbackModule(CallbackBase):
},
'installer_phase_management': {
'title': 'Management Install',
- 'playbook': 'playbooks/common/openshift-cluster/openshift_management.yml'
+ 'playbook': 'playbooks/byo/openshift-management/config.yml'
},
}
diff --git a/roles/kuryr/README.md b/roles/kuryr/README.md
new file mode 100644
index 000000000..7b618f902
--- /dev/null
+++ b/roles/kuryr/README.md
@@ -0,0 +1,38 @@
+## OpenStack Kuryr
+
+Install the Kuryr CNI components (kuryr-controller, kuryr-cni) on master and worker
+nodes. Kuryr uses the OpenStack Networking service (Neutron) to provide networking for
+pods, which allows interconnectivity between pods and OpenStack VMs.
+
+## Requirements
+
+* Ansible 2.2+
+* CentOS/RHEL 7.3+
+
+## Current Kuryr restrictions when used with OpenShift
+
+* OpenShift Origin only
+* OpenShift on OpenStack Newton or newer (only with Trunk ports)
+
+## Key Ansible inventory Kuryr master configuration parameters
+
+* ``openshift_use_kuryr=True``
+* ``openshift_use_openshift_sdn=False``
+* ``openshift_sdn_network_plugin_name='cni'``
+* ``kuryr_cni_link_interface=eth0``
+* ``kuryr_openstack_auth_url=keystone_url``
+* ``kuryr_openstack_user_domain_name=Default``
+* ``kuryr_openstack_user_project_name=Default``
+* ``kuryr_openstack_project_id=project_uuid``
+* ``kuryr_openstack_username=kuryr``
+* ``kuryr_openstack_password=kuryr_pass``
+* ``kuryr_openstack_pod_sg_id=pod_security_group_uuid``
+* ``kuryr_openstack_pod_subnet_id=pod_subnet_uuid``
+* ``kuryr_openstack_pod_service_id=service_subnet_uuid``
+* ``kuryr_openstack_pod_project_id=pod_project_uuid``
+* ``kuryr_openstack_worker_nodes_subnet_id=worker_nodes_subnet_uuid``
+
+## Kuryr resources
+
+* [Kuryr documentation](https://docs.openstack.org/kuryr-kubernetes/latest/)
+* [Installing Kuryr containerized](https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html)
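
A minimal variable sketch for the inventory parameters listed above, in YAML group_vars form; every value below is a placeholder and must be replaced with real OpenStack identifiers:

    openshift_use_kuryr: True
    openshift_use_openshift_sdn: False
    openshift_sdn_network_plugin_name: cni
    kuryr_cni_link_interface: eth0
    kuryr_openstack_auth_url: https://keystone.example.com:5000/v3
    kuryr_openstack_user_domain_name: Default
    kuryr_openstack_user_project_name: Default
    kuryr_openstack_project_id: project-uuid
    kuryr_openstack_username: kuryr
    kuryr_openstack_password: kuryr_pass
    kuryr_openstack_pod_sg_id: pod-security-group-uuid
    kuryr_openstack_pod_subnet_id: pod-subnet-uuid
    kuryr_openstack_pod_service_id: service-subnet-uuid
    kuryr_openstack_pod_project_id: pod-project-uuid
    kuryr_openstack_worker_nodes_subnet_id: worker-nodes-subnet-uuid
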
diff --git a/roles/kuryr/defaults/main.yaml b/roles/kuryr/defaults/main.yaml
new file mode 100644
index 000000000..ff298dda0
--- /dev/null
+++ b/roles/kuryr/defaults/main.yaml
@@ -0,0 +1,72 @@
+---
+# Kuryr conf directory
+kuryr_config_dir: /etc/kuryr
+
+# Kuryr username
+kuryr_openstack_username: kuryr
+
+# Kuryr username domain
+kuryr_openstack_user_domain_name: default
+
+# Kuryr project domain
+kuryr_openstack_project_domain_name: default
+
+# Kuryr OpenShift namespace
+kuryr_namespace: kube-system
+
+# Whether to run the cni plugin in debug mode
+kuryr_cni_debug: "false"
+
+# The version of cni binaries
+cni_version: v0.5.2
+
+# Path to bin dir (where kuryr execs get installed)
+bin_dir: /usr/bin
+
+# Path to the cni binaries
+cni_bin_dir: /opt/cni/bin
+
+# URL for cni binaries
+cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/"
+cni_bin_url: "{{ cni_bin_url_base }}/{{ cni_version }}/cni-{{ cni_version }}.tgz"
+cni_bin_checksum: "71f411080245aa14d0cc06f6824e8039607dd9e9"
+
+# Kuryr ClusterRole definition
+kuryr_clusterrole:
+ name: kuryrctl
+ state: present
+ rules:
+ - apiGroups:
+ - ""
+ attributeRestrictions: null
+ verbs:
+ - get
+ - list
+ - watch
+ resources:
+ - daemonsets
+ - deployments
+ - deploymentconfigs
+ - endpoints
+ - ingress
+ - nodes
+ - namespaces
+ - pods
+ - projects
+ - routes
+ - services
+ - apiGroups:
+ - ""
+ attributeRestrictions: null
+ verbs:
+ - update
+ - patch
+ resources:
+ - endpoints
+ - ingress
+ - pods
+ - namespaces
+ - nodes
+ - services
+ - services/status
+ - routes
diff --git a/roles/kuryr/meta/main.yml b/roles/kuryr/meta/main.yml
new file mode 100644
index 000000000..7fd5adf41
--- /dev/null
+++ b/roles/kuryr/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+ author: Red Hat
+ description: Kuryr networking
+ company: Red Hat
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- { role: lib_openshift }
+- { role: openshift_facts }
diff --git a/roles/kuryr/tasks/master.yaml b/roles/kuryr/tasks/master.yaml
new file mode 100644
index 000000000..55ab16f74
--- /dev/null
+++ b/roles/kuryr/tasks/master.yaml
@@ -0,0 +1,52 @@
+---
+- name: Perform OpenShift ServiceAccount config
+ include: serviceaccount.yaml
+
+- name: Create kuryr manifests tempdir
+ command: mktemp -d
+ register: manifests_tmpdir
+
+- name: Create kuryr ConfigMap manifest
+ become: yes
+ template:
+ src: configmap.yaml.j2
+ dest: "{{ manifests_tmpdir.stdout }}/configmap.yaml"
+
+- name: Create kuryr-controller Deployment manifest
+ become: yes
+ template:
+ src: controller-deployment.yaml.j2
+ dest: "{{ manifests_tmpdir.stdout }}/controller-deployment.yaml"
+
+- name: Create kuryr-cni DaemonSet manifest
+ become: yes
+ template:
+ src: cni-daemonset.yaml.j2
+ dest: "{{ manifests_tmpdir.stdout }}/cni-daemonset.yaml"
+
+- name: Apply ConfigMap manifest
+ oc_obj:
+ state: present
+ kind: ConfigMap
+ name: "kuryr-config"
+ namespace: "{{ kuryr_namespace }}"
+ files:
+ - "{{ manifests_tmpdir.stdout }}/configmap.yaml"
+
+- name: Apply Controller Deployment manifest
+ oc_obj:
+ state: present
+ kind: Deployment
+ name: "kuryr-controller"
+ namespace: "{{ kuryr_namespace }}"
+ files:
+ - "{{ manifests_tmpdir.stdout }}/controller-deployment.yaml"
+
+- name: Apply kuryr-cni DaemonSet manifest
+ oc_obj:
+ state: present
+ kind: DaemonSet
+ name: "kuryr-cni-ds"
+ namespace: "{{ kuryr_namespace }}"
+ files:
+ - "{{ manifests_tmpdir.stdout }}/cni-daemonset.yaml"
diff --git a/roles/kuryr/tasks/node.yaml b/roles/kuryr/tasks/node.yaml
new file mode 100644
index 000000000..ffe814713
--- /dev/null
+++ b/roles/kuryr/tasks/node.yaml
@@ -0,0 +1,48 @@
+---
+- name: Create CNI bin directory
+ file:
+ state: directory
+ path: "{{ cni_bin_dir }}"
+ mode: 0755
+ owner: root
+ group: root
+ recurse: yes
+
+- name: Create CNI extraction tempdir
+ command: mktemp -d
+ register: cni_tmpdir
+
+- name: Download CNI
+ get_url:
+ url: "{{ cni_bin_url }}"
+ checksum: "sha1:{{ cni_bin_checksum }}"
+ mode: 0644
+ dest: "{{ cni_tmpdir.stdout }}"
+ register: downloaded_tarball
+
+- name: Extract CNI
+ become: yes
+ unarchive:
+ remote_src: True
+ src: "{{ downloaded_tarball.dest }}"
+ dest: "{{ cni_bin_dir }}"
+ when: downloaded_tarball.changed
+
+- name: Ensure CNI net.d exists
+ file:
+ path: /etc/cni/net.d
+ recurse: yes
+ state: directory
+
+- name: Configure OpenShift node with disabled service proxy
+ lineinfile:
+ dest: "/etc/sysconfig/{{ openshift.common.service_type }}-node"
+ regexp: '^OPTIONS="?(.*?)"?$'
+ backrefs: yes
+ backup: yes
+ line: 'OPTIONS="\1 --disable dns,proxy,plugins"'
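+    # For example, an existing OPTIONS="--loglevel=2" becomes
+    # OPTIONS="--loglevel=2 --disable dns,proxy,plugins" (the pre-existing value is illustrative)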
+
+- name: Force node restart to disable the proxy
+ service:
+ name: "{{ openshift.common.service_type }}-node"
+ state: restarted
diff --git a/roles/kuryr/tasks/serviceaccount.yaml b/roles/kuryr/tasks/serviceaccount.yaml
new file mode 100644
index 000000000..088f13091
--- /dev/null
+++ b/roles/kuryr/tasks/serviceaccount.yaml
@@ -0,0 +1,31 @@
+---
+- name: Create Controller service account
+ oc_serviceaccount:
+ name: kuryr-controller
+ namespace: "{{ kuryr_namespace }}"
+ register: saout
+
+- name: Create a cluster role for Kuryr
+ oc_clusterrole: "{{ kuryr_clusterrole }}"
+
+- name: Fetch the created Kuryr controller cluster role
+ oc_clusterrole:
+ name: kuryrctl
+ state: list
+ register: crout
+
+- name: Grant Kuryr the privileged security context constraints
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ kuryr_namespace }}:{{ saout.results.results.0.metadata.name }}"
+ namespace: "{{ kuryr_namespace }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+
+- name: Assign role to Kuryr service account
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ kuryr_namespace }}:{{ saout.results.results.0.metadata.name }}"
+ namespace: "{{ kuryr_namespace }}"
+ resource_kind: cluster-role
+ resource_name: "{{ crout.results.results.metadata.name }}"
+ state: present
diff --git a/roles/kuryr/templates/cni-daemonset.yaml.j2 b/roles/kuryr/templates/cni-daemonset.yaml.j2
new file mode 100644
index 000000000..39348ae90
--- /dev/null
+++ b/roles/kuryr/templates/cni-daemonset.yaml.j2
@@ -0,0 +1,53 @@
+# More info about the template: https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html#generating-kuryr-resource-definitions-for-kubernetes
+
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: kuryr-cni-ds
+ namespace: {{ kuryr_namespace }}
+ labels:
+ tier: node
+ app: kuryr
+spec:
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: kuryr
+ spec:
+ hostNetwork: true
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ serviceAccountName: kuryr-controller
+ containers:
+ - name: kuryr-cni
+ image: kuryr/cni:latest
+ imagePullPolicy: IfNotPresent
+ command: [ "cni_ds_init" ]
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: bin
+ mountPath: /opt/cni/bin
+ - name: net-conf
+ mountPath: /etc/cni/net.d
+ - name: config-volume
+ mountPath: /tmp/kuryr/kuryr.conf
+ subPath: kuryr-cni.conf
+ - name: etc
+ mountPath: /etc
+ volumes:
+ - name: bin
+ hostPath:
+ path: {{ cni_bin_dir }}
+ - name: net-conf
+ hostPath:
+ path: /etc/cni/net.d
+ - name: config-volume
+ configMap:
+ name: kuryr-config
+ - name: etc
+ hostPath:
+ path: /etc \ No newline at end of file
diff --git a/roles/kuryr/templates/configmap.yaml.j2 b/roles/kuryr/templates/configmap.yaml.j2
new file mode 100644
index 000000000..e874d6c25
--- /dev/null
+++ b/roles/kuryr/templates/configmap.yaml.j2
@@ -0,0 +1,343 @@
+# More info about the template: https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html#generating-kuryr-resource-definitions-for-kubernetes
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: kuryr-config
+ namespace: {{ kuryr_namespace }}
+data:
+ kuryr.conf: |+
+ [DEFAULT]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Directory for Kuryr vif binding executables. (string value)
+ #bindir = /usr/libexec/kuryr
+
+ # If set to true, the logging level will be set to DEBUG instead of the default
+ # INFO level. (boolean value)
+ # Note: This option can be changed without restarting.
+ #debug = false
+
+ # DEPRECATED: If set to false, the logging level will be set to WARNING instead
+ # of the default INFO level. (boolean value)
+ # This option is deprecated for removal.
+ # Its value may be silently ignored in the future.
+ #verbose = true
+
+ # The name of a logging configuration file. This file is appended to any
+ # existing logging configuration files. For details about logging configuration
+ # files, see the Python logging module documentation. Note that when logging
+ # configuration files are used then all logging configuration is set in the
+ # configuration file and other logging configuration options are ignored (for
+ # example, logging_context_format_string). (string value)
+ # Note: This option can be changed without restarting.
+ # Deprecated group/name - [DEFAULT]/log_config
+ #log_config_append = <None>
+
+ # Defines the format string for %%(asctime)s in log records. Default:
+ # %(default)s . This option is ignored if log_config_append is set. (string
+ # value)
+ #log_date_format = %Y-%m-%d %H:%M:%S
+
+ # (Optional) Name of log file to send logging output to. If no default is set,
+ # logging will go to stderr as defined by use_stderr. This option is ignored if
+ # log_config_append is set. (string value)
+ # Deprecated group/name - [DEFAULT]/logfile
+ #log_file = /var/log/kuryr/kuryr-controller.log
+
+ # (Optional) The base directory used for relative log_file paths. This option
+ # is ignored if log_config_append is set. (string value)
+ # Deprecated group/name - [DEFAULT]/logdir
+ #log_dir = <None>
+
+ # Uses logging handler designed to watch file system. When log file is moved or
+ # removed this handler will open a new log file with specified path
+ # instantaneously. It makes sense only if log_file option is specified and
+ # Linux platform is used. This option is ignored if log_config_append is set.
+ # (boolean value)
+ #watch_log_file = false
+
+ # Use syslog for logging. Existing syslog format is DEPRECATED and will be
+ # changed later to honor RFC5424. This option is ignored if log_config_append
+ # is set. (boolean value)
+ #use_syslog = false
+
+ # Syslog facility to receive log lines. This option is ignored if
+ # log_config_append is set. (string value)
+ #syslog_log_facility = LOG_USER
+
+ # Log output to standard error. This option is ignored if log_config_append is
+ # set. (boolean value)
+ #use_stderr = true
+
+ # Format string to use for log messages with context. (string value)
+ #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+ # Format string to use for log messages when context is undefined. (string
+ # value)
+ #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+ # Additional data to append to log message when logging level for the message
+ # is DEBUG. (string value)
+ #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+ # Prefix each line of exception output with this format. (string value)
+ #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+ # Defines the format string for %(user_identity)s that is used in
+ # logging_context_format_string. (string value)
+ #logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+ # List of package logging levels in logger=LEVEL pairs. This option is ignored
+ # if log_config_append is set. (list value)
+ #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+ # Enables or disables publication of error events. (boolean value)
+ #publish_errors = false
+
+ # The format for an instance that is passed with the log message. (string
+ # value)
+ #instance_format = "[instance: %(uuid)s] "
+
+ # The format for an instance UUID that is passed with the log message. (string
+ # value)
+ #instance_uuid_format = "[instance: %(uuid)s] "
+
+ # Enables or disables fatal status of deprecations. (boolean value)
+ #fatal_deprecations = false
+
+
+ [binding]
+
+ driver = kuryr.lib.binding.drivers.vlan
+ link_iface = eth0
+
+ [kubernetes]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # The root URL of the Kubernetes API (string value)
+ api_root = {{ openshift.master.api_url }}
+
+ # Absolute path to client cert to connect to HTTPS K8S_API (string value)
+ # ssl_client_crt_file = /etc/kuryr/controller.crt
+
+ # Absolute path client key file to connect to HTTPS K8S_API (string value)
+ # ssl_client_key_file = /etc/kuryr/controller.key
+
+ # Absolute path to ca cert file to connect to HTTPS K8S_API (string value)
+ ssl_ca_crt_file = /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+
+ # The token to talk to the k8s API
+ token_file = /var/run/secrets/kubernetes.io/serviceaccount/token
+
+ # HTTPS K8S_API server identity verification (boolean value)
+ # TODO (apuimedo): Make configurable
+ ssl_verify_server_crt = True
+
+ # The driver to determine OpenStack project for pod ports (string value)
+ pod_project_driver = default
+
+ # The driver to determine OpenStack project for services (string value)
+ service_project_driver = default
+
+ # The driver to determine Neutron subnets for pod ports (string value)
+ pod_subnets_driver = default
+
+ # The driver to determine Neutron subnets for services (string value)
+ service_subnets_driver = default
+
+ # The driver to determine Neutron security groups for pods (string value)
+ pod_security_groups_driver = default
+
+ # The driver to determine Neutron security groups for services (string value)
+ service_security_groups_driver = default
+
+ # The driver that provides VIFs for Kubernetes Pods. (string value)
+ pod_vif_driver = nested-vlan
+
+
+ [neutron]
+ # Configuration options for OpenStack Neutron
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Authentication URL (string value)
+ auth_url = {{ kuryr_openstack_auth_url }}
+
+ # Authentication type to load (string value)
+ # Deprecated group/name - [neutron]/auth_plugin
+ auth_type = password
+
+ # Domain ID to scope to (string value)
+ user_domain_name = {{ kuryr_openstack_user_domain_name }}
+
+ # User's password (string value)
+ password = {{ kuryr_openstack_password }}
+
+ # Domain name containing project (string value)
+ project_domain_name = {{ kuryr_openstack_project_domain_name }}
+
+ # Project ID to scope to (string value)
+ # Deprecated group/name - [neutron]/tenant-id
+ project_id = {{ kuryr_openstack_project_id }}
+
+ # Token (string value)
+ #token = <None>
+
+ # Trust ID (string value)
+ #trust_id = <None>
+
+ # User's domain id (string value)
+ #user_domain_id = <None>
+
+ # User id (string value)
+ #user_id = <None>
+
+ # Username (string value)
+ # Deprecated group/name - [neutron]/user-name
+ username = {{kuryr_openstack_username }}
+
+ # Whether a plugging operation is failed if the port to plug does not become
+ # active (boolean value)
+ #vif_plugging_is_fatal = false
+
+ # Seconds to wait for port to become active (integer value)
+ #vif_plugging_timeout = 0
+
+ [neutron_defaults]
+
+ pod_security_groups = {{ kuryr_openstack_pod_sg_id }}
+ pod_subnet = {{ kuryr_openstack_pod_subnet_id }}
+ service_subnet = {{ kuryr_openstack_service_subnet_id }}
+ project = {{ kuryr_openstack_pod_project_id }}
+ # TODO (apuimedo): Remove the duplicated line just after this one once the
+ # RDO packaging contains the upstream patch
+ worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
+ [pod_vif_nested]
+ worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+ kuryr-cni.conf: |+
+ [DEFAULT]
+
+ #
+ # From kuryr_kubernetes
+ #
+ # If set to true, the logging level will be set to DEBUG instead of the default
+ # INFO level. (boolean value)
+ # Note: This option can be changed without restarting.
+ #debug = false
+
+ # The name of a logging configuration file. This file is appended to any
+ # existing logging configuration files. For details about logging configuration
+ # files, see the Python logging module documentation. Note that when logging
+ # configuration files are used then all logging configuration is set in the
+ # configuration file and other logging configuration options are ignored (for
+ # example, logging_context_format_string). (string value)
+ # Note: This option can be changed without restarting.
+ # Deprecated group/name - [DEFAULT]/log_config
+ #log_config_append = <None>
+
+ # Defines the format string for %%(asctime)s in log records. Default:
+ # %(default)s . This option is ignored if log_config_append is set. (string
+ # value)
+ #log_date_format = %Y-%m-%d %H:%M:%S
+
+ # (Optional) Name of log file to send logging output to. If no default is set,
+ # logging will go to stderr as defined by use_stderr. This option is ignored if
+ # log_config_append is set. (string value)
+ # Deprecated group/name - [DEFAULT]/logfile
+ #log_file = /var/log/kuryr/cni.log
+
+ # (Optional) The base directory used for relative log_file paths. This option
+ # is ignored if log_config_append is set. (string value)
+ # Deprecated group/name - [DEFAULT]/logdir
+ #log_dir = <None>
+
+ # Uses logging handler designed to watch file system. When log file is moved or
+ # removed this handler will open a new log file with specified path
+ # instantaneously. It makes sense only if log_file option is specified and
+ # Linux platform is used. This option is ignored if log_config_append is set.
+ # (boolean value)
+ #watch_log_file = false
+
+ # Use syslog for logging. Existing syslog format is DEPRECATED and will be
+ # changed later to honor RFC5424. This option is ignored if log_config_append
+ # is set. (boolean value)
+ #use_syslog = false
+
+ # Syslog facility to receive log lines. This option is ignored if
+ # log_config_append is set. (string value)
+ #syslog_log_facility = LOG_USER
+
+ # Log output to standard error. This option is ignored if log_config_append is
+ # set. (boolean value)
+ use_stderr = true
+
+ # Format string to use for log messages with context. (string value)
+ #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+ # Format string to use for log messages when context is undefined. (string
+ # value)
+ #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+ # Additional data to append to log message when logging level for the message
+ # is DEBUG. (string value)
+ #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+ # Prefix each line of exception output with this format. (string value)
+ #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+ # Defines the format string for %(user_identity)s that is used in
+ # logging_context_format_string. (string value)
+ #logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+ # List of package logging levels in logger=LEVEL pairs. This option is ignored
+ # if log_config_append is set. (list value)
+ #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+ # Enables or disables publication of error events. (boolean value)
+ #publish_errors = false
+
+ # The format for an instance that is passed with the log message. (string
+ # value)
+ #instance_format = "[instance: %(uuid)s] "
+
+ # The format for an instance UUID that is passed with the log message. (string
+ # value)
+ #instance_uuid_format = "[instance: %(uuid)s] "
+
+ # Enables or disables fatal status of deprecations. (boolean value)
+ #fatal_deprecations = false
+
+
+ [binding]
+
+ driver = kuryr.lib.binding.drivers.vlan
+ link_iface = {{ kuryr_cni_link_interface }}
+
+ [kubernetes]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # The root URL of the Kubernetes API (string value)
+ api_root = {{ openshift.master.api_url }}
+
+ # The token to talk to the k8s API
+ token_file = /etc/kuryr/token
+
+ # Absolute path to ca cert file to connect to HTTPS K8S_API (string value)
+ ssl_ca_crt_file = /etc/kuryr/ca.crt
+
+ # HTTPS K8S_API server identity verification (boolean value)
+ # TODO (apuimedo): Make configurable
+ ssl_verify_server_crt = True
diff --git a/roles/kuryr/templates/controller-deployment.yaml.j2 b/roles/kuryr/templates/controller-deployment.yaml.j2
new file mode 100644
index 000000000..d970270b5
--- /dev/null
+++ b/roles/kuryr/templates/controller-deployment.yaml.j2
@@ -0,0 +1,40 @@
+# More info about the template: https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html#generating-kuryr-resource-definitions-for-kubernetes
+
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ name: kuryr-controller
+ name: kuryr-controller
+ namespace: {{ kuryr_namespace }}
+spec:
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ name: kuryr-controller
+ name: kuryr-controller
+ spec:
+ serviceAccountName: kuryr-controller
+ automountServiceAccountToken: true
+ hostNetwork: true
+ containers:
+ - image: kuryr/controller:latest
+ imagePullPolicy: IfNotPresent
+ name: controller
+ terminationMessagePath: "/dev/termination-log"
+ # FIXME(dulek): This shouldn't be required, but without it selinux is
+ # complaining about access to kuryr.conf.
+ securityContext:
+ privileged: true
+ runAsUser: 0
+ volumeMounts:
+ - name: config-volume
+ mountPath: "/etc/kuryr/kuryr.conf"
+ subPath: kuryr.conf
+ volumes:
+ - name: config-volume
+ configMap:
+ name: kuryr-config
+ defaultMode: 0666
+ restartPolicy: Always
diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py
index d1dc4caf8..324f52689 100644
--- a/roles/lib_openshift/library/oc_adm_csr.py
+++ b/roles/lib_openshift/library/oc_adm_csr.py
@@ -1478,11 +1478,23 @@ class OCcsr(OpenShiftCLI):
return False
+ def get_csr_request(self, request):
+ '''base64 decode the request object and call openssl to determine the
+ subject and specifically the CN: from the request
+
+ Output:
+ (0, '...
+ Subject: O=system:nodes, CN=system:node:ip-172-31-54-54.ec2.internal
+ ...')
+ '''
+ import base64
+ return self._run(['openssl', 'req', '-noout', '-text'], base64.b64decode(request))[1]
+
def match_node(self, csr):
'''match an inc csr to a node in self.nodes'''
for node in self.nodes:
- # we have a match
- if node['name'] in csr['metadata']['name']:
+ # we need to match based upon the csr's request certificate's CN
+ if node['name'] in self.get_csr_request(csr['spec']['request']):
node['csrs'][csr['metadata']['name']] = csr
# check that the username is the node and type is 'Approved'
diff --git a/roles/lib_openshift/src/class/oc_adm_csr.py b/roles/lib_openshift/src/class/oc_adm_csr.py
index ea11c6ca9..22b8f9165 100644
--- a/roles/lib_openshift/src/class/oc_adm_csr.py
+++ b/roles/lib_openshift/src/class/oc_adm_csr.py
@@ -66,11 +66,23 @@ class OCcsr(OpenShiftCLI):
return False
+ def get_csr_request(self, request):
+ '''base64 decode the request object and call openssl to determine the
+ subject and specifically the CN: from the request
+
+ Output:
+ (0, '...
+ Subject: O=system:nodes, CN=system:node:ip-172-31-54-54.ec2.internal
+ ...')
+ '''
+ import base64
+ return self._run(['openssl', 'req', '-noout', '-text'], base64.b64decode(request))[1]
+
def match_node(self, csr):
'''match an inc csr to a node in self.nodes'''
for node in self.nodes:
- # we have a match
- if node['name'] in csr['metadata']['name']:
+ # we need to match based upon the csr's request certificate's CN
+ if node['name'] in self.get_csr_request(csr['spec']['request']):
node['csrs'][csr['metadata']['name']] = csr
# check that the username is the node and type is 'Approved'
diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md
index ff96081fe..4aca5c7a8 100644
--- a/roles/openshift_aws/README.md
+++ b/roles/openshift_aws/README.md
@@ -1,7 +1,29 @@
openshift_aws
==================================
-Provision AWS infrastructure helpers.
+Provision AWS infrastructure and instances.
+
+This role contains many task-areas that provision resources and perform actions
+against an AWS account in order to dynamically build an OpenShift cluster.
+
+This role is primarily intended to be used with "include_role" and "tasks_from".
+
+include_role can be called from the tasks section in a play. See the example
+playbook below for reference; a minimal per-task-area sketch also follows the
+list of task-areas.
+
+These task-areas are:
+
+* provision a vpc: vpc.yml
+* provision elastic load balancers: elb.yml
+* upload IAM ssl certificates to use with load balancers: iam_cert.yml
+* provision an S3 bucket: s3.yml
+* provision an instance to build an AMI: provision_instance.yml
+* provision a security group in AWS: security_group.yml
+* provision ssh keys and users in AWS: ssh_keys.yml
+* provision an AMI in AWS: seal_ami.yml
+* provision scale groups: scale_group.yml
+* provision launch configs: launch_config.yml
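+
+A minimal sketch of invoking a single task-area via include_role and tasks_from
+(the variable values shown are illustrative):
+
+```yaml
+- hosts: localhost
+  connection: local
+  tasks:
+  - name: create the cluster vpc
+    include_role:
+      name: openshift_aws
+      tasks_from: vpc.yml
+    vars:
+      openshift_aws_clusterid: mycluster
+      openshift_aws_region: us-east-1
+```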
Requirements
------------
@@ -9,56 +31,9 @@ Requirements
* Ansible 2.3
* Boto
-Role Variables
---------------
-
-From this role:
-
-| Name | Default value
-|---------------------------------------------------|-----------------------
-| openshift_aws_clusterid | default
-| openshift_aws_elb_scheme | internet-facing
-| openshift_aws_launch_config_bootstrap_token | ''
-| openshift_aws_node_group_config | {'master': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_master_volumes }}', 'tags': {'host-type': 'master', 'sub-host-type': 'default'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'wait_for_instances': True, 'max_size': 3}, 'tags': '{{ openshift_aws_node_group_config_tags }}', 'compute': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'compute'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'max_size': 100}, 'infra': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'infra'}, 'min_size': 2, 'instance_type': 'm4.xlarge', 'desired_size': 2, 'max_size': 20}}
-| openshift_aws_ami_copy_wait | False
-| openshift_aws_users | []
-| openshift_aws_launch_config_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}
-| openshift_aws_node_group_type | master
-| openshift_aws_elb_cert_arn | ''
-| openshift_aws_kubernetes_cluster_status | owned
-| openshift_aws_s3_mode | create
-| openshift_aws_vpc | {'subnets': {'us-east-1': [{'cidr': '172.31.48.0/20', 'az': 'us-east-1c'}, {'cidr': '172.31.32.0/20', 'az': 'us-east-1e'}, {'cidr': '172.31.16.0/20', 'az': 'us-east-1a'}]}, 'cidr': '172.31.0.0/16', 'name': '{{ openshift_aws_vpc_name }}'}
-| openshift_aws_create_ssh_keys | False
-| openshift_aws_iam_kms_alias | alias/{{ openshift_aws_clusterid }}_kms
-| openshift_aws_use_custom_ami | False
-| openshift_aws_ami_copy_src_region | {{ openshift_aws_region }}
-| openshift_aws_s3_bucket_name | {{ openshift_aws_clusterid }}
-| openshift_aws_elb_health_check | {'response_timeout': 5, 'ping_port': 443, 'ping_protocol': 'tcp', 'interval': 30, 'healthy_threshold': 2, 'unhealthy_threshold': 2}
-| openshift_aws_node_security_groups | {'default': {'rules': [{'to_port': 22, 'from_port': 22, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 'all', 'from_port': 'all', 'proto': 'all', 'group_name': '{{ openshift_aws_clusterid }}'}], 'name': '{{ openshift_aws_clusterid }}', 'desc': '{{ openshift_aws_clusterid }} default'}, 'master': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_master', 'desc': '{{ openshift_aws_clusterid }} master instances'}, 'compute': {'name': '{{ openshift_aws_clusterid }}_compute', 'desc': '{{ openshift_aws_clusterid }} compute node instances'}, 'etcd': {'name': '{{ openshift_aws_clusterid }}_etcd', 'desc': '{{ openshift_aws_clusterid }} etcd instances'}, 'infra': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 32000, 'from_port': 30000, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_infra', 'desc': '{{ openshift_aws_clusterid }} infra node instances'}}
-| openshift_aws_elb_security_groups | ['{{ openshift_aws_clusterid }}', '{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}']
-| openshift_aws_vpc_tags | {'Name': '{{ openshift_aws_vpc_name }}'}
-| openshift_aws_create_security_groups | False
-| openshift_aws_create_iam_cert | False
-| openshift_aws_create_scale_group | True
-| openshift_aws_ami_encrypt | False
-| openshift_aws_node_group_config_node_volumes | [{'volume_size': 100, 'delete_on_termination': True, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
-| openshift_aws_elb_instance_filter | {'tag:host-type': '{{ openshift_aws_node_group_type }}', 'tag:clusterid': '{{ openshift_aws_clusterid }}', 'instance-state-name': 'running'}
-| openshift_aws_region | us-east-1
-| openshift_aws_elb_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}
-| openshift_aws_elb_idle_timout | 400
-| openshift_aws_subnet_name | us-east-1c
-| openshift_aws_node_group_config_tags | {{ openshift_aws_clusterid | openshift_aws_build_instance_tags(openshift_aws_kubernetes_cluster_status) }}
-| openshift_aws_create_launch_config | True
-| openshift_aws_ami_tags | {'bootstrap': 'true', 'clusterid': '{{ openshift_aws_clusterid }}', 'openshift-created': 'true'}
-| openshift_aws_ami_name | openshift-gi
-| openshift_aws_node_group_config_master_volumes | [{'volume_size': 100, 'delete_on_termination': False, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
-| openshift_aws_vpc_name | {{ openshift_aws_clusterid }}
-| openshift_aws_elb_listeners | {'master': {'internal': [{'instance_port': 80, 'instance_protocol': 'tcp', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'tcp', 'load_balancer_port': 443, 'protocol': 'tcp'}], 'external': [{'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 443, 'ssl_certificate_id': '{{ openshift_aws_elb_cert_arn }}', 'protocol': 'ssl'}]}}
-|
-
-
-Dependencies
-------------
+Appropriate AWS credentials and permissions are required.
+
+
Example Playbook
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index ea09857b0..5371588cf 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -4,7 +4,6 @@ openshift_aws_create_iam_cert: True
openshift_aws_create_security_groups: True
openshift_aws_create_launch_config: True
openshift_aws_create_scale_group: True
-openshift_aws_kubernetes_cluster_status: owned # or shared
openshift_aws_node_group_type: master
openshift_aws_wait_for_ssh: True
@@ -13,6 +12,7 @@ openshift_aws_clusterid: default
openshift_aws_region: us-east-1
openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"
+openshift_aws_kubernetes_cluster_status: "{{ openshift_aws_clusterid }}"
openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
openshift_aws_iam_cert_path: ''
@@ -89,6 +89,10 @@ openshift_aws_node_group_config_node_volumes:
delete_on_termination: True
openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}"
+openshift_aws_node_group_termination_policy: Default
+openshift_aws_node_group_replace_instances: []
+openshift_aws_node_group_replace_all_instances: False
+openshift_aws_node_group_config_extra_labels: {}
openshift_aws_node_group_config:
tags: "{{ openshift_aws_node_group_config_tags }}"
@@ -105,7 +109,11 @@ openshift_aws_node_group_config:
tags:
host-type: master
sub-host-type: default
+ labels:
+ type: master
wait_for_instances: True
+ termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
+ replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
compute:
instance_type: m4.xlarge
ami: "{{ openshift_aws_ami }}"
@@ -119,6 +127,10 @@ openshift_aws_node_group_config:
tags:
host-type: node
sub-host-type: compute
+ labels:
+ type: compute
+ termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
+ replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
infra:
instance_type: m4.xlarge
ami: "{{ openshift_aws_ami }}"
@@ -132,6 +144,10 @@ openshift_aws_node_group_config:
tags:
host-type: node
sub-host-type: infra
+ labels:
+ type: infra
+ termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
+ replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
openshift_aws_elb_security_groups:
- "{{ openshift_aws_clusterid }}"
@@ -211,3 +227,7 @@ openshift_aws_vpc:
az: "us-east-1e"
- cidr: 172.31.16.0/20
az: "us-east-1a"
+
+openshift_aws_node_run_bootstrap_startup: True
+openshift_aws_node_user_data: ''
+openshift_aws_node_config_namespace: openshift-node
diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml
index e6be9969c..8b7b02a0e 100644
--- a/roles/openshift_aws/tasks/launch_config.yml
+++ b/roles/openshift_aws/tasks/launch_config.yml
@@ -4,6 +4,11 @@
when:
- openshift_aws_ami is undefined
+- fail:
+ msg: "Ensure that openshift_deployment_type is defined."
+ when:
+ - openshift_deployment_type is undefined
+
- name: query vpc
ec2_vpc_net_facts:
region: "{{ openshift_aws_region }}"
@@ -27,23 +32,7 @@
image_id: "{{ openshift_aws_ami }}"
instance_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].instance_type }}"
security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}"
- user_data: |-
- #cloud-config
- {% if openshift_aws_node_group_type != 'master' %}
- write_files:
- - path: /root/csr_kubeconfig
- owner: root:root
- permissions: '0640'
- content: {{ openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }}
- - path: /root/openshift_settings
- owner: root:root
- permissions: '0640'
- content:
- openshift_type: "{{ openshift_aws_node_group_type }}"
- runcmd:
- - [ systemctl, enable, atomic-openshift-node]
- - [ systemctl, start, atomic-openshift-node]
- {% endif %}
+ user_data: "{{ lookup('template', 'user_data.j2') }}"
key_name: "{{ openshift_aws_ssh_key_name }}"
ebs_optimized: False
volumes: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].volumes }}"
diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml
index 1384bae59..25ae6ce1c 100644
--- a/roles/openshift_aws/tasks/provision_instance.yml
+++ b/roles/openshift_aws/tasks/provision_instance.yml
@@ -1,4 +1,8 @@
---
+- name: set openshift_node_bootstrap to True when building AMI
+ set_fact:
+ openshift_node_bootstrap: True
+
- name: query vpc
ec2_vpc_net_facts:
region: "{{ openshift_aws_region }}"
@@ -53,10 +57,6 @@
timeout: 300
search_regex: OpenSSH
-- name: Pause 10 seconds to ensure ssh actually accepts logins
- pause:
- seconds: 20
-
- name: add host to nodes
add_host:
groups: nodes
diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml
index 3e969fc43..eb31636e7 100644
--- a/roles/openshift_aws/tasks/scale_group.yml
+++ b/roles/openshift_aws/tasks/scale_group.yml
@@ -28,5 +28,7 @@
load_balancers: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].elbs if 'elbs' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}"
wait_for_instances: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].wait_for_instances | default(False)}}"
vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
+ replace_instances: "{{ openshift_aws_node_group_replace_instances if openshift_aws_node_group_replace_instances != [] else omit }}"
+ replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != [] else (openshift_aws_node_group_config[openshift_aws_node_group_type].replace_all_instances | default(omit)) }}"
tags:
- "{{ openshift_aws_node_group_config.tags | combine(openshift_aws_node_group_config[openshift_aws_node_group_type].tags) }}"
diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml
index 0cb749dcc..d319fdd1a 100644
--- a/roles/openshift_aws/tasks/seal_ami.yml
+++ b/roles/openshift_aws/tasks/seal_ami.yml
@@ -1,4 +1,11 @@
---
+- name: Remove any ansible facts created during AMI creation
+ file:
+ path: "/etc/ansible/facts.d/{{ item }}"
+ state: absent
+ with_items:
+ - openshift.fact
+
- name: fetch newly created instances
ec2_remote_facts:
region: "{{ openshift_aws_region }}"
diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2
new file mode 100644
index 000000000..ed9c0ed0b
--- /dev/null
+++ b/roles/openshift_aws/templates/user_data.j2
@@ -0,0 +1,26 @@
+{% if openshift_aws_node_user_data is defined and openshift_aws_node_user_data != '' %}
+{{ openshift_aws_node_user_data }}
+{% else %}
+#cloud-config
+write_files:
+- path: /root/openshift_bootstrap/openshift_settings.yaml
+ owner: 'root:root'
+ permissions: '0640'
+ content: |
+ openshift_group_type: {{ openshift_aws_node_group_type }}
+{% if openshift_aws_node_group_type != 'master' %}
+- path: /etc/origin/node/csr_kubeconfig
+ owner: 'root:root'
+ permissions: '0640'
+ encoding: b64
+ content: {{ openshift_aws_launch_config_bootstrap_token | b64encode }}
+{% endif %}
+runcmd:
+{% if openshift_aws_node_run_bootstrap_startup %}
+- [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml]
+{% endif %}
+{% if openshift_aws_node_group_type != 'master' %}
+- [ systemctl, enable, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
+- [ systemctl, start, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
+{% endif %}
+{% endif %}
diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml
index 3a866cedf..7a5bebf6f 100644
--- a/roles/openshift_excluder/tasks/install.yml
+++ b/roles/openshift_excluder/tasks/install.yml
@@ -6,19 +6,46 @@
block:
- - name: Install docker excluder
+ - name: Install docker excluder - yum
package:
name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
state: "{{ r_openshift_excluder_docker_package_state }}"
when:
- r_openshift_excluder_enable_docker_excluder | bool
+ - ansible_pkg_mgr == "yum"
- - name: Install openshift excluder
+
+ # For DNF we do not need the "*" and if we add it, it causes an error because
+ # it's not a valid pkg_spec
+ #
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1199432
+ - name: Install docker excluder - dnf
+ package:
+ name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ state: "{{ r_openshift_excluder_docker_package_state }}"
+ when:
+ - r_openshift_excluder_enable_docker_excluder | bool
+ - ansible_pkg_mgr == "dnf"
+
+ - name: Install openshift excluder - yum
package:
name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
state: "{{ r_openshift_excluder_package_state }}"
when:
- r_openshift_excluder_enable_openshift_excluder | bool
+ - ansible_pkg_mgr == "yum"
+
+ # For DNF we do not need the "*" and if we add it, it causes an error because
+ # it's not a valid pkg_spec
+ #
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1199432
+ - name: Install openshift excluder - dnf
+ package:
+ name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ state: "{{ r_openshift_excluder_package_state }}"
+ when:
+ - r_openshift_excluder_enable_openshift_excluder | bool
+ - ansible_pkg_mgr == "dnf"
- set_fact:
r_openshift_excluder_install_ran: True
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 215ff4b72..33028fea4 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -498,6 +498,20 @@ def set_selectors(facts):
facts['hosted']['etcd'] = {}
if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']:
facts['hosted']['etcd']['selector'] = None
+ if 'prometheus' not in facts:
+ facts['prometheus'] = {}
+ if 'selector' not in facts['prometheus'] or facts['prometheus']['selector'] in [None, 'None']:
+ facts['prometheus']['selector'] = None
+ if 'alertmanager' not in facts['prometheus']:
+ facts['prometheus']['alertmanager'] = {}
+ # pylint: disable=line-too-long
+ if 'selector' not in facts['prometheus']['alertmanager'] or facts['prometheus']['alertmanager']['selector'] in [None, 'None']:
+ facts['prometheus']['alertmanager']['selector'] = None
+ if 'alertbuffer' not in facts['prometheus']:
+ facts['prometheus']['alertbuffer'] = {}
+ # pylint: disable=line-too-long
+ if 'selector' not in facts['prometheus']['alertbuffer'] or facts['prometheus']['alertbuffer']['selector'] in [None, 'None']:
+ facts['prometheus']['alertbuffer']['selector'] = None
return facts
@@ -1779,7 +1793,8 @@ class OpenShiftFacts(object):
'node',
'logging',
'loggingops',
- 'metrics']
+ 'metrics',
+ 'prometheus']
# Disabling too-many-arguments, this should be cleaned up as a TODO item.
# pylint: disable=too-many-arguments,no-value-for-parameter
@@ -1907,7 +1922,6 @@ class OpenShiftFacts(object):
portal_net='172.30.0.0/16',
client_binary='oc', admin_binary='oadm',
dns_domain='cluster.local',
- debug_level=2,
config_base='/etc/origin')
if 'master' in roles:
@@ -2069,6 +2083,66 @@ class OpenShiftFacts(object):
)
)
+ defaults['prometheus'] = dict(
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='prometheus',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ )
+
+ defaults['prometheus']['alertmanager'] = dict(
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='prometheus-alertmanager',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ )
+
+ defaults['prometheus']['alertbuffer'] = dict(
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='prometheus-alertbuffer',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ )
+
return defaults
def guess_host_provider(self):
diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh
index d72a11de1..64c7cd019 100644
--- a/roles/openshift_gcp/templates/provision.j2.sh
+++ b/roles/openshift_gcp/templates/provision.j2.sh
@@ -313,7 +313,7 @@ fi
# wait until all node groups are stable
{% for node_group in openshift_gcp_node_group_config %}
# wait for stable {{ node_group.name }}
-( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=300) &
+( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=600 ) &
{% endfor %}
diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml
index 47dc9171d..8fc70cecb 100644
--- a/roles/openshift_hosted_facts/tasks/main.yml
+++ b/roles/openshift_hosted_facts/tasks/main.yml
@@ -16,4 +16,4 @@
| oo_openshift_env }}"
openshift_env_structures:
- 'openshift.hosted.router.*'
- with_items: [hosted, logging, loggingops, metrics]
+ with_items: [hosted, logging, loggingops, metrics, prometheus]
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 829c78728..69eb9283d 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -69,6 +69,9 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
- `openshift_logging_fluentd_buffer_size_limit`: Buffer chunk limit for Fluentd. Defaults to 1m.
- `openshift_logging_fluentd_file_buffer_limit`: Fluentd will set the value to the file buffer limit. Defaults to '1Gi' per destination.
+- `openshift_logging_fluentd_audit_container_engine`: When set to `True`, the container engine's audit log is collected and stored in ES.
+- `openshift_logging_fluentd_audit_file`: Location of audit log file. The default is `/var/log/audit/audit.log`
+- `openshift_logging_fluentd_audit_pos_file`: Location of fluentd in_tail position file for the audit log file. The default is `/var/log/audit/audit.log.pos`
- `openshift_logging_es_host`: The name of the ES service Fluentd should send logs to. Defaults to 'logging-es'.
- `openshift_logging_es_port`: The port for the ES service Fluentd should sent its logs to. Defaults to '9200'.
diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml
index 554aa5bb2..fc48b7f71 100644
--- a/roles/openshift_logging_elasticsearch/defaults/main.yml
+++ b/roles/openshift_logging_elasticsearch/defaults/main.yml
@@ -40,8 +40,6 @@ openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_
# config the es plugin to write kibana index based on the index mode
openshift_logging_elasticsearch_kibana_index_mode: 'unique'
-openshift_logging_elasticsearch_proxy_image_prefix: "openshift/oauth-proxy"
-openshift_logging_elasticsearch_proxy_image_version: "v1.0.0"
openshift_logging_elasticsearch_proxy_cpu_limit: "100m"
openshift_logging_elasticsearch_proxy_memory_limit: "64Mi"
openshift_logging_elasticsearch_prometheus_sa: "system:serviceaccount:{{openshift_prometheus_namespace | default('prometheus')}}:prometheus"
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 8380a25f9..44f6b00f3 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -17,6 +17,17 @@
- include: determine_version.yaml
+- name: Set default image variables based on deployment_type
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "default_images.yml"
+
+- name: Set elasticsearch_prefix image facts
+ set_fact:
+ openshift_logging_elasticsearch_proxy_image_prefix: "{{ openshift_logging_elasticsearch_proxy_image_prefix | default(__openshift_logging_elasticsearch_proxy_image_prefix) }}"
+ openshift_logging_elasticsearch_proxy_image_version: "{{ openshift_logging_elasticsearch_proxy_image_version | default(__openshift_logging_elasticsearch_proxy_image_version) }}"
+
# allow passing in a tempdir
- name: Create temp directory for doing work in
command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
@@ -52,7 +63,7 @@
name: "aggregated-logging-elasticsearch"
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
when:
- - openshift_logging_image_pull_secret == ''
+ - openshift_logging_image_pull_secret == ''
# rolebinding reader
- copy:
@@ -66,7 +77,7 @@
kind: clusterrole
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
files:
- - "{{ tempdir }}/rolebinding-reader.yml"
+ - "{{ tempdir }}/rolebinding-reader.yml"
delete_after: true
# SA roles
@@ -107,8 +118,8 @@
- fail:
msg: "There was an error creating the logging-metrics-role and binding: {{prometheus_out}}"
when:
- - "prometheus_out.stderr | length > 0"
- - "'already exists' not in prometheus_out.stderr"
+ - "prometheus_out.stderr | length > 0"
+ - "'already exists' not in prometheus_out.stderr"
# View role and binding
- name: Generate logging-elasticsearch-view-role
@@ -120,8 +131,8 @@
roleRef:
name: view
subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
+ - kind: ServiceAccount
+ name: aggregated-logging-elasticsearch
changed_when: no
- name: Set logging-elasticsearch-view-role role
@@ -131,18 +142,18 @@
kind: rolebinding
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
files:
- - "{{ tempdir }}/logging-elasticsearch-view-role.yaml"
+ - "{{ tempdir }}/logging-elasticsearch-view-role.yaml"
delete_after: true
# configmap
- assert:
that:
- - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes
+ - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes
msg: "The openshift_logging_elasticsearch_kibana_index_mode '{{ openshift_logging_elasticsearch_kibana_index_mode }}' only supports one of: {{ __kibana_index_modes | join(', ') }}"
- assert:
that:
- - "{{ openshift_logging_es_log_appenders | length > 0 }}"
+ - "{{ openshift_logging_es_log_appenders | length > 0 }}"
msg: "The openshift_logging_es_log_appenders '{{ openshift_logging_es_log_appenders }}' has an unrecognized option and only supports the following as a list: {{ __es_log_appenders | join(', ') }}"
- template:
@@ -198,22 +209,22 @@
name: "logging-elasticsearch"
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
files:
- - name: key
- path: "{{ generated_certs_dir }}/logging-es.jks"
- - name: truststore
- path: "{{ generated_certs_dir }}/truststore.jks"
- - name: searchguard.key
- path: "{{ generated_certs_dir }}/elasticsearch.jks"
- - name: searchguard.truststore
- path: "{{ generated_certs_dir }}/truststore.jks"
- - name: admin-key
- path: "{{ generated_certs_dir }}/system.admin.key"
- - name: admin-cert
- path: "{{ generated_certs_dir }}/system.admin.crt"
- - name: admin-ca
- path: "{{ generated_certs_dir }}/ca.crt"
- - name: admin.jks
- path: "{{ generated_certs_dir }}/system.admin.jks"
+ - name: key
+ path: "{{ generated_certs_dir }}/logging-es.jks"
+ - name: truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: searchguard.key
+ path: "{{ generated_certs_dir }}/elasticsearch.jks"
+ - name: searchguard.truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: admin-key
+ path: "{{ generated_certs_dir }}/system.admin.key"
+ - name: admin-cert
+ path: "{{ generated_certs_dir }}/system.admin.crt"
+ - name: admin-ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: admin.jks
+ path: "{{ generated_certs_dir }}/system.admin.jks"
# services
- name: Set logging-{{ es_component }}-cluster service
@@ -227,7 +238,7 @@
labels:
logging-infra: 'support'
ports:
- - port: 9300
+ - port: 9300
- name: Set logging-{{ es_component }} service
oc_service:
@@ -240,8 +251,8 @@
labels:
logging-infra: 'support'
ports:
- - port: 9200
- targetPort: "restapi"
+ - port: 9200
+ targetPort: "restapi"
- name: Set logging-{{ es_component}}-prometheus service
oc_service:
@@ -251,9 +262,9 @@
labels:
logging-infra: 'support'
ports:
- - name: proxy
- port: 443
- targetPort: 4443
+ - name: proxy
+ port: 443
+ targetPort: 4443
selector:
component: "{{ es_component }}-prometheus"
provider: openshift
@@ -281,46 +292,46 @@
# so we check for the presence of 'stderr' to determine if the obj exists or not
# the RC for existing and not existing is both 0
- when:
- - logging_elasticsearch_pvc.results.stderr is defined
- - openshift_logging_elasticsearch_storage_type == "pvc"
+ - logging_elasticsearch_pvc.results.stderr is defined
+ - openshift_logging_elasticsearch_storage_type == "pvc"
block:
- # storageclasses are used by default but if static then disable
- # storageclasses with the storageClassName set to "" in pvc.j2
- - name: Creating ES storage template - static
- template:
- src: pvc.j2
- dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
- vars:
- obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
- access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
- pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
- storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}"
- when:
- - not openshift_logging_elasticsearch_pvc_dynamic | bool
-
- # Storageclasses are used by default if configured
- - name: Creating ES storage template - dynamic
- template:
- src: pvc.j2
- dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
- vars:
- obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
- access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
- pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
- when:
- - openshift_logging_elasticsearch_pvc_dynamic | bool
-
- - name: Set ES storage
- oc_obj:
- state: present
- kind: pvc
- name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- namespace: "{{ openshift_logging_elasticsearch_namespace }}"
- files:
- - "{{ tempdir }}/templates/logging-es-pvc.yml"
- delete_after: true
+ # storageclasses are used by default but if static then disable
+ # storageclasses with the storageClassName set to "" in pvc.j2
+ - name: Creating ES storage template - static
+ template:
+ src: pvc.j2
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}"
+ when:
+ - not openshift_logging_elasticsearch_pvc_dynamic | bool
+
+ # Storageclasses are used by default if configured
+ - name: Creating ES storage template - dynamic
+ template:
+ src: pvc.j2
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ when:
+ - openshift_logging_elasticsearch_pvc_dynamic | bool
+
+ - name: Set ES storage
+ oc_obj:
+ state: present
+ kind: pvc
+ name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - "{{ tempdir }}/templates/logging-es-pvc.yml"
+ delete_after: true
- set_fact:
es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}"
@@ -341,6 +352,7 @@
logging_component: elasticsearch
deploy_name: "{{ es_deploy_name }}"
image: "{{ openshift_logging_elasticsearch_image_prefix }}logging-elasticsearch:{{ openshift_logging_elasticsearch_image_version }}"
+ proxy_image: "{{ openshift_logging_elasticsearch_proxy_image_prefix }}oauth-proxy:{{ openshift_logging_elasticsearch_proxy_image_version }}"
es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}"
es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"
es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}"
@@ -356,7 +368,7 @@
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
kind: dc
files:
- - "{{ tempdir }}/templates/logging-es-dc.yml"
+ - "{{ tempdir }}/templates/logging-es-dc.yml"
delete_after: true
- name: Retrieving the cert to use when generating secrets for the {{ es_component }} component
@@ -364,37 +376,37 @@
src: "{{ generated_certs_dir }}/{{ item.file }}"
register: key_pairs
with_items:
- - { name: "ca_file", file: "ca.crt" }
- - { name: "es_key", file: "system.logging.es.key" }
- - { name: "es_cert", file: "system.logging.es.crt" }
+ - { name: "ca_file", file: "ca.crt" }
+ - { name: "es_key", file: "system.logging.es.key" }
+ - { name: "es_cert", file: "system.logging.es.crt" }
when: openshift_logging_es_allow_external | bool
- set_fact:
es_key: "{{ lookup('file', openshift_logging_es_key) | b64encode }}"
when:
- - openshift_logging_es_key | trim | length > 0
- - openshift_logging_es_allow_external | bool
+ - openshift_logging_es_key | trim | length > 0
+ - openshift_logging_es_allow_external | bool
changed_when: false
- set_fact:
es_cert: "{{ lookup('file', openshift_logging_es_cert) | b64encode }}"
when:
- - openshift_logging_es_cert | trim | length > 0
- - openshift_logging_es_allow_external | bool
+ - openshift_logging_es_cert | trim | length > 0
+ - openshift_logging_es_allow_external | bool
changed_when: false
- set_fact:
es_ca: "{{ lookup('file', openshift_logging_es_ca_ext) | b64encode }}"
when:
- - openshift_logging_es_ca_ext | trim | length > 0
- - openshift_logging_es_allow_external | bool
+ - openshift_logging_es_ca_ext | trim | length > 0
+ - openshift_logging_es_allow_external | bool
changed_when: false
- set_fact:
es_ca: "{{ key_pairs | entry_from_named_pair('ca_file') }}"
when:
- - es_ca is not defined
- - openshift_logging_es_allow_external | bool
+ - es_ca is not defined
+ - openshift_logging_es_allow_external | bool
changed_when: false
- name: Generating Elasticsearch {{ es_component }} route template
@@ -425,7 +437,7 @@
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
kind: route
files:
- - "{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml"
+ - "{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml"
when: openshift_logging_es_allow_external | bool
## Placeholder for migration when necessary ##
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index 1ed886627..ce3b2eb83 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -40,7 +40,7 @@ spec:
{% endif %}
containers:
- name: proxy
- image: {{openshift_logging_elasticsearch_proxy_image_prefix}}:{{openshift_logging_elasticsearch_proxy_image_version}}
+ image: {{ proxy_image }}
imagePullPolicy: Always
args:
- --upstream-ca=/etc/elasticsearch/secret/admin-ca
@@ -86,7 +86,7 @@ spec:
requests:
memory: "{{es_memory_limit}}"
{% if es_container_security_context %}
- securityContext: {{ es_container_security_context | to_yaml }}
+ securityContext: {{ es_container_security_context | to_yaml }}
{% endif %}
ports:
-
diff --git a/roles/openshift_logging_elasticsearch/vars/default_images.yml b/roles/openshift_logging_elasticsearch/vars/default_images.yml
new file mode 100644
index 000000000..b7d105caf
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/vars/default_images.yml
@@ -0,0 +1,3 @@
+---
+__openshift_logging_elasticsearch_proxy_image_prefix: "docker.io/openshift/"
+__openshift_logging_elasticsearch_proxy_image_version: "v1.0.0"
diff --git a/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml
new file mode 100644
index 000000000..c87d48e27
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml
@@ -0,0 +1,3 @@
+---
+__openshift_logging_elasticsearch_proxy_image_prefix: "registry.access.redhat.com/openshift3/"
+__openshift_logging_elasticsearch_proxy_image_version: "v3.7"
diff --git a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
index 9ff4c7e80..ea1fd3efd 100644
--- a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
+++ b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
@@ -54,9 +54,9 @@ objects:
serviceAccount: aggregated-logging-eventrouter
serviceAccountName: aggregated-logging-eventrouter
{% if node_selector is iterable and node_selector | length > 0 %}
- nodeSelector:
+ nodeSelector:
{% for key, value in node_selector.iteritems() %}
- {{ key }}: "{{ value }}"
+ {{ key }}: "{{ value }}"
{% endfor %}
{% endif %}
containers:
diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml
index 82326bdd1..25f7580a4 100644
--- a/roles/openshift_logging_fluentd/defaults/main.yml
+++ b/roles/openshift_logging_fluentd/defaults/main.yml
@@ -56,3 +56,7 @@ openshift_logging_fluentd_aggregating_passphrase: none
#fluentd_secureforward_contents:
openshift_logging_fluentd_file_buffer_limit: 1Gi
+
+# Configure fluentd to tail the audit log file and filter the container engine's
+# logs out of it. Those logs are then stored in the ES operations index.
+openshift_logging_fluentd_audit_container_engine: False
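An inventory sketch that exercises the new flag, using the variable names consumed by tasks/main.yaml below (the audit file locations are illustrative assumptions, not defaults shipped by this patch):

openshift_logging_fluentd_audit_container_engine: true
openshift_logging_fluentd_audit_file: /var/log/audit/audit.log           # illustrative
openshift_logging_fluentd_audit_pos_file: /var/log/audit/audit.log.pos   # illustrative
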
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index 37960afd1..06bb35dbc 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -108,7 +108,6 @@
src: secure-forward.conf
dest: "{{ tempdir }}/secure-forward.conf"
when: fluentd_secureforward_contents is undefined
-
changed_when: no
- copy:
@@ -173,6 +172,9 @@
ops_port: "{{ openshift_logging_fluentd_ops_port }}"
fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}"
fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}"
+ audit_container_engine: "{{ openshift_logging_fluentd_audit_container_engine | default(False) | bool }}"
+ audit_log_file: "{{ openshift_logging_fluentd_audit_file | default() }}"
+ audit_pos_log_file: "{{ openshift_logging_fluentd_audit_pos_file | default() }}"
check_mode: no
changed_when: no
diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2
index b5f27b60d..644b70031 100644
--- a/roles/openshift_logging_fluentd/templates/fluentd.j2
+++ b/roles/openshift_logging_fluentd/templates/fluentd.j2
@@ -66,7 +66,9 @@ spec:
readOnly: true
- name: filebufferstorage
mountPath: /var/lib/fluentd
-{% if openshift_logging_mux_client_mode is defined %}
+{% if openshift_logging_mux_client_mode is defined and
+ ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or
+ (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %}
- name: muxcerts
mountPath: /etc/fluent/muxkeys
readOnly: true
@@ -114,7 +116,9 @@ spec:
resource: limits.memory
- name: "FILE_BUFFER_LIMIT"
value: "{{ openshift_logging_fluentd_file_buffer_limit | default('1Gi') }}"
-{% if openshift_logging_mux_client_mode is defined %}
+{% if openshift_logging_mux_client_mode is defined and
+ ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or
+ (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %}
- name: "MUX_CLIENT_MODE"
value: "{{ openshift_logging_mux_client_mode }}"
{% endif %}
@@ -168,6 +172,28 @@ spec:
value: "{{ openshift_logging_fluentd_remote_syslog_payload_key }}"
{% endif %}
+{% if audit_container_engine %}
+ - name: "AUDIT_CONTAINER_ENGINE"
+ value: "{{ audit_container_engine | lower }}"
+{% endif %}
+
+{% if audit_container_engine %}
+ - name: "NODE_NAME"
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+{% endif %}
+
+{% if audit_log_file != '' %}
+ - name: AUDIT_FILE
+ value: "{{ audit_log_file }}"
+{% endif %}
+
+{% if audit_pos_log_file != '' %}
+ - name: AUDIT_POS_FILE
+ value: "{{ audit_pos_log_file }}"
+{% endif %}
+
volumes:
- name: runlogjournal
hostPath:
@@ -196,7 +222,9 @@ spec:
- name: dockerdaemoncfg
hostPath:
path: /etc/docker
-{% if openshift_logging_mux_client_mode is defined %}
+{% if openshift_logging_mux_client_mode is defined and
+ ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or
+ (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %}
- name: muxcerts
secret:
secretName: logging-mux
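The tightened conditions above only mount the mux certificates when mux is actually in play; a sketch of the inventory settings involved (the mode values are assumptions to be checked against the logging role documentation):

openshift_logging_use_mux: true
openshift_logging_mux_client_mode: minimal   # assumed value; 'maximal' is the other documented mode
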
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 73e935d3f..3da861d03 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -1,4 +1,9 @@
---
+# openshift_master_defaults_in_use is a workaround to detect whether these plays
+# are being consumed from within the role or from outside of it.
+openshift_master_defaults_in_use: True
+openshift_master_debug_level: "{{ debug_level | default(2) }}"
+
r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
@@ -26,6 +31,9 @@ oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
oreg_auth_credentials_replace: False
l_bind_docker_reg_auth: False
+containerized_svc_dir: "/usr/lib/systemd/system"
+ha_svc_template_path: "native-cluster"
+
# NOTE
# r_openshift_master_*_default may be defined external to this role.
# openshift_use_*, if defined, may affect other roles or play behavior.
@@ -38,8 +46,99 @@ r_openshift_master_use_nuage: "{{ r_openshift_master_use_nuage_default }}"
r_openshift_master_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"
r_openshift_master_use_contiv: "{{ r_openshift_master_use_contiv_default }}"
+r_openshift_master_use_kuryr_default: "{{ openshift_use_kuryr | default(False) }}"
+r_openshift_master_use_kuryr: "{{ r_openshift_master_use_kuryr_default }}"
+
r_openshift_master_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
r_openshift_master_data_dir: "{{ r_openshift_master_data_dir_default }}"
r_openshift_master_sdn_network_plugin_name_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}"
r_openshift_master_sdn_network_plugin_name: "{{ r_openshift_master_sdn_network_plugin_name_default }}"
+
+openshift_master_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}"
+openshift_master_image_config_latest: "{{ openshift_master_image_config_latest_default }}"
+
+openshift_master_config_dir_default: "{{ (openshift.common.config_base | default('/etc/origin/master')) ~ '/master' }}"
+openshift_master_config_dir: "{{ openshift_master_config_dir_default }}"
+openshift_master_cloud_provider: "{{ openshift_cloudprovider_kind | default('aws') }}"
+
+openshift_master_node_config_networkconfig_mtu: 1450
+
+openshift_master_node_config_kubeletargs_cpu: 500m
+openshift_master_node_config_kubeletargs_mem: 512M
+
+openshift_master_bootstrap_enabled: False
+
+openshift_master_client_binary: "{{ openshift.common.client_binary if openshift is defined else 'oc' }}"
+
+openshift_master_config_imageconfig_format: "{{ oreg_url if oreg_url != '' else 'registry.access.redhat.com/openshift3/ose-${component}:${version}' }}"
+
+# these are for the default settings in a generated node-config.yaml
+openshift_master_node_config_default_edits:
+- key: nodeName
+ state: absent
+- key: dnsBindAddress
+ value: 127.0.0.1:53
+- key: dnsDomain
+ value: cluster.local
+- key: dnsRecursiveResolvConf
+ value: /etc/origin/node/resolv.conf
+- key: imageConfig.format
+ value: "{{ openshift_master_config_imageconfig_format }}"
+- key: kubeletArguments.cloud-config
+ value:
+ - "/etc/origin/cloudprovider/{{ openshift_master_cloud_provider }}.conf"
+- key: kubeletArguments.cloud-provider
+ value:
+ - "{{ openshift_master_cloud_provider }}"
+- key: kubeletArguments.kube-reserved
+ value:
+ - "cpu={{ openshift_master_node_config_kubeletargs_cpu }},memory={{ openshift_master_node_config_kubeletargs_mem }}"
+- key: kubeletArguments.system-reserved
+ value:
+ - "cpu={{ openshift_master_node_config_kubeletargs_cpu }},memory={{ openshift_master_node_config_kubeletargs_mem }}"
+- key: enable-controller-attach-detach
+ value:
+ - 'true'
+- key: networkConfig.mtu
+ value: 8951
+- key: networkConfig.networkPluginName
+ value: "{{ r_openshift_master_sdn_network_plugin_name }}"
+- key: networkPluginName
+ value: "{{ r_openshift_master_sdn_network_plugin_name }}"
+
+
+# We support labels for all nodes here
+openshift_master_node_config_kubeletargs_default_labels: []
+# We do support overrides for node group labels
+openshift_master_node_config_kubeletargs_master_labels: []
+openshift_master_node_config_kubeletargs_infra_labels: []
+openshift_master_node_config_kubeletargs_compute_labels: []
+
+openshift_master_node_config_master:
+ type: master
+ edits:
+ - key: kubeletArguments.node-labels
+ value: "{{ openshift_master_node_config_kubeletargs_default_labels |
+ union(openshift_master_node_config_kubeletargs_master_labels) |
+ union(['type=master']) }}"
+openshift_master_node_config_infra:
+ type: infra
+ edits:
+ - key: kubeletArguments.node-labels
+ value: "{{ openshift_master_node_config_kubeletargs_default_labels |
+ union(openshift_master_node_config_kubeletargs_infra_labels) |
+ union(['type=infra']) }}"
+openshift_master_node_config_compute:
+ type: compute
+ edits:
+ - key: kubeletArguments.node-labels
+ value: "{{ openshift_master_node_config_kubeletargs_default_labels |
+ union(openshift_master_node_config_kubeletargs_compute_labels) |
+ union(['type=compute']) }}"
+
+openshift_master_node_configs:
+- "{{ openshift_master_node_config_infra }}"
+- "{{ openshift_master_node_config_compute }}"
+
+openshift_master_bootstrap_namespace: openshift-node
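The label variables above are meant to be overridden per node group; a sketch of how extra labels would be layered in (the label values are examples only):

openshift_master_node_config_kubeletargs_default_labels:
- "region=primary"
openshift_master_node_config_kubeletargs_infra_labels:
- "router=true"
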
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index a657668a9..a1cda2ad4 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -13,4 +13,5 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: lib_utils
- role: lib_os_firewall
diff --git a/roles/openshift_master/tasks/bootstrap.yml b/roles/openshift_master/tasks/bootstrap.yml
index 0013f5289..eee89743c 100644
--- a/roles/openshift_master/tasks/bootstrap.yml
+++ b/roles/openshift_master/tasks/bootstrap.yml
@@ -26,3 +26,66 @@
copy:
content: "{{ kubeconfig_out.stdout }}"
dest: "{{ openshift_master_config_dir }}/bootstrap.kubeconfig"
+
+- name: create a temp dir for this work
+ command: mktemp -d /tmp/openshift_node_config-XXXXXX
+ register: mktempout
+ run_once: true
+
+# This generation step exists so that we do not have to maintain
+# our own copy of the template. The node-config is generated by
+# the product itself, and the settings that follow will be
+# generated by the master.
+- name: generate a node-config dynamically
+ command: >
+ {{ openshift_master_client_binary }} adm create-node-config
+ --node-dir={{ mktempout.stdout }}/
+ --node=CONFIGMAP
+ --hostnames=test
+ --certificate-authority={{ openshift_master_config_dir }}/ca.crt
+ --signer-cert={{ openshift_master_config_dir }}/ca.crt
+ --signer-key={{ openshift_master_config_dir }}/ca.key
+ --signer-serial={{ openshift_master_config_dir }}/ca.serial.txt
+ --node-client-certificate-authority={{ openshift_master_config_dir }}/ca.crt
+ register: configgen
+ run_once: true
+
+- name: remove the default settings
+ yedit:
+ state: "{{ item.state | default('present') }}"
+ src: "{{ mktempout.stdout }}/node-config.yaml"
+ key: "{{ item.key }}"
+ value: "{{ item.value | default(omit) }}"
+ with_items: "{{ openshift_master_node_config_default_edits }}"
+ run_once: true
+
+- name: copy the generated config into each group
+ copy:
+ src: "{{ mktempout.stdout }}/node-config.yaml"
+ remote_src: true
+ dest: "{{ mktempout.stdout }}/node-config-{{ item.type }}.yaml"
+ with_items: "{{ openshift_master_node_configs }}"
+ run_once: true
+
+- name: "specialize the generated configs for node-config-{{ item.type }}"
+ yedit:
+ src: "{{ mktempout.stdout }}/node-config-{{ item.type }}.yaml"
+ edits: "{{ item.edits }}"
+ with_items: "{{ openshift_master_node_configs }}"
+ run_once: true
+
+- name: create node-config.yaml configmap
+ oc_configmap:
+ name: "node-config-{{ item.type }}"
+ namespace: "{{ openshift_master_bootstrap_namespace }}"
+ from_file:
+ node-config.yaml: "{{ mktempout.stdout }}/node-config-{{ item.type }}.yaml"
+ with_items: "{{ openshift_master_node_configs }}"
+ run_once: true
+
+- name: remove templated files
+ file:
+ dest: "{{ mktempout.stdout }}/"
+ state: absent
+ with_items: "{{ openshift_master_node_configs }}"
+ run_once: true
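Once these tasks have run, one configmap per entry in openshift_master_node_configs should exist in the bootstrap namespace; a sketch of an ad-hoc verification task (not part of this patch):

- name: verify the generated node-config configmaps exist
  command: oc get configmap node-config-infra node-config-compute -n openshift-node
  changed_when: false
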
diff --git a/roles/openshift_master/tasks/check_master_api_is_ready.yml b/roles/openshift_master/tasks/check_master_api_is_ready.yml
new file mode 100644
index 000000000..7e8a7a596
--- /dev/null
+++ b/roles/openshift_master/tasks/check_master_api_is_ready.yml
@@ -0,0 +1,14 @@
+---
+- name: Wait for API to become available
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {{ openshift.master.api_url }}/healthz/ready
+ register: l_api_available_output
+ until: l_api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ run_once: true
+ changed_when: false
diff --git a/roles/openshift_master/tasks/configure_external_etcd.yml b/roles/openshift_master/tasks/configure_external_etcd.yml
new file mode 100644
index 000000000..b0590ac84
--- /dev/null
+++ b/roles/openshift_master/tasks/configure_external_etcd.yml
@@ -0,0 +1,17 @@
+---
+- name: Remove etcdConfig section
+ yedit:
+ src: /etc/origin/master/master-config.yaml
+ key: "etcdConfig"
+ state: absent
+- name: Set etcdClientInfo.ca to master.etcd-ca.crt
+ yedit:
+ src: /etc/origin/master/master-config.yaml
+ key: etcdClientInfo.ca
+ value: master.etcd-ca.crt
+- name: Set etcdClientInfo.urls to the external etcd
+ yedit:
+ src: /etc/origin/master/master-config.yaml
+ key: etcdClientInfo.urls
+ value:
+ - "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
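After the three edits above, the etcd client section of master-config.yaml would look roughly like this (shown for a single external etcd member; the host and port come from the etcd facts and are assumptions here):

etcdClientInfo:
  ca: master.etcd-ca.crt
  urls:
  - https://192.0.2.10:2379
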
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 82b4b420c..824a5886e 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -311,23 +311,7 @@
# A separate wait is required here for native HA since notifies will
# be resolved after all tasks in the role.
-- name: Wait for API to become available
- # Using curl here since the uri module requires python-httplib2 and
- # wait_for port doesn't provide health information.
- command: >
- curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
- --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
- {{ openshift.master.api_url }}/healthz/ready
- register: l_api_available_output
- until: l_api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- run_once: true
- changed_when: false
+- include: check_master_api_is_ready.yml
when:
- openshift.master.cluster_method == 'native'
- master_api_service_status_changed | bool
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
index 2644f235e..63d483760 100644
--- a/roles/openshift_master/tasks/registry_auth.yml
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -1,14 +1,4 @@
---
-# We need to setup some variables as this play might be called directly
-# from outside of the role.
-- set_fact:
- oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
- when: oreg_auth_credentials_path is not defined
-
-- set_fact:
- oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
- when: oreg_host is not defined
-
- name: Check for credentials file for registry auth
stat:
path: "{{ oreg_auth_credentials_path }}"
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 8de62c59a..fcc66044b 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -1,31 +1,9 @@
---
-# This file is included both in the openshift_master role and in the upgrade
-# playbooks. For that reason the ha_svc variables are use set_fact instead of
-# the vars directory on the role.
+# systemd_units.yml is included both in the openshift_master role and in the upgrade
+# playbooks.
-# This play may be consumed outside the role, we need to ensure that
-# openshift_master_config_dir is set.
-- name: Set openshift_master_config_dir if unset
- set_fact:
- openshift_master_config_dir: '/etc/origin/master'
- when: openshift_master_config_dir is not defined
-
-# This play may be consumed outside the role, we need to ensure that
-# r_openshift_master_data_dir is set.
-- name: Set r_openshift_master_data_dir if unset
- set_fact:
- r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}"
- when: r_openshift_master_data_dir is not defined
-
-- include: registry_auth.yml
-
-- name: Remove the legacy master service if it exists
- include: clean_systemd_units.yml
-
-- name: Init HA Service Info
- set_fact:
- containerized_svc_dir: "/usr/lib/systemd/system"
- ha_svc_template_path: "native-cluster"
+- include: upgrade_facts.yml
+ when: openshift_master_defaults_in_use is not defined
- name: Set HA Service Info for containerized installs
set_fact:
@@ -34,6 +12,11 @@
when:
- openshift.common.is_containerized | bool
+- include: registry_auth.yml
+
+- name: Remove the legacy master service if it exists
+ include: clean_systemd_units.yml
+
# This is the image used for both HA and non-HA clusters:
- name: Pre-pull master image
command: >
diff --git a/roles/openshift_master/tasks/upgrade_facts.yml b/roles/openshift_master/tasks/upgrade_facts.yml
new file mode 100644
index 000000000..f6ad438aa
--- /dev/null
+++ b/roles/openshift_master/tasks/upgrade_facts.yml
@@ -0,0 +1,33 @@
+---
+# This file exists because we call systemd_units.yml from outside of the role
+# during upgrades. When we remove this pattern, we can probably
+# eliminate most of these set_fact items.
+
+- name: Set openshift_master_config_dir if unset
+ set_fact:
+ openshift_master_config_dir: '/etc/origin/master'
+ when: openshift_master_config_dir is not defined
+
+- name: Set r_openshift_master_data_dir if unset
+ set_fact:
+ r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+ when: r_openshift_master_data_dir is not defined
+
+- set_fact:
+ oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
+ when: oreg_auth_credentials_path is not defined
+
+- set_fact:
+ oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
+ when: oreg_host is not defined
+
+- name: Set openshift_master_debug_level
+ set_fact:
+ openshift_master_debug_level: "{{ debug_level | default(2) }}"
+ when:
+ - openshift_master_debug_level is not defined
+
+- name: Init HA Service Info
+ set_fact:
+ containerized_svc_dir: "{{ containerized_svc_dir | default('/usr/lib/systemd/system') }}"
+ ha_svc_template_path: "{{ ha_svc_template_path | default('native-cluster') }}"
diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2
index b931f1414..7ec26ceb7 100644
--- a/roles/openshift_master/templates/atomic-openshift-master.j2
+++ b/roles/openshift_master/templates/atomic-openshift-master.j2
@@ -1,4 +1,4 @@
-OPTIONS=--loglevel={{ openshift.master.debug_level | default(2) }}
+OPTIONS=--loglevel={{ openshift_master_debug_level }}
CONFIG_FILE={{ openshift_master_config_file }}
{# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}
{% if openshift_master_is_scaleup_host %}
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 9b3fbcf49..40775571f 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -115,7 +115,7 @@ etcdStorageConfig:
openShiftStorageVersion: v1
imageConfig:
format: {{ openshift.master.registry_url }}
- latest: false
+ latest: {{ openshift_master_image_config_latest }}
{% if 'image_policy_config' in openshift.master %}
imagePolicyConfig:{{ openshift.master.image_policy_config | to_padded_yaml(level=1) }}
{% endif %}
@@ -179,7 +179,7 @@ masterPublicURL: {{ openshift.master.public_api_url }}
networkConfig:
clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
-{% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_sdn_network_plugin_name == 'cni' %}
+{% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_use_kuryr or r_openshift_master_sdn_network_plugin_name == 'cni' %}
networkPluginName: {{ r_openshift_master_sdn_network_plugin_name_default }}
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
@@ -275,12 +275,5 @@ servingInfo:
- {{ cipher_suite }}
{% endfor %}
{% endif %}
-{% if openshift_template_service_broker_namespaces is defined %}
-templateServiceBrokerConfig:
- templateNamespaces:
-{% for namespace in openshift_template_service_broker_namespaces %}
- - {{ namespace }}
-{% endfor %}
-{% endif %}
volumeConfig:
dynamicProvisioningEnabled: {{ openshift.master.dynamic_provisioning_enabled }}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
index 63eb3ea1b..cc21b37af 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
@@ -1,4 +1,4 @@
-OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }}
+OPTIONS=--loglevel={{ openshift_master_debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }}
CONFIG_FILE={{ openshift_master_config_file }}
{# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}
{% if openshift_master_is_scaleup_host %}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
index 0adfd05b6..493fc510e 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
@@ -1,4 +1,4 @@
-OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }}
+OPTIONS=--loglevel={{ openshift_master_debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }}
CONFIG_FILE={{ openshift_master_config_file }}
{# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}
{% if openshift_master_is_scaleup_host %}
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index a95570d38..501be148e 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -34,7 +34,6 @@
cluster_method: "{{ openshift_master_cluster_method | default('native') }}"
cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
- debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}"
api_port: "{{ openshift_master_api_port | default(None) }}"
api_url: "{{ openshift_master_api_url | default(None) }}"
api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index 7928a0346..48584bd64 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -54,6 +54,7 @@
access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
+ storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
when: openshift_metrics_cassandra_storage_type == 'dynamic'
changed_when: false
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 1214c08e5..b9f16dfd4 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -1,8 +1,11 @@
---
+openshift_node_debug_level: "{{ debug_level | default(2) }}"
+
r_openshift_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
-openshift_service_type: "{{ openshift.common.service_type }}"
+openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"
+openshift_service_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'atomic-openshift' }}"
openshift_image_tag: ''
@@ -15,7 +18,6 @@ openshift_node_ami_prep_packages:
- openvswitch
- docker
- etcd
-#- pcs
- haproxy
- dnsmasq
- ntp
@@ -52,7 +54,6 @@ openshift_node_ami_prep_packages:
# - container-selinux
# - atomic
#
-openshift_deployment_type: origin
openshift_node_bootstrap: False
@@ -103,5 +104,11 @@ openshift_node_use_nuage: "{{ openshift_node_use_nuage_default }}"
openshift_node_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"
openshift_node_use_contiv: "{{ openshift_node_use_contiv_default }}"
+openshift_node_use_kuryr_default: "{{ openshift_use_kuryr | default(False) }}"
+openshift_node_use_kuryr: "{{ openshift_node_use_kuryr_default }}"
+
openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
+
+openshift_node_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}"
+openshift_node_image_config_latest: "{{ openshift_node_image_config_latest_default }}"
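With this change the service type is derived from the deployment type instead of a cached fact; for example (sketch):

openshift_node_example_vars:                       # hypothetical grouping for illustration
  openshift_deployment_type: openshift-enterprise
  # the ternary above then yields openshift_service_type == "atomic-openshift";
  # any value other than 'origin' resolves the same way
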
diff --git a/roles/openshift_node/files/bootstrap.yml b/roles/openshift_node/files/bootstrap.yml
new file mode 100644
index 000000000..ea280640f
--- /dev/null
+++ b/roles/openshift_node/files/bootstrap.yml
@@ -0,0 +1,63 @@
+#!/usr/bin/ansible-playbook
+---
+- hosts: localhost
+ gather_facts: yes
+ vars:
+ origin_dns:
+ file: /etc/dnsmasq.d/origin-dns.conf
+ lines:
+ - regex: ^listen-address
+ state: present
+ line: "listen-address={{ ansible_default_ipv4.address }}"
+ node_dns:
+ file: /etc/dnsmasq.d/node-dnsmasq.conf
+ lines:
+ - regex: "^server=/in-addr.arpa/127.0.0.1$"
+ line: server=/in-addr.arpa/127.0.0.1
+ - regex: "^server=/cluster.local/127.0.0.1$"
+ line: server=/cluster.local/127.0.0.1
+
+ tasks:
+ - include_vars: openshift_settings.yaml
+
+ - name: set the data for node_dns
+ lineinfile:
+ create: yes
+ insertafter: EOF
+ path: "{{ node_dns.file }}"
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line | default(omit) }}"
+ with_items: "{{ node_dns.lines }}"
+
+ - name: set the data for origin_dns
+ lineinfile:
+ create: yes
+ state: "{{ item.state | default('present') }}"
+ insertafter: "{{ item.after | default(omit) }}"
+ path: "{{ origin_dns.file }}"
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line | default(omit)}}"
+ with_items: "{{ origin_dns.lines }}"
+
+ - when:
+ - openshift_group_type is defined
+ - openshift_group_type != ''
+ - openshift_group_type != 'master'
+ block:
+ - name: determine the openshift_service_type
+ stat:
+ path: /etc/sysconfig/atomic-openshift-node
+ register: service_type_results
+
+ - name: set openshift_service_type fact based on stat results
+ set_fact:
+ openshift_service_type: "{{ service_type_results.stat.exists | ternary('atomic-openshift', 'origin') }}"
+
+ - name: update the sysconfig to have necessary variables
+ lineinfile:
+ dest: "/etc/sysconfig/{{ openshift_service_type }}-node"
+ line: "{{ item.line }}"
+ regexp: "{{ item.regexp }}"
+ with_items:
+ - line: "BOOTSTRAP_CONFIG_NAME=node-config-{{ openshift_group_type }}"
+ regexp: "^BOOTSTRAP_CONFIG_NAME=.*"
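The playbook expects an openshift_settings.yaml alongside it (loaded by the include_vars above); a minimal sketch of that file's contents (the group value is an example):

---
openshift_group_type: compute
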
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 25a6fc721..b102c1b18 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -3,7 +3,11 @@
systemd:
name: openvswitch
state: restarted
- when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift_node_use_openshift_sdn | bool
+ when:
+ - (not skip_node_svc_handlers | default(False) | bool)
+ - not (ovs_service_status_changed | default(false) | bool)
+ - openshift_node_use_openshift_sdn | bool
+ - not openshift_node_bootstrap
register: l_openshift_node_stop_openvswitch_result
until: not l_openshift_node_stop_openvswitch_result | failed
retries: 3
@@ -11,10 +15,11 @@
notify:
- restart openvswitch pause
-
- name: restart openvswitch pause
pause: seconds=15
- when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool
+ when:
+ - (not skip_node_svc_handlers | default(False) | bool)
+ - openshift.common.is_containerized | bool
- name: restart node
systemd:
diff --git a/roles/openshift_node/tasks/aws.yml b/roles/openshift_node/tasks/aws.yml
new file mode 100644
index 000000000..38c2b794d
--- /dev/null
+++ b/roles/openshift_node/tasks/aws.yml
@@ -0,0 +1,21 @@
+---
+- name: Configure AWS Cloud Provider Settings
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ create: true
+ with_items:
+ - regex: '^AWS_ACCESS_KEY_ID='
+ line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}"
+ - regex: '^AWS_SECRET_ACCESS_KEY='
+ line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}"
+ register: sys_env_update
+ no_log: True
+ when:
+ - openshift_cloudprovider_kind is defined
+ - openshift_cloudprovider_kind == 'aws'
+ - openshift_cloudprovider_aws_access_key is defined
+ - openshift_cloudprovider_aws_secret_key is defined
+ notify:
+ - restart node
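The task above only fires when all of the AWS variables in its conditions are present in the inventory; a sketch with placeholder credentials:

openshift_cloudprovider_kind: aws
openshift_cloudprovider_aws_access_key: AKIAEXAMPLEKEY         # placeholder
openshift_cloudprovider_aws_secret_key: "<secret-access-key>"  # placeholder
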
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
index b83b2c452..8c03f6c41 100644
--- a/roles/openshift_node/tasks/bootstrap.yml
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -17,19 +17,31 @@
[Unit]
After=cloud-init.service
-- name: update the sysconfig to have KUBECONFIG
+- name: update the sysconfig to have necessary variables
lineinfile:
dest: "/etc/sysconfig/{{ openshift_service_type }}-node"
- line: "KUBECONFIG=/root/csr_kubeconfig"
+ line: "{{ item.line | default(omit) }}"
+ regexp: "{{ item.regexp }}"
+ state: "{{ item.state | default('present') }}"
+ with_items:
+ # add the kubeconfig
+ - line: "KUBECONFIG=/etc/origin/node/csr_kubeconfig"
regexp: "^KUBECONFIG=.*"
+ # remove the config file. This comes from openshift_facts
+ - regexp: "^CONFIG_FILE=.*"
+ state: absent
-- name: update the ExecStart to have bootstrap
- lineinfile:
- dest: "/usr/lib/systemd/system/{{ openshift_service_type }}-node.service"
- line: "{% raw %}ExecStart=/usr/bin/openshift start node --bootstrap --kubeconfig=${KUBECONFIG} $OPTIONS{% endraw %}"
- regexp: "^ExecStart=.*"
+- name: include aws sysconfig credentials
+ include: aws.yml
+ static: yes
+
+#- name: update the ExecStart to have bootstrap
+# lineinfile:
+# dest: "/usr/lib/systemd/system/{{ openshift_service_type }}-node.service"
+# line: "{% raw %}ExecStart=/usr/bin/openshift start node --bootstrap --kubeconfig=${KUBECONFIG} $OPTIONS{% endraw %}"
+# regexp: "^ExecStart=.*"
-- name: "systemctl enable {{ openshift_service_type }}-node"
+- name: "disable {{ openshift_service_type }}-node and {{ openshift_service_type }}-master services"
systemd:
name: "{{ item }}"
enabled: no
@@ -42,6 +54,30 @@
path: /etc/origin/.config_managed
register: rpmgenerated_config
+- name: create directories for bootstrapping
+ file:
+ state: directory
+ dest: "{{ item }}"
+ with_items:
+ - /root/openshift_bootstrap
+ - /var/lib/origin/openshift.local.config
+ - /var/lib/origin/openshift.local.config/node
+ - "/etc/docker/certs.d/docker-registry.default.svc:5000"
+
+- name: lay down the bootstrap.yml file for on-boot configuration
+ copy:
+ src: bootstrap.yml
+ dest: /root/openshift_bootstrap/bootstrap.yml
+
+- name: symlink master ca for docker-registry
+ file:
+ src: "{{ item }}"
+ dest: "/etc/docker/certs.d/docker-registry.default.svc:5000/{{ item | basename }}"
+ state: link
+ force: yes
+ with_items:
+ - /var/lib/origin/openshift.local.config/node/node-client-ca.crt
+
- when: rpmgenerated_config.stat.exists
block:
- name: Remove RPM generated config files if present
@@ -50,6 +86,7 @@
state: absent
with_items:
- master
+ - .config_managed
# with_fileglob doesn't work correctly due to a few issues.
# Could change this to fileglob when it gets fixed.
@@ -62,5 +99,7 @@
file:
path: "{{ item.path }}"
state: absent
- when: "'resolv.conf' not in item.path or 'node-dnsmasq.conf' not in item.path"
+ when:
+ - "'resolv.conf' not in item.path"
+ - "'node-dnsmasq.conf' not in item.path"
with_items: "{{ find_results.files }}"
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index e3898b520..c08f43118 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -46,26 +46,16 @@
notify:
- restart node
-- name: Configure AWS Cloud Provider Settings
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- create: true
- with_items:
- - regex: '^AWS_ACCESS_KEY_ID='
- line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}"
- - regex: '^AWS_SECRET_ACCESS_KEY='
- line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}"
- no_log: True
- when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined
- notify:
- - restart node
+- name: include aws provider credentials
+ include: aws.yml
+ static: yes
# Necessary because when you're on a node that's also a master the master will be
# restarted after the node restarts docker and it will take up to 60 seconds for
# systemd to start the master again
-- when: openshift.common.is_containerized | bool
+- when:
+ - openshift.common.is_containerized | bool
+ - not openshift_node_bootstrap
block:
- name: Wait for master API to become available before proceeding
# Using curl here since the uri module requires python-httplib2 and
@@ -90,30 +80,28 @@
enabled: yes
state: started
-- name: Start and enable node
- systemd:
- name: "{{ openshift.common.service_type }}-node"
- enabled: yes
- state: started
- daemon_reload: yes
- register: node_start_result
- until: not node_start_result | failed
- retries: 1
- delay: 30
- ignore_errors: true
-
-- name: Dump logs from node service if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node
- when: node_start_result | failed
+- when: not openshift_node_bootstrap
+ block:
+ - name: Start and enable node
+ systemd:
+ name: "{{ openshift.common.service_type }}-node"
+ enabled: yes
+ state: started
+ daemon_reload: yes
+ register: node_start_result
+ until: not node_start_result | failed
+ retries: 1
+ delay: 30
+ ignore_errors: true
-- name: Abort if node failed to start
- fail:
- msg: Node failed to start please inspect the logs and try again
- when: node_start_result | failed
+ - name: Dump logs from node service if it failed
+ command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node
+ when: node_start_result | failed
-- name: Setup tuned
- include: tuned.yml
- static: yes
+ - name: Abort if node failed to start
+ fail:
+ msg: Node failed to start please inspect the logs and try again
+ when: node_start_result | failed
-- set_fact:
- node_service_status_changed: "{{ node_start_result | changed }}"
+ - set_fact:
+ node_service_status_changed: "{{ node_start_result | changed }}"
diff --git a/roles/openshift_node/tasks/config/configure-node-settings.yml b/roles/openshift_node/tasks/config/configure-node-settings.yml
index 1186062eb..527580481 100644
--- a/roles/openshift_node/tasks/config/configure-node-settings.yml
+++ b/roles/openshift_node/tasks/config/configure-node-settings.yml
@@ -7,7 +7,7 @@
create: true
with_items:
- regex: '^OPTIONS='
- line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
+ line: "OPTIONS=--loglevel={{ openshift_node_debug_level }}"
- regex: '^CONFIG_FILE='
line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
- regex: '^IMAGE_VERSION='
diff --git a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
deleted file mode 100644
index f92ff79b5..000000000
--- a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Install Node docker service file
- template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
- src: openshift.docker.node.service
- notify:
- - reload systemd units
- - restart node
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 265bf2c46..6b7e40491 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -3,12 +3,12 @@
block:
- name: Install Node package
package:
- name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift.common.service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
- name: Install sdn-ovs package
package:
- name: "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift.common.service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when:
- openshift_node_use_openshift_sdn | bool
@@ -27,5 +27,3 @@
docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
-
- - include: config/install-node-docker-service-file.yml
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 59b8bb76e..eae9ca7bc 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -66,15 +66,10 @@
sysctl_file: "/etc/sysctl.d/99-openshift.conf"
reload: yes
-- name: include bootstrap node config
- include: bootstrap.yml
- when: openshift_node_bootstrap
-
- include: registry_auth.yml
- name: include standard node config
include: config.yml
- when: not openshift_node_bootstrap
#### Storage class plugins here ####
- name: NFS storage plugin configuration
@@ -98,3 +93,7 @@
- include: config/workaround-bz1331590-ovs-oom-fix.yml
when: openshift_node_use_openshift_sdn | default(true) | bool
+
+- name: include bootstrap node config
+ include: bootstrap.yml
+ when: openshift_node_bootstrap
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 6b4490f61..9c182ade6 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -1,11 +1,9 @@
---
-# This file is included both in the openshift_master role and in the upgrade
-# playbooks.
- name: Install Node service file
template:
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
- src: "node.service.j2"
- when: not openshift.common.is_containerized | bool
+ src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}"
+ when: not openshift.common.is_node_system_container | bool
notify:
- reload systemd units
- restart node
diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2
index 0856737f6..7602d8ee6 100644
--- a/roles/openshift_node/templates/node.service.j2
+++ b/roles/openshift_node/templates/node.service.j2
@@ -12,17 +12,17 @@ After=dnsmasq.service
[Service]
Type=notify
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-node
Environment=GOTRACEBACK=crash
ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:
-ExecStart=/usr/bin/openshift start node --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/openshift start node {% if openshift_node_bootstrap %} --kubeconfig=${KUBECONFIG} --bootstrap-config-name=${BOOTSTRAP_CONFIG_NAME}{% endif %} --config=${CONFIG_FILE} $OPTIONS
LimitNOFILE=65536
LimitCORE=infinity
WorkingDirectory=/var/lib/origin/
-SyslogIdentifier={{ openshift.common.service_type }}-node
+SyslogIdentifier={{ openshift_service_type }}-node
Restart=always
RestartSec=5s
TimeoutStartSec=300
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 7049f7189..718d35dca 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -13,7 +13,7 @@ dockerConfig:
iptablesSyncPeriod: "{{ openshift.node.iptables_sync_period }}"
imageConfig:
format: {{ openshift.node.registry_url }}
- latest: false
+ latest: {{ openshift_node_image_config_latest }}
kind: NodeConfig
kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }}
{% if openshift_use_crio | default(False) %}
@@ -44,7 +44,7 @@ networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
# deprecates networkPluginName above. The two should match.
networkConfig:
mtu: {{ openshift.node.sdn_mtu }}
-{% if openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_sdn_network_plugin_name == 'cni' %}
+{% if openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_use_kuryr | bool or openshift_node_sdn_network_plugin_name == 'cni' %}
networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
{% endif %}
{% if openshift.node.set_node_ip | bool %}
@@ -67,9 +67,11 @@ servingInfo:
{% endfor %}
{% endif %}
volumeDirectory: {{ openshift_node_data_dir }}/openshift.local.volumes
+{% if not (openshift_node_use_kuryr | default(False)) | bool %}
proxyArguments:
proxy-mode:
- {{ openshift.node.proxy_mode }}
+{% endif %}
volumeConfig:
localQuota:
perFSGroup: {{ openshift.node.local_quota_per_fsgroup }}
diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml
index 0d5fa664c..b45130400 100644
--- a/roles/openshift_node_facts/tasks/main.yml
+++ b/roles/openshift_node_facts/tasks/main.yml
@@ -11,7 +11,6 @@
- role: node
local_facts:
annotations: "{{ openshift_node_annotations | default(none) }}"
- debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
labels: "{{ openshift_node_labels | default(None) }}"
diff --git a/roles/openshift_node_upgrade/README.md b/roles/openshift_node_upgrade/README.md
index c7c0ff34a..73b98ad90 100644
--- a/roles/openshift_node_upgrade/README.md
+++ b/roles/openshift_node_upgrade/README.md
@@ -49,7 +49,6 @@ From openshift.node:
| Name | Default Value | |
|------------------------------------|---------------------|---------------------|
-| openshift.node.debug_level |---------------------|---------------------|
| openshift.node.node_image |---------------------|---------------------|
| openshift.node.ovs_image |---------------------|---------------------|
diff --git a/roles/openshift_node_upgrade/defaults/main.yml b/roles/openshift_node_upgrade/defaults/main.yml
index 6507b015d..10b4c6977 100644
--- a/roles/openshift_node_upgrade/defaults/main.yml
+++ b/roles/openshift_node_upgrade/defaults/main.yml
@@ -1,4 +1,6 @@
---
+openshift_node_debug_level: "{{ debug_level | default(2) }}"
+
openshift_use_openshift_sdn: True
os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
diff --git a/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml b/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
index 1186062eb..527580481 100644
--- a/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
+++ b/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
@@ -7,7 +7,7 @@
create: true
with_items:
- regex: '^OPTIONS='
- line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
+ line: "OPTIONS=--loglevel={{ openshift_node_debug_level }}"
- regex: '^CONFIG_FILE='
line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
- regex: '^IMAGE_VERSION='
diff --git a/roles/openshift_node_upgrade/tasks/systemd_units.yml b/roles/openshift_node_upgrade/tasks/systemd_units.yml
index afff2f8ba..226f5290c 100644
--- a/roles/openshift_node_upgrade/tasks/systemd_units.yml
+++ b/roles/openshift_node_upgrade/tasks/systemd_units.yml
@@ -6,7 +6,7 @@
# - openshift.node.ovs_image
# - openshift_use_openshift_sdn
# - openshift.common.service_type
-# - openshift.node.debug_level
+# - openshift_node_debug_level
# - openshift.common.config_base
# - openshift.common.http_proxy
# - openshift.common.portal_net
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
index 5aa8aecec..c08bec4cb 100644
--- a/roles/openshift_prometheus/defaults/main.yaml
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -10,50 +10,30 @@ openshift_prometheus_node_selector: {"region":"infra"}
# images
openshift_prometheus_image_proxy: "openshift/oauth-proxy:v1.0.0"
openshift_prometheus_image_prometheus: "openshift/prometheus:v2.0.0-dev"
-openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:dev"
+openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:v0.9.1"
openshift_prometheus_image_alertbuffer: "openshift/prometheus-alert-buffer:v0.0.1"
# additional prometheus rules file
openshift_prometheus_additional_rules_file: null
-# All the required exports
-openshift_prometheus_pv_exports:
- - prometheus
- - prometheus-alertmanager
- - prometheus-alertbuffer
-# PV template files and their created object names
-openshift_prometheus_pv_data:
- - pv_name: prometheus
- pv_template: prom-pv-server.yml
- pv_label: Prometheus Server PV
- - pv_name: prometheus-alertmanager
- pv_template: prom-pv-alertmanager.yml
- pv_label: Prometheus Alertmanager PV
- - pv_name: prometheus-alertbuffer
- pv_template: prom-pv-alertbuffer.yml
- pv_label: Prometheus Alert Buffer PV
-
-# Hostname/IP of the NFS server. Currently defaults to first master
-openshift_prometheus_nfs_server: "{{ groups.nfs.0 }}"
-
# storage
openshift_prometheus_storage_type: pvc
openshift_prometheus_pvc_name: prometheus
-openshift_prometheus_pvc_size: 10G
+openshift_prometheus_pvc_size: "{{ openshift_prometheus_storage_volume_size | default('10Gi') }}"
openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
-openshift_prometheus_pvc_pv_selector: {}
+openshift_prometheus_pvc_pv_selector: "{{ openshift_prometheus_storage_labels | default({}) }}"
openshift_prometheus_alertmanager_storage_type: pvc
openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager
-openshift_prometheus_alertmanager_pvc_size: 10G
+openshift_prometheus_alertmanager_pvc_size: "{{ openshift_prometheus_alertmanager_storage_volume_size | default('10Gi') }}"
openshift_prometheus_alertmanager_pvc_access_modes: [ReadWriteOnce]
-openshift_prometheus_alertmanager_pvc_pv_selector: {}
+openshift_prometheus_alertmanager_pvc_pv_selector: "{{ openshift_prometheus_alertmanager_storage_labels | default({}) }}"
openshift_prometheus_alertbuffer_storage_type: pvc
openshift_prometheus_alertbuffer_pvc_name: prometheus-alertbuffer
-openshift_prometheus_alertbuffer_pvc_size: 10G
+openshift_prometheus_alertbuffer_pvc_size: "{{ openshift_prometheus_alertbuffer_storage_volume_size | default('10Gi') }}"
openshift_prometheus_alertbuffer_pvc_access_modes: [ReadWriteOnce]
-openshift_prometheus_alertbuffer_pvc_pv_selector: {}
+openshift_prometheus_alertbuffer_pvc_pv_selector: "{{ openshift_prometheus_alertbuffer_storage_labels | default({}) }}"
# container resources
openshift_prometheus_cpu_limit: null
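With the NFS plumbing removed, PVC sizing and selection now follow the generic storage variables referenced in the new defaults; an inventory sketch (sizes and labels are examples):

openshift_prometheus_storage_volume_size: 20Gi
openshift_prometheus_storage_labels: {storage: prometheus}
openshift_prometheus_alertmanager_storage_volume_size: 5Gi
openshift_prometheus_alertbuffer_storage_volume_size: 5Gi
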
diff --git a/roles/openshift_prometheus/files/openshift_prometheus.exports b/roles/openshift_prometheus/files/openshift_prometheus.exports
deleted file mode 100644
index 3ccedb1fd..000000000
--- a/roles/openshift_prometheus/files/openshift_prometheus.exports
+++ /dev/null
@@ -1,3 +0,0 @@
-/exports/prometheus *(rw,no_root_squash,no_wdelay)
-/exports/prometheus-alertmanager *(rw,no_root_squash,no_wdelay)
-/exports/prometheus-alertbuffer *(rw,no_root_squash,no_wdelay)
diff --git a/roles/openshift_prometheus/tasks/create_pvs.yaml b/roles/openshift_prometheus/tasks/create_pvs.yaml
deleted file mode 100644
index 4e79da05f..000000000
--- a/roles/openshift_prometheus/tasks/create_pvs.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-# Check for existance and then conditionally:
-# - evaluate templates
-# - PVs
-#
-# These tasks idempotently create required Prometheus PV objects. Do not
-# call this file directly. This file is intended to be ran as an
-# include that has a 'with_items' attached to it. Hence the use below
-# of variables like "{{ item.pv_label }}"
-
-- name: "Check if the {{ item.pv_label }} template has been created already"
- oc_obj:
- namespace: "{{ openshift_prometheus_namespace }}"
- state: list
- kind: pv
- name: "{{ item.pv_name }}"
- register: prom_pv_check
-
-# Skip all of this if the PV already exists
-- block:
- - name: "Ensure the {{ item.pv_label }} template is evaluated"
- template:
- src: "{{ item.pv_template }}.j2"
- dest: "{{ tempdir }}/templates/{{ item.pv_template }}"
-
- - name: "Ensure {{ item.pv_label }} is created"
- oc_obj:
- namespace: "{{ openshift_prometheus_namespace }}"
- kind: pv
- name: "{{ item.pv_name }}"
- state: present
- delete_after: True
- files:
- - "{{ tempdir }}/templates/{{ item.pv_template }}"
- when:
- - not prom_pv_check.results.results.0
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index a9bce2fb1..cb75eedca 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -54,15 +54,6 @@
resource_name: cluster-reader
user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:prometheus"
-
-######################################################################
-# NFS
-# In the case that we are not running on a cloud provider, volumes must be statically provisioned
-
-- include: nfs.yaml
- when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
-
-
# create prometheus and alerts services
# TODO join into 1 task with loop
- name: Create prometheus service
diff --git a/roles/openshift_prometheus/tasks/nfs.yaml b/roles/openshift_prometheus/tasks/nfs.yaml
deleted file mode 100644
index 0b45f2cee..000000000
--- a/roles/openshift_prometheus/tasks/nfs.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-# Tasks to statically provision NFS volumes
-# Include if not using dynamic volume provisioning
-- name: Ensure the /exports/ directory exists
- file:
- path: /exports/
- state: directory
- mode: 0755
- owner: root
- group: root
-
-- name: Ensure the prom-pv0X export directories exist
- file:
- path: "/exports/{{ item }}"
- state: directory
- mode: 0777
- owner: nfsnobody
- group: nfsnobody
- with_items: "{{ openshift_prometheus_pv_exports }}"
-
-- name: Ensure the NFS exports for Prometheus PVs exist
- copy:
- src: openshift_prometheus.exports
- dest: /etc/exports.d/openshift_prometheus.exports
- register: nfs_exports_updated
-
-- name: Ensure the NFS export table is refreshed if exports were added
- command: exportfs -ar
- when:
- - nfs_exports_updated.changed
-
-
-######################################################################
-# Create the required Prometheus PVs. Check out these online docs if you
-# need a refresher on includes looping with items:
-# * http://docs.ansible.com/ansible/playbooks_loops.html#loops-and-includes-in-2-0
-# * http://stackoverflow.com/a/35128533
-#
-# TODO: Handle the case where a PV template is updated in
-# openshift-ansible and the change needs to be landed on the managed
-# cluster.
-
-- include: create_pvs.yaml
- with_items: "{{ openshift_prometheus_pv_data }}"
diff --git a/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2
deleted file mode 100644
index 55a5e19c3..000000000
--- a/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: prometheus-alertbuffer
- labels:
- storage: prometheus-alertbuffer
-spec:
- capacity:
- storage: 15Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: /exports/prometheus-alertbuffer
- server: {{ openshift_prometheus_nfs_server }}
- persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2
deleted file mode 100644
index 4ee518735..000000000
--- a/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: prometheus-alertmanager
- labels:
- storage: prometheus-alertmanager
-spec:
- capacity:
- storage: 15Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: /exports/prometheus-alertmanager
- server: {{ openshift_prometheus_nfs_server }}
- persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prom-pv-server.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-server.yml.j2
deleted file mode 100644
index 933bf0f60..000000000
--- a/roles/openshift_prometheus/templates/prom-pv-server.yml.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: prometheus
- labels:
- storage: prometheus
-spec:
- capacity:
- storage: 15Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: /exports/prometheus
- server: {{ openshift_prometheus_nfs_server }}
- persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prometheus_deployment.j2 b/roles/openshift_prometheus/templates/prometheus_deployment.j2
index 98c117f19..66eab6df4 100644
--- a/roles/openshift_prometheus/templates/prometheus_deployment.j2
+++ b/roles/openshift_prometheus/templates/prometheus_deployment.j2
@@ -38,7 +38,7 @@ spec:
cpu: "{{openshift_prometheus_oauth_proxy_cpu_requests}}"
{% endif %}
limits:
-{% if openshift_prometheus_memory_requests_limit_proxy is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %}
+{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %}
memory: "{{openshift_prometheus_oauth_proxy_memory_limit}}"
{% endif %}
{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %}
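
The one-line change above fixes a copy/paste error in the oauth-proxy limits block: the memory limit was guarded by a check on a differently named variable, so the limit line could be skipped even when openshift_prometheus_oauth_proxy_memory_limit was set. The guard now follows the same "defined and not none" idiom used for the other resource settings in this template, roughly (with a placeholder variable name):

    {% if some_limit_var is defined and some_limit_var is not none %}
      memory: "{{ some_limit_var }}"
    {% endif %}
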
diff --git a/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
index 71e21a269..56b2d1463 100644
--- a/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
+++ b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
@@ -4,22 +4,23 @@ metadata:
name: service-catalog
objects:
-- kind: ClusterRole
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRole
metadata:
name: servicecatalog-serviceclass-viewer
rules:
- apiGroups:
- servicecatalog.k8s.io
resources:
- - serviceclasses
+ - clusterserviceclasses
+ - clusterserviceplans
verbs:
- list
- watch
- get
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: servicecatalog-serviceclass-viewer-binding
roleRef:
@@ -37,8 +38,8 @@ objects:
metadata:
name: service-catalog-apiserver
-- kind: ClusterRole
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRole
metadata:
name: sar-creator
rules:
@@ -49,17 +50,19 @@ objects:
verbs:
- create
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: service-catalog-sar-creator-binding
roleRef:
name: sar-creator
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-apiserver
+ namespace: kube-service-catalog
-- kind: ClusterRole
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRole
metadata:
name: namespace-viewer
rules:
@@ -72,26 +75,30 @@ objects:
- watch
- get
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: service-catalog-namespace-viewer-binding
roleRef:
name: namespace-viewer
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-apiserver
+ namespace: kube-service-catalog
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: service-catalog-controller-namespace-viewer-binding
roleRef:
name: namespace-viewer
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-controller
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-controller
+ namespace: kube-service-catalog
-- kind: ClusterRole
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRole
metadata:
name: service-catalog-controller
rules:
@@ -102,6 +109,7 @@ objects:
verbs:
- create
- update
+ - patch
- delete
- get
- list
@@ -109,19 +117,22 @@ objects:
- apiGroups:
- servicecatalog.k8s.io
resources:
- - brokers/status
- - instances/status
- - bindings/status
+ - clusterservicebrokers/status
+ - serviceinstances/status
+ - servicebindings/status
+ - servicebindings/finalizers
+ - serviceinstances/reference
verbs:
- update
- apiGroups:
- servicecatalog.k8s.io
resources:
- - brokers
- - instances
- - bindings
+ - clusterservicebrokers
+ - serviceinstances
+ - servicebindings
verbs:
- list
+ - get
- watch
- apiGroups:
- ""
@@ -133,7 +144,8 @@ objects:
- apiGroups:
- servicecatalog.k8s.io
resources:
- - serviceclasses
+ - clusterserviceclasses
+ - clusterserviceplans
verbs:
- create
- delete
@@ -154,17 +166,19 @@ objects:
- list
- watch
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: service-catalog-controller-binding
roleRef:
name: service-catalog-controller
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-controller
-
-- kind: Role
- apiVersion: v1
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-controller
+ namespace: kube-service-catalog
+
+- apiVersion: authorization.openshift.io/v1
+ kind: Role
metadata:
name: endpoint-accessor
rules:
@@ -179,21 +193,25 @@ objects:
- create
- update
-- kind: RoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: RoleBinding
metadata:
- name: endpoint-accessor-binding
+ name: endpointer-accessor-binding
roleRef:
name: endpoint-accessor
namespace: kube-service-catalog
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-controller
+ subjects:
+ - kind: ServiceAccount
+ namespace: kube-service-catalog
+ name: service-catalog-controller
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: system:auth-delegator-binding
roleRef:
name: system:auth-delegator
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-apiserver
+ namespace: kube-service-catalog
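
Taken together, the hunks above migrate every object in this template from the legacy "kind / apiVersion: v1" form with flat userNames lists to authorization.openshift.io/v1 objects whose bindings name their service accounts explicitly, and they switch the servicecatalog.k8s.io resources to the renamed v1beta1 kinds (clusterserviceclasses, clusterserviceplans, clusterservicebrokers, serviceinstances, servicebindings). Reassembled here only for readability (this is one of the bindings already in the file, not an additional object), a migrated binding now reads:

    - apiVersion: authorization.openshift.io/v1
      kind: ClusterRoleBinding
      metadata:
        name: service-catalog-controller-binding
      roleRef:
        name: service-catalog-controller
      subjects:
      - kind: ServiceAccount
        name: service-catalog-controller
        namespace: kube-service-catalog
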
diff --git a/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml
index f6ee0955d..e1af51ce6 100644
--- a/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml
+++ b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml
@@ -4,8 +4,8 @@ metadata:
name: kube-system-service-catalog
objects:
-- kind: Role
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: Role
metadata:
name: extension-apiserver-authentication-reader
namespace: ${KUBE_SYSTEM_NAMESPACE}
@@ -19,16 +19,18 @@ objects:
verbs:
- get
-- kind: RoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: RoleBinding
metadata:
name: extension-apiserver-authentication-reader-binding
namespace: ${KUBE_SYSTEM_NAMESPACE}
roleRef:
name: extension-apiserver-authentication-reader
- namespace: kube-system
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+ namespace: ${KUBE_SYSTEM_NAMESPACE}
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-apiserver
+ namespace: kube-service-catalog
parameters:
- description: Do not change this value.
diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml
index cc897b032..416bdac70 100644
--- a/roles/openshift_service_catalog/tasks/generate_certs.yml
+++ b/roles/openshift_service_catalog/tasks/generate_certs.yml
@@ -36,19 +36,28 @@
- name: tls.key
path: "{{ generated_certs_dir }}/apiserver.key"
+- name: Create service-catalog-ssl secret
+ oc_secret:
+ state: present
+ name: service-catalog-ssl
+ namespace: kube-service-catalog
+ files:
+ - name: tls.crt
+ path: "{{ generated_certs_dir }}/apiserver.crt"
+
- slurp:
src: "{{ generated_certs_dir }}/ca.crt"
register: apiserver_ca
- shell: >
- oc get apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
+ oc get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
register: get_apiservices
changed_when: no
- name: Create api service
oc_obj:
state: present
- name: v1alpha1.servicecatalog.k8s.io
+ name: v1beta1.servicecatalog.k8s.io
kind: apiservices.apiregistration.k8s.io
namespace: "kube-service-catalog"
content:
@@ -57,10 +66,10 @@
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
- name: v1alpha1.servicecatalog.k8s.io
+ name: v1beta1.servicecatalog.k8s.io
spec:
group: servicecatalog.k8s.io
- version: v1alpha1
+ version: v1beta1
service:
namespace: "kube-service-catalog"
name: apiserver
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index e202ae173..1e94c8c5d 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -90,14 +90,14 @@
vars:
original_content: "{{ edit_yaml.results.results[0] | to_yaml }}"
when:
- - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
# only do this if we don't already have the updated role info
- name: update edit role for service catalog and pod preset access
command: >
oc replace -f {{ mktemp.stdout }}/edit_sc_patch.yml
when:
- - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
- oc_obj:
name: admin
@@ -113,14 +113,14 @@
vars:
original_content: "{{ admin_yaml.results.results[0] | to_yaml }}"
when:
- - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
# only do this if we don't already have the updated role info
- name: update admin role for service catalog and pod preset access
command: >
oc replace -f {{ mktemp.stdout }}/admin_sc_patch.yml
when:
- - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
- oc_adm_policy_user:
namespace: kube-service-catalog
diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml
index 2fb1ec440..96ae61507 100644
--- a/roles/openshift_service_catalog/tasks/remove.yml
+++ b/roles/openshift_service_catalog/tasks/remove.yml
@@ -1,7 +1,7 @@
---
- name: Remove Service Catalog APIServer
command: >
- oc delete apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
+ oc delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
- name: Remove Policy Binding
command: >
@@ -13,7 +13,7 @@
# state: absent
# namespace: "kube-service-catalog"
# kind: apiservices.apiregistration.k8s.io
-# name: v1alpha1.servicecatalog.k8s.io
+# name: v1beta1.servicecatalog.k8s.io
- name: Remove Service Catalog API Server route
oc_obj:
diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2
index c09834fd4..5d5352c1c 100644
--- a/roles/openshift_service_catalog/templates/api_server.j2
+++ b/roles/openshift_service_catalog/templates/api_server.j2
@@ -41,7 +41,9 @@ spec:
- --cors-allowed-origins
- {{ cors_allowed_origin }}
- --admission-control
- - "KubernetesNamespaceLifecycle"
+ - KubernetesNamespaceLifecycle,DefaultServicePlan,ServiceBindingsLifecycle,ServicePlanChangeValidator,BrokerAuthSarCheck
+ - --feature-gates
+ - OriginatingIdentity=true
image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
command: ["/usr/bin/apiserver"]
imagePullPolicy: Always
diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2
index 1bbc0fa2c..2272cbb44 100644
--- a/roles/openshift_service_catalog/templates/controller_manager.j2
+++ b/roles/openshift_service_catalog/templates/controller_manager.j2
@@ -31,7 +31,12 @@ spec:
args:
- -v
- "5"
- - "--leader-election-namespace=$(K8S_NAMESPACE)"
+ - --leader-election-namespace
+ - kube-service-catalog
+ - --broker-relist-interval
+ - "5m"
+ - --feature-gates
+ - OriginatingIdentity=true
image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
command: ["/usr/bin/controller-manager"]
imagePullPolicy: Always
@@ -41,7 +46,19 @@ spec:
protocol: TCP
resources: {}
terminationMessagePath: /dev/termination-log
+ volumeMounts:
+ - mountPath: /var/run/kubernetes-service-catalog
+ name: service-catalog-ssl
+ readOnly: true
dnsPolicy: ClusterFirst
restartPolicy: Always
securityContext: {}
terminationGracePeriodSeconds: 30
+ volumes:
+ - name: service-catalog-ssl
+ secret:
+ defaultMode: 420
+ items:
+ - key: tls.crt
+ path: apiserver.crt
+ secretName: apiserver-ssl
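
The added volumeMounts/volumes entries project a single key of the apiserver-ssl secret into the controller-manager pod: tls.crt is remapped to apiserver.crt, so inside the container the certificate is available at /var/run/kubernetes-service-catalog/apiserver.crt. Reassembled from the two hunks above, the relevant parts of the pod spec are:

    # inside the container spec
    volumeMounts:
    - name: service-catalog-ssl
      mountPath: /var/run/kubernetes-service-catalog
      readOnly: true

    # at the pod spec level
    volumes:
    - name: service-catalog-ssl
      secret:
        secretName: apiserver-ssl
        defaultMode: 420
        items:
        - key: tls.crt
          path: apiserver.crt
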
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
index 9ebb0d5ec..7b705c2d4 100644
--- a/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
@@ -85,8 +85,6 @@ objects:
volumeMounts:
- name: db
mountPath: /var/lib/heketi
- - name: topology
- mountPath: ${TOPOLOGY_PATH}
- name: config
mountPath: /etc/heketi
readinessProbe:
@@ -103,9 +101,6 @@ objects:
port: 8080
volumes:
- name: db
- - name: topology
- secret:
- secretName: heketi-${CLUSTER_NAME}-topology-secret
- name: config
secret:
secretName: heketi-${CLUSTER_NAME}-config-secret
@@ -138,6 +133,3 @@ parameters:
displayName: GlusterFS cluster name
description: A unique name to identify this heketi service, useful for running multiple heketi instances
value: glusterfs
-- name: TOPOLOGY_PATH
- displayName: heketi topology file location
- required: True
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index 3047fbaf9..c4e023c1e 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -35,6 +35,9 @@
- "{{ openshift.logging }}"
- "{{ openshift.loggingops }}"
- "{{ openshift.hosted.etcd }}"
+ - "{{ openshift.prometheus }}"
+ - "{{ openshift.prometheus.alertmanager }}"
+ - "{{ openshift.prometheus.alertbuffer }}"
- name: Configure exports
template:
diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2
index 0141e0d25..c2a741035 100644
--- a/roles/openshift_storage_nfs/templates/exports.j2
+++ b/roles/openshift_storage_nfs/templates/exports.j2
@@ -3,3 +3,6 @@
{{ openshift.logging.storage.nfs.directory }}/{{ openshift.logging.storage.volume.name }} {{ openshift.logging.storage.nfs.options }}
{{ openshift.loggingops.storage.nfs.directory }}/{{ openshift.loggingops.storage.volume.name }} {{ openshift.loggingops.storage.nfs.options }}
{{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }}
+{{ openshift.prometheus.storage.nfs.directory }}/{{ openshift.prometheus.storage.volume.name }} {{ openshift.prometheus.storage.nfs.options }}
+{{ openshift.prometheus.alertmanager.storage.nfs.directory }}/{{ openshift.prometheus.alertmanager.storage.volume.name }} {{ openshift.prometheus.alertmanager.storage.nfs.options }}
+{{ openshift.prometheus.alertbuffer.storage.nfs.directory }}/{{ openshift.prometheus.alertbuffer.storage.volume.name }} {{ openshift.prometheus.alertbuffer.storage.nfs.options }}
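
The three added lines follow the same "<nfs directory>/<volume name> <export options>" shape as the existing exports, one per Prometheus volume (server, alertmanager, alertbuffer). With illustrative values for the openshift.prometheus.* facts (the real paths and options come from those facts, not from this template), a rendered entry would look like:

    /exports/prometheus *(rw,root_squash)
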
diff --git a/roles/template_service_broker/defaults/main.yml b/roles/template_service_broker/defaults/main.yml
index fb407c4a2..a92a138b0 100644
--- a/roles/template_service_broker/defaults/main.yml
+++ b/roles/template_service_broker/defaults/main.yml
@@ -2,3 +2,4 @@
# placeholder file?
template_service_broker_remove: False
template_service_broker_install: False
+openshift_template_service_broker_namespaces: ['openshift']
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
index f5fd6487c..6a532a206 100644
--- a/roles/template_service_broker/tasks/install.yml
+++ b/roles/template_service_broker/tasks/install.yml
@@ -6,7 +6,7 @@
- "{{ openshift_deployment_type | default(deployment_type) }}.yml"
- "default_images.yml"
-- name: set ansible_service_broker facts
+- name: set template_service_broker facts
set_fact:
template_service_broker_prefix: "{{ template_service_broker_prefix | default(__template_service_broker_prefix) }}"
template_service_broker_version: "{{ template_service_broker_version | default(__template_service_broker_version) }}"
@@ -28,10 +28,24 @@
- "{{ __tsb_template_file }}"
- "{{ __tsb_rbac_file }}"
- "{{ __tsb_broker_file }}"
+ - "{{ __tsb_config_file }}"
+
+- yedit:
+ src: "{{ mktemp.stdout }}/{{ __tsb_config_file }}"
+ key: templateNamespaces
+ value: "{{ openshift_template_service_broker_namespaces }}"
+ value_type: list
+
+- slurp:
+ src: "{{ mktemp.stdout }}/{{ __tsb_config_file }}"
+ register: config
- name: Apply template file
shell: >
- oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" --param API_SERVER_CONFIG="{{ lookup('file', __tsb_files_location ~ '/' ~ __tsb_config_file) }}" --param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}" | kubectl apply -f -
+ oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
+ --param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"
+ --param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}"
+ | kubectl apply -f -
# reconcile with rbac
- name: Reconcile with RBAC file
@@ -62,7 +76,7 @@
when: openshift_master_config_dir is undefined
- slurp:
- src: "{{ openshift_master_config_dir }}/ca.crt"
+ src: "{{ openshift_master_config_dir }}/service-signer.crt"
register: __ca_bundle
# Register with broker
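
The reworked "Apply template file" step no longer reads the stock config file straight from the role with lookup('file', ...); it first patches the temporary copy with yedit so that templateNamespaces reflects openshift_template_service_broker_namespaces, then slurps that copy back. Because slurp returns file content base64-encoded, the template parameter is passed through b64decode. A minimal standalone sketch of that slurp/decode round trip, with an example path standing in for {{ mktemp.stdout }}/{{ __tsb_config_file }}:

    - slurp:
        src: /tmp/apiserver-config.yaml    # example path for illustration
      register: config

    - debug:
        msg: "{{ config['content'] | b64decode }}"   # the original text of the slurped file
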
diff --git a/roles/tuned/defaults/main.yml b/roles/tuned/defaults/main.yml
new file mode 100644
index 000000000..418a4b521
--- /dev/null
+++ b/roles/tuned/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+tuned_etc_directory: '/etc/tuned'
+tuned_templates_source: '../templates'
diff --git a/roles/tuned/meta/main.yml b/roles/tuned/meta/main.yml
new file mode 100644
index 000000000..833d94c13
--- /dev/null
+++ b/roles/tuned/meta/main.yml
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+ author: Jiri Mencak
+ description: Restart the tuned daemon if present and make it use the recommended profile
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.3
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
diff --git a/roles/openshift_node/tasks/tuned.yml b/roles/tuned/tasks/main.yml
index 425bf6a26..e95d274d5 100644
--- a/roles/openshift_node/tasks/tuned.yml
+++ b/roles/tuned/tasks/main.yml
@@ -12,8 +12,6 @@
- name: Set tuned OpenShift variables
set_fact:
openshift_tuned_guest_profile: "{{ 'atomic-guest' if openshift.common.is_atomic else 'virtual-guest' }}"
- tuned_etc_directory: '/etc/tuned'
- tuned_templates_source: '../templates/tuned'
- name: Ensure directory structure exists
file:
diff --git a/roles/openshift_node/templates/tuned/openshift-control-plane/tuned.conf b/roles/tuned/templates/openshift-control-plane/tuned.conf
index f22f21065..f22f21065 100644
--- a/roles/openshift_node/templates/tuned/openshift-control-plane/tuned.conf
+++ b/roles/tuned/templates/openshift-control-plane/tuned.conf
diff --git a/roles/openshift_node/templates/tuned/openshift-node/tuned.conf b/roles/tuned/templates/openshift-node/tuned.conf
index 78c7d19c9..78c7d19c9 100644
--- a/roles/openshift_node/templates/tuned/openshift-node/tuned.conf
+++ b/roles/tuned/templates/openshift-node/tuned.conf
diff --git a/roles/openshift_node/templates/tuned/openshift/tuned.conf b/roles/tuned/templates/openshift/tuned.conf
index 68ac5dadb..68ac5dadb 100644
--- a/roles/openshift_node/templates/tuned/openshift/tuned.conf
+++ b/roles/tuned/templates/openshift/tuned.conf
diff --git a/roles/openshift_node/templates/tuned/recommend.conf b/roles/tuned/templates/recommend.conf
index 5fa765798..086e5673d 100644
--- a/roles/openshift_node/templates/tuned/recommend.conf
+++ b/roles/tuned/templates/recommend.conf
@@ -1,8 +1,11 @@
-[openshift-node]
-/etc/origin/node/node-config.yaml=.*region=primary
-
[openshift-control-plane,master]
/etc/origin/master/master-config.yaml=.*
[openshift-control-plane,node]
/etc/origin/node/node-config.yaml=.*region=infra
+
+[openshift-control-plane,lb]
+/etc/haproxy/haproxy.cfg=.*
+
+[openshift-node]
+/etc/origin/node/node-config.yaml=.*
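
The reorganised recommend.conf extends tuned profile selection to load balancer hosts and drops the region=primary restriction for plain nodes. Each stanza names the tuned profile to recommend (the text after the comma appears to serve only to keep otherwise duplicate section names unique), and each body line is a "file=regex" rule: the profile is recommended when the listed file exists and its contents match the regular expression, for example:

    [openshift-control-plane,lb]
    /etc/haproxy/haproxy.cfg=.*
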