-rw-r--r--.papr.inventory3
-rwxr-xr-x.papr.sh3
-rw-r--r--.papr.yml31
-rw-r--r--.tito/packages/openshift-ansible2
-rw-r--r--README_CONTAINERIZED_INSTALLATION.md12
-rw-r--r--callback_plugins/default.py70
-rw-r--r--docs/proposals/playbook_consolidation.md178
-rw-r--r--files/origin-components/apiserver-config.yaml4
-rw-r--r--files/origin-components/apiserver-template.yaml122
-rw-r--r--files/origin-components/rbac-template.yaml92
-rw-r--r--files/origin-components/template-service-broker-registration.yaml25
-rw-r--r--filter_plugins/oo_filters.py325
-rw-r--r--filter_plugins/openshift_version.py4
-rw-r--r--images/installer/Dockerfile.rhel72
-rw-r--r--images/installer/README_CONTAINER_IMAGE.md4
-rw-r--r--images/installer/root/exports/config.json.template2
-rwxr-xr-ximages/installer/root/usr/local/bin/run7
-rw-r--r--inventory/byo/hosts.example (renamed from inventory/byo/hosts.ose.example)217
-rw-r--r--inventory/byo/hosts.origin.example892
-rw-r--r--lookup_plugins/README.md1
-rw-r--r--lookup_plugins/oo_option.py74
-rw-r--r--openshift-ansible.spec367
-rw-r--r--playbooks/adhoc/atomic_openshift_tutorial_reset.yml29
-rw-r--r--playbooks/adhoc/uninstall.yml38
-rw-r--r--playbooks/aws/BUILD_AMI.md21
-rw-r--r--playbooks/aws/PREREQUISITES.md40
-rw-r--r--playbooks/aws/README.md145
-rw-r--r--playbooks/aws/openshift-cluster/build_ami.yml67
-rw-r--r--playbooks/aws/openshift-cluster/install.yml63
-rw-r--r--playbooks/aws/openshift-cluster/prerequisites.yml8
-rw-r--r--playbooks/aws/openshift-cluster/provision.yml4
-rw-r--r--playbooks/aws/openshift-cluster/provision_instance.yml12
-rw-r--r--playbooks/aws/openshift-cluster/provision_sec_group.yml13
-rw-r--r--playbooks/aws/openshift-cluster/provision_ssh_keypair.yml12
-rw-r--r--playbooks/aws/openshift-cluster/provision_vpc.yml10
-rw-r--r--playbooks/aws/openshift-cluster/provisioning_vars.example.yml28
-rw-r--r--playbooks/aws/openshift-cluster/seal_ami.yml12
-rw-r--r--playbooks/aws/provisioning-inventory.example.ini25
-rw-r--r--playbooks/aws/provisioning_vars.yml.example120
-rw-r--r--playbooks/byo/openshift-cluster/openshift-hosted.yml6
-rw-r--r--playbooks/byo/openshift-cluster/openshift-prometheus.yml2
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-certificates.yml24
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml12
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-master-certificates.yml6
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-node-certificates.yml6
-rw-r--r--playbooks/byo/openshift-etcd/certificates.yml8
-rw-r--r--playbooks/byo/openshift-etcd/embedded2external.yml6
-rw-r--r--playbooks/byo/openshift-loadbalancer/config.yml6
-rw-r--r--playbooks/byo/openshift-management/config.yml (renamed from playbooks/byo/openshift-cfme/config.yml)2
-rw-r--r--playbooks/byo/openshift-management/uninstall.yml (renamed from playbooks/byo/openshift-cfme/uninstall.yml)2
-rw-r--r--playbooks/byo/openshift-master/certificates.yml6
-rw-r--r--playbooks/byo/openshift-nfs/config.yml6
-rw-r--r--playbooks/byo/openshift-node/certificates.yml6
-rw-r--r--playbooks/byo/rhel_subscribe.yml12
-rw-r--r--playbooks/common/openshift-cfme/config.yml44
-rw-r--r--playbooks/common/openshift-cluster/cockpit-ui.yml6
-rw-r--r--playbooks/common/openshift-cluster/config.yml59
-rw-r--r--playbooks/common/openshift-cluster/create_persistent_volumes.yml9
-rw-r--r--playbooks/common/openshift-cluster/evaluate_groups.yml11
-rw-r--r--playbooks/common/openshift-cluster/initialize_facts.yml4
-rw-r--r--playbooks/common/openshift-cluster/initialize_oo_option_facts.yml27
-rw-r--r--playbooks/common/openshift-cluster/initialize_openshift_version.yml15
-rw-r--r--playbooks/common/openshift-cluster/openshift_default_storage_class.yml6
-rw-r--r--playbooks/common/openshift-cluster/openshift_hosted.yml96
-rw-r--r--playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml7
-rw-r--r--playbooks/common/openshift-cluster/openshift_hosted_registry.yml13
-rw-r--r--playbooks/common/openshift-cluster/openshift_hosted_router.yml13
-rw-r--r--playbooks/common/openshift-cluster/openshift_logging.yml21
-rw-r--r--playbooks/common/openshift-cluster/openshift_management.yml25
-rw-r--r--playbooks/common/openshift-cluster/openshift_metrics.yml22
-rw-r--r--playbooks/common/openshift-cluster/openshift_prometheus.yml8
-rw-r--r--playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml12
-rw-r--r--playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml19
-rw-r--r--playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml127
-rw-r--r--playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml70
-rw-r--r--playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml38
-rw-r--r--playbooks/common/openshift-cluster/redeploy-certificates/masters.yml63
-rw-r--r--playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/nodes.yml)5
-rw-r--r--playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml87
-rw-r--r--playbooks/common/openshift-cluster/redeploy-certificates/router.yml138
-rw-r--r--playbooks/common/openshift-cluster/service_catalog.yml29
-rw-r--r--playbooks/common/openshift-cluster/std_include.yml22
-rw-r--r--playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml4
-rw-r--r--playbooks/common/openshift-cluster/upgrades/etcd/backup.yml17
-rw-r--r--playbooks/common/openshift-cluster/upgrades/etcd/main.yml11
-rw-r--r--playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml12
-rw-r--r--playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml15
-rw-r--r--playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml15
-rw-r--r--playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml37
-rw-r--r--playbooks/common/openshift-cluster/upgrades/init.yml2
-rw-r--r--playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml1
-rw-r--r--playbooks/common/openshift-cluster/upgrades/post_control_plane.yml29
-rw-r--r--playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml22
-rw-r--r--playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml7
-rw-r--r--playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml19
-rw-r--r--playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml8
-rw-r--r--playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml3
-rw-r--r--playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml51
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml1
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml1
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml8
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml9
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml16
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml13
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml4
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml5
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml20
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml17
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml4
-rw-r--r--playbooks/common/openshift-etcd/ca.yml15
-rw-r--r--playbooks/common/openshift-etcd/certificates.yml4
-rw-r--r--playbooks/common/openshift-etcd/config.yml24
-rw-r--r--playbooks/common/openshift-etcd/embedded2external.yml172
-rw-r--r--playbooks/common/openshift-etcd/master_etcd_certificates.yml14
-rw-r--r--playbooks/common/openshift-etcd/migrate.yml86
-rw-r--r--playbooks/common/openshift-etcd/restart.yml18
-rw-r--r--playbooks/common/openshift-etcd/scaleup.yml30
-rw-r--r--playbooks/common/openshift-etcd/server_certificates.yml15
-rw-r--r--playbooks/common/openshift-glusterfs/config.yml20
-rw-r--r--playbooks/common/openshift-loadbalancer/config.yml31
-rw-r--r--playbooks/common/openshift-management/config.yml15
l---------playbooks/common/openshift-management/filter_plugins (renamed from playbooks/common/openshift-cfme/filter_plugins)0
l---------playbooks/common/openshift-management/library (renamed from playbooks/common/openshift-cfme/library)0
l---------playbooks/common/openshift-management/roles (renamed from playbooks/common/openshift-cfme/roles)0
-rw-r--r--playbooks/common/openshift-management/uninstall.yml (renamed from playbooks/common/openshift-cfme/uninstall.yml)2
-rw-r--r--playbooks/common/openshift-master/additional_config.yml29
-rw-r--r--playbooks/common/openshift-master/certificates.yml14
-rw-r--r--playbooks/common/openshift-master/config.yml92
-rw-r--r--playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js2
-rw-r--r--playbooks/common/openshift-master/scaleup.yml6
-rw-r--r--playbooks/common/openshift-master/set_network_facts.yml34
-rw-r--r--playbooks/common/openshift-master/tasks/wire_aggregator.yml (renamed from roles/openshift_service_catalog/tasks/wire_aggregator.yml)55
-rw-r--r--playbooks/common/openshift-nfs/config.yml22
-rw-r--r--playbooks/common/openshift-node/additional_config.yml52
-rw-r--r--playbooks/common/openshift-node/certificates.yml8
-rw-r--r--playbooks/common/openshift-node/config.yml117
-rw-r--r--playbooks/common/openshift-node/configure_nodes.yml17
-rw-r--r--playbooks/common/openshift-node/containerized_nodes.yml19
-rw-r--r--playbooks/common/openshift-node/enable_excluders.yml8
-rw-r--r--playbooks/common/openshift-node/etcd_client_config.yml11
-rw-r--r--playbooks/common/openshift-node/image_prep.yml21
-rw-r--r--playbooks/common/openshift-node/manage_node.yml12
-rw-r--r--playbooks/common/openshift-node/setup.yml27
-rw-r--r--playbooks/gcp/openshift-cluster/provision.yml19
-rw-r--r--roles/ansible_service_broker/defaults/main.yml11
-rw-r--r--roles/ansible_service_broker/tasks/install.yml282
-rw-r--r--roles/ansible_service_broker/tasks/main.yml4
-rw-r--r--roles/ansible_service_broker/tasks/remove.yml70
-rw-r--r--roles/ansible_service_broker/vars/default_images.yml1
-rw-r--r--roles/ansible_service_broker/vars/openshift-enterprise.yml2
-rw-r--r--roles/calico/tasks/main.yml22
-rw-r--r--roles/cockpit-ui/defaults/main.yml3
-rw-r--r--roles/cockpit-ui/tasks/main.yml4
-rw-r--r--roles/docker/defaults/main.yml20
-rw-r--r--roles/docker/handlers/main.yml1
-rw-r--r--roles/docker/tasks/main.yml11
-rw-r--r--roles/docker/tasks/package_docker.yml63
-rw-r--r--roles/docker/tasks/systemcontainer_crio.yml68
-rw-r--r--roles/docker/tasks/systemcontainer_docker.yml32
-rw-r--r--roles/docker/templates/crio.conf.j226
-rw-r--r--roles/docker/templates/custom.conf.j26
-rw-r--r--roles/docker/templates/registries.conf46
-rw-r--r--roles/etcd/defaults/main.yaml79
-rwxr-xr-xroles/etcd/library/delegated_serial_command.py (renamed from roles/etcd_common/library/delegated_serial_command.py)0
-rw-r--r--roles/etcd/meta/main.yml3
-rw-r--r--roles/etcd/tasks/auxiliary/clean_data.yml (renamed from roles/etcd_migrate/tasks/clean_data.yml)2
-rw-r--r--roles/etcd/tasks/auxiliary/disable_etcd.yml5
-rw-r--r--roles/etcd/tasks/auxiliary/drop_etcdctl.yml (renamed from roles/etcd_common/tasks/drop_etcdctl.yml)2
-rw-r--r--roles/etcd/tasks/auxiliary/force_new_cluster.yml31
-rw-r--r--roles/etcd/tasks/backup.archive.yml3
-rw-r--r--roles/etcd/tasks/backup.copy.yml3
-rw-r--r--roles/etcd/tasks/backup.fetch.yml3
-rw-r--r--roles/etcd/tasks/backup.force_new_cluster.yml12
-rw-r--r--roles/etcd/tasks/backup.unarchive.yml3
-rw-r--r--roles/etcd/tasks/backup.yml2
-rw-r--r--roles/etcd/tasks/backup/archive.yml5
-rw-r--r--roles/etcd/tasks/backup/backup.yml (renamed from roles/etcd_common/tasks/backup.yml)20
-rw-r--r--roles/etcd/tasks/backup/copy.yml5
-rw-r--r--roles/etcd/tasks/backup/fetch.yml8
-rw-r--r--roles/etcd/tasks/backup/unarchive.yml14
-rw-r--r--roles/etcd/tasks/backup/vars.yml18
-rw-r--r--roles/etcd/tasks/backup_ca_certificates.yml2
-rw-r--r--roles/etcd/tasks/backup_generated_certificates.yml2
-rw-r--r--roles/etcd/tasks/backup_master_etcd_certificates.yml2
-rw-r--r--roles/etcd/tasks/backup_server_certificates.yml2
-rw-r--r--roles/etcd/tasks/ca.yml2
-rw-r--r--roles/etcd/tasks/certificates/backup_ca_certificates.yml12
-rw-r--r--roles/etcd/tasks/certificates/backup_generated_certificates.yml13
-rw-r--r--roles/etcd/tasks/certificates/backup_master_etcd_certificates.yml7
-rw-r--r--roles/etcd/tasks/certificates/backup_server_certificates.yml11
-rw-r--r--roles/etcd/tasks/certificates/deploy_ca.yml (renamed from roles/etcd_ca/tasks/main.yml)4
-rw-r--r--roles/etcd/tasks/certificates/distribute_ca.yml47
-rw-r--r--roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml (renamed from roles/etcd_client_certificates/tasks/main.yml)2
-rw-r--r--roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml (renamed from roles/etcd_server_certificates/tasks/main.yml)4
-rw-r--r--roles/etcd/tasks/certificates/remove_ca_certificates.yml5
-rw-r--r--roles/etcd/tasks/certificates/remove_generated_certificates.yml5
-rw-r--r--roles/etcd/tasks/certificates/retrieve_ca_certificates.yml8
-rw-r--r--roles/etcd/tasks/check_cluster_health.yml2
-rw-r--r--roles/etcd/tasks/clean_data.yml2
-rw-r--r--roles/etcd/tasks/client_certificates.yml2
-rw-r--r--roles/etcd/tasks/disable_etcd.yml2
-rw-r--r--roles/etcd/tasks/distribute_ca2
-rw-r--r--roles/etcd/tasks/drop_etcdctl.yml2
-rw-r--r--roles/etcd/tasks/fetch_backup.yml8
-rw-r--r--roles/etcd/tasks/main.yml5
-rw-r--r--roles/etcd/tasks/migrate.add_ttls.yml2
-rw-r--r--roles/etcd/tasks/migrate.configure_master.yml2
-rw-r--r--roles/etcd/tasks/migrate.pre_check.yml2
-rw-r--r--roles/etcd/tasks/migrate.yml2
-rw-r--r--roles/etcd/tasks/migration/add_ttls.yml (renamed from roles/etcd_migrate/tasks/add_ttls.yml)1
-rw-r--r--roles/etcd/tasks/migration/check.yml (renamed from roles/etcd_migrate/tasks/check.yml)0
-rw-r--r--roles/etcd/tasks/migration/check_cluster_health.yml (renamed from roles/etcd_migrate/tasks/check_cluster_health.yml)0
-rw-r--r--roles/etcd/tasks/migration/check_cluster_status.yml (renamed from roles/etcd_migrate/tasks/check_cluster_status.yml)0
-rw-r--r--roles/etcd/tasks/migration/configure_master.yml (renamed from roles/etcd_migrate/tasks/configure.yml)0
-rw-r--r--roles/etcd/tasks/migration/migrate.yml (renamed from roles/etcd_migrate/tasks/migrate.yml)0
-rw-r--r--roles/etcd/tasks/remove_ca_certificates.yml2
-rw-r--r--roles/etcd/tasks/remove_generated_certificates.yml2
-rw-r--r--roles/etcd/tasks/retrieve_ca_certificates.yml2
-rw-r--r--roles/etcd/tasks/server_certificates.yml6
-rw-r--r--roles/etcd/tasks/upgrade/upgrade_image.yml (renamed from roles/etcd_upgrade/tasks/upgrade_image.yml)14
-rw-r--r--roles/etcd/tasks/upgrade/upgrade_rpm.yml (renamed from roles/etcd_upgrade/tasks/upgrade_rpm.yml)5
-rw-r--r--roles/etcd/tasks/upgrade_image.yml2
-rw-r--r--roles/etcd/tasks/upgrade_rpm.yml2
-rw-r--r--roles/etcd/templates/etcd.conf.j21
-rw-r--r--roles/etcd/templates/etcdctl.sh.j2 (renamed from roles/etcd_common/templates/etcdctl.sh.j2)0
-rw-r--r--roles/etcd/templates/openssl_append.j2 (renamed from roles/etcd_ca/templates/openssl_append.j2)0
-rw-r--r--roles/etcd_ca/README.md34
-rw-r--r--roles/etcd_client_certificates/README.md34
-rw-r--r--roles/etcd_client_certificates/meta/main.yml16
-rw-r--r--roles/etcd_common/README.md53
-rw-r--r--roles/etcd_common/defaults/main.yml75
-rw-r--r--roles/etcd_common/tasks/main.yml9
-rw-r--r--roles/etcd_common/tasks/noop.yml4
-rw-r--r--roles/etcd_common/vars/main.yml4
-rw-r--r--roles/etcd_migrate/README.md53
-rw-r--r--roles/etcd_migrate/defaults/main.yml3
-rw-r--r--roles/etcd_migrate/meta/main.yml17
-rw-r--r--roles/etcd_migrate/tasks/main.yml25
-rw-r--r--roles/etcd_server_certificates/README.md34
-rw-r--r--roles/etcd_server_certificates/meta/main.yml17
-rw-r--r--roles/etcd_upgrade/defaults/main.yml3
-rw-r--r--roles/etcd_upgrade/meta/main.yml17
-rw-r--r--roles/etcd_upgrade/tasks/main.yml14
-rw-r--r--roles/etcd_upgrade/tasks/upgrade.yml11
-rw-r--r--roles/etcd_upgrade/vars/main.yml3
-rw-r--r--roles/flannel/README.md2
-rw-r--r--roles/flannel/handlers/main.yml9
-rw-r--r--roles/flannel/meta/main.yml5
-rw-r--r--roles/flannel_register/defaults/main.yaml2
-rw-r--r--roles/installer_checkpoint/README.md176
-rw-r--r--roles/installer_checkpoint/callback_plugins/installer_checkpoint.py187
-rw-r--r--roles/lib_openshift/library/oc_adm_ca_server_cert.py4
-rw-r--r--roles/lib_openshift/library/oc_adm_csr.py4
-rw-r--r--roles/lib_openshift/library/oc_adm_manage_node.py4
-rw-r--r--roles/lib_openshift/library/oc_adm_policy_group.py4
-rw-r--r--roles/lib_openshift/library/oc_adm_policy_user.py4
-rw-r--r--roles/lib_openshift/library/oc_adm_registry.py10
-rw-r--r--roles/lib_openshift/library/oc_adm_router.py10
-rw-r--r--roles/lib_openshift/library/oc_clusterrole.py4
-rw-r--r--roles/lib_openshift/library/oc_configmap.py4
-rw-r--r--roles/lib_openshift/library/oc_edit.py4
-rw-r--r--roles/lib_openshift/library/oc_env.py4
-rw-r--r--roles/lib_openshift/library/oc_group.py4
-rw-r--r--roles/lib_openshift/library/oc_image.py4
-rw-r--r--roles/lib_openshift/library/oc_label.py4
-rw-r--r--roles/lib_openshift/library/oc_obj.py4
-rw-r--r--roles/lib_openshift/library/oc_objectvalidator.py4
-rw-r--r--roles/lib_openshift/library/oc_process.py4
-rw-r--r--roles/lib_openshift/library/oc_project.py4
-rw-r--r--roles/lib_openshift/library/oc_pvc.py4
-rw-r--r--roles/lib_openshift/library/oc_route.py16
-rw-r--r--roles/lib_openshift/library/oc_scale.py4
-rw-r--r--roles/lib_openshift/library/oc_secret.py20
-rw-r--r--roles/lib_openshift/library/oc_service.py4
-rw-r--r--roles/lib_openshift/library/oc_serviceaccount.py4
-rw-r--r--roles/lib_openshift/library/oc_serviceaccount_secret.py4
-rw-r--r--roles/lib_openshift/library/oc_storageclass.py4
-rw-r--r--roles/lib_openshift/library/oc_user.py4
-rw-r--r--roles/lib_openshift/library/oc_version.py4
-rw-r--r--roles/lib_openshift/library/oc_volume.py4
-rw-r--r--roles/lib_openshift/src/ansible/oc_route.py1
-rw-r--r--roles/lib_openshift/src/ansible/oc_secret.py1
-rw-r--r--roles/lib_openshift/src/class/oc_route.py1
-rw-r--r--roles/lib_openshift/src/class/oc_secret.py3
-rw-r--r--roles/lib_openshift/src/doc/route6
-rw-r--r--roles/lib_openshift/src/doc/secret6
-rw-r--r--roles/lib_openshift/src/lib/base.py2
-rw-r--r--roles/lib_openshift/src/lib/route.py4
-rw-r--r--roles/lib_openshift/src/lib/secret.py6
-rw-r--r--roles/lib_openshift/src/test/integration/filter_plugins/test_filters.py (renamed from roles/lib_openshift/src/test/integration/filter_plugins/filters.py)0
-rwxr-xr-xroles/lib_openshift/src/test/integration/oc_configmap.yml4
-rwxr-xr-xroles/lib_openshift/src/test/unit/test_oc_configmap.py6
-rwxr-xr-xroles/lib_openshift/src/test/unit/test_oc_route.py11
-rw-r--r--roles/lib_utils/library/repoquery.py18
-rw-r--r--roles/lib_utils/library/yedit.py3
-rw-r--r--roles/lib_utils/src/ansible/repoquery.py17
-rw-r--r--roles/lib_utils/src/class/yedit.py2
-rw-r--r--roles/lib_utils/src/lib/import.py1
-rw-r--r--roles/nuage_master/meta/main.yml3
-rwxr-xr-xroles/nuage_master/templates/nuage-master-config-daemonset.j26
-rwxr-xr-xroles/nuage_master/templates/nuage-node-config-daemonset.j215
-rw-r--r--roles/nuage_node/vars/main.yaml2
-rw-r--r--roles/openshift_aws/README.md79
-rw-r--r--roles/openshift_aws/defaults/main.yml8
-rw-r--r--roles/openshift_aws/filter_plugins/openshift_aws_filters.py (renamed from roles/openshift_aws/filter_plugins/filters.py)0
-rw-r--r--roles/openshift_aws/tasks/elb.yml8
-rw-r--r--roles/openshift_aws/tasks/iam_cert.yml10
-rw-r--r--roles/openshift_aws/tasks/launch_config.yml15
-rw-r--r--roles/openshift_aws/tasks/master_facts.yml22
-rw-r--r--roles/openshift_aws/tasks/provision.yml12
-rw-r--r--roles/openshift_aws/tasks/provision_instance.yml (renamed from roles/openshift_aws/tasks/build_ami.yml)43
-rw-r--r--roles/openshift_aws/tasks/setup_master_group.yml35
-rw-r--r--roles/openshift_ca/defaults/main.yml8
-rw-r--r--roles/openshift_ca/meta/main.yml1
-rw-r--r--roles/openshift_ca/vars/main.yml7
-rw-r--r--roles/openshift_cfme/README.md404
-rw-r--r--roles/openshift_cfme/defaults/main.yml42
-rw-r--r--roles/openshift_cfme/files/miq-template.yaml566
-rw-r--r--roles/openshift_cfme/files/openshift_cfme.exports3
-rw-r--r--roles/openshift_cfme/handlers/main.yml37
-rw-r--r--roles/openshift_cfme/img/CFMEBasicDeployment.pngbin38316 -> 0 bytes
-rw-r--r--roles/openshift_cfme/tasks/create_pvs.yml36
-rw-r--r--roles/openshift_cfme/tasks/main.yml117
-rw-r--r--roles/openshift_cfme/tasks/nfs.yml51
-rw-r--r--roles/openshift_cfme/tasks/tune_masters.yml12
-rw-r--r--roles/openshift_cfme/tasks/uninstall.yml46
-rw-r--r--roles/openshift_cfme/templates/miq-pv-db.yaml.j213
-rw-r--r--roles/openshift_cfme/templates/miq-pv-region.yaml.j213
-rw-r--r--roles/openshift_cfme/templates/miq-pv-server.yaml.j213
-rw-r--r--roles/openshift_default_storage_class/README.md2
-rw-r--r--roles/openshift_docker_facts/tasks/main.yml9
-rw-r--r--roles/openshift_etcd_client_certificates/meta/main.yml4
-rw-r--r--roles/openshift_etcd_client_certificates/tasks/main.yml4
-rw-r--r--roles/openshift_etcd_server_certificates/meta/main.yml16
-rw-r--r--roles/openshift_examples/README.md14
-rw-r--r--roles/openshift_excluder/tasks/install.yml31
-rwxr-xr-xroles/openshift_facts/library/openshift_facts.py204
-rw-r--r--roles/openshift_gcp/defaults/main.yml58
-rw-r--r--roles/openshift_gcp/tasks/main.yaml43
-rw-r--r--roles/openshift_gcp/templates/dns.j2.sh13
-rw-r--r--roles/openshift_gcp/templates/provision.j2.sh320
-rw-r--r--roles/openshift_gcp/templates/remove.j2.sh156
-rw-r--r--roles/openshift_gcp_image_prep/files/partition.conf3
-rw-r--r--roles/openshift_gcp_image_prep/tasks/main.yaml18
-rw-r--r--roles/openshift_health_checker/action_plugins/openshift_health_check.py158
-rw-r--r--roles/openshift_health_checker/library/ocutil.py11
-rw-r--r--roles/openshift_health_checker/openshift_checks/__init__.py134
-rw-r--r--roles/openshift_health_checker/openshift_checks/diagnostics.py62
-rw-r--r--roles/openshift_health_checker/openshift_checks/disk_availability.py39
-rw-r--r--roles/openshift_health_checker/openshift_checks/docker_image_availability.py118
-rw-r--r--roles/openshift_health_checker/openshift_checks/etcd_traffic.py2
-rw-r--r--roles/openshift_health_checker/openshift_checks/etcd_volume.py8
-rw-r--r--roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py8
-rw-r--r--roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py2
-rw-r--r--roles/openshift_health_checker/openshift_checks/logging/logging.py12
-rw-r--r--roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py2
-rw-r--r--roles/openshift_health_checker/openshift_checks/memory_availability.py6
-rw-r--r--roles/openshift_health_checker/openshift_checks/mixins.py9
-rw-r--r--roles/openshift_health_checker/openshift_checks/ovs_version.py2
-rw-r--r--roles/openshift_health_checker/openshift_checks/package_availability.py4
-rw-r--r--roles/openshift_health_checker/openshift_checks/package_version.py2
-rw-r--r--roles/openshift_health_checker/test/action_plugin_test.py126
-rw-r--r--roles/openshift_health_checker/test/diagnostics_test.py50
-rw-r--r--roles/openshift_health_checker/test/disk_availability_test.py53
-rw-r--r--roles/openshift_health_checker/test/docker_image_availability_test.py42
-rw-r--r--roles/openshift_health_checker/test/docker_storage_test.py6
-rw-r--r--roles/openshift_health_checker/test/elasticsearch_test.py18
-rw-r--r--roles/openshift_health_checker/test/etcd_traffic_test.py22
-rw-r--r--roles/openshift_health_checker/test/fluentd_config_test.py10
-rw-r--r--roles/openshift_health_checker/test/logging_check_test.py8
-rw-r--r--roles/openshift_health_checker/test/logging_index_time_test.py8
-rw-r--r--roles/openshift_health_checker/test/memory_availability_test.py36
-rw-r--r--roles/openshift_health_checker/test/openshift_check_test.py43
-rw-r--r--roles/openshift_health_checker/test/ovs_version_test.py14
-rw-r--r--roles/openshift_health_checker/test/package_availability_test.py8
-rw-r--r--roles/openshift_health_checker/test/package_update_test.py2
-rw-r--r--roles/openshift_health_checker/test/package_version_test.py12
-rw-r--r--roles/openshift_hosted/defaults/main.yml72
-rw-r--r--roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py (renamed from roles/openshift_hosted/filter_plugins/filters.py)0
-rw-r--r--roles/openshift_hosted/meta/main.yml1
-rw-r--r--roles/openshift_hosted/tasks/create_projects.yml14
-rw-r--r--roles/openshift_hosted/tasks/firewall.yml (renamed from roles/openshift_hosted/tasks/router/firewall.yml)10
-rw-r--r--roles/openshift_hosted/tasks/main.yml20
-rw-r--r--roles/openshift_hosted/tasks/registry.yml (renamed from roles/openshift_hosted/tasks/registry/registry.yml)65
-rw-r--r--roles/openshift_hosted/tasks/router.yml (renamed from roles/openshift_hosted/tasks/router/router.yml)54
-rw-r--r--roles/openshift_hosted/tasks/secure.yml (renamed from roles/openshift_hosted/tasks/registry/secure.yml)6
-rw-r--r--roles/openshift_hosted/tasks/secure/passthrough.yml (renamed from roles/openshift_hosted/tasks/registry/secure/passthrough.yml)0
-rw-r--r--roles/openshift_hosted/tasks/secure/reencrypt.yml (renamed from roles/openshift_hosted/tasks/registry/secure/reencrypt.yml)0
-rw-r--r--roles/openshift_hosted/tasks/storage/glusterfs.yml (renamed from roles/openshift_hosted/tasks/registry/storage/glusterfs.yml)0
-rw-r--r--roles/openshift_hosted/tasks/storage/object_storage.yml (renamed from roles/openshift_hosted/tasks/registry/storage/object_storage.yml)0
l---------roles/openshift_hosted/tasks/storage/registry_config.j2 (renamed from roles/openshift_hosted/tasks/registry/storage/registry_config.j2)0
-rw-r--r--roles/openshift_hosted/tasks/storage/s3.yml (renamed from roles/openshift_hosted/tasks/registry/storage/s3.yml)2
-rw-r--r--roles/openshift_hosted/tasks/wait_for_pod.yml36
-rw-r--r--roles/openshift_hosted/templates/registry_config.j28
-rw-r--r--roles/openshift_hosted/vars/main.yml11
-rw-r--r--roles/openshift_hosted_facts/tasks/main.yml9
-rw-r--r--roles/openshift_hosted_logging/README.md40
-rw-r--r--roles/openshift_hosted_logging/defaults/main.yml2
-rw-r--r--roles/openshift_hosted_logging/handlers/main.yml21
-rw-r--r--roles/openshift_hosted_logging/meta/main.yaml3
-rw-r--r--roles/openshift_hosted_logging/tasks/cleanup_logging.yaml59
-rw-r--r--roles/openshift_hosted_logging/tasks/deploy_logging.yaml177
-rw-r--r--roles/openshift_hosted_logging/tasks/main.yaml8
-rw-r--r--roles/openshift_hosted_logging/tasks/update_master_config.yaml7
-rw-r--r--roles/openshift_hosted_logging/vars/main.yaml32
-rw-r--r--roles/openshift_logging/README.md36
-rw-r--r--roles/openshift_logging/defaults/main.yml66
-rw-r--r--roles/openshift_logging/filter_plugins/openshift_logging.py19
-rw-r--r--roles/openshift_logging/library/openshift_logging_facts.py11
-rw-r--r--roles/openshift_logging/tasks/delete_logging.yaml7
-rw-r--r--roles/openshift_logging/tasks/generate_certs.yaml4
-rw-r--r--roles/openshift_logging/tasks/install_logging.yaml27
-rw-r--r--roles/openshift_logging/tasks/main.yaml7
-rw-r--r--roles/openshift_logging/vars/openshift-enterprise.yml2
-rw-r--r--roles/openshift_logging_elasticsearch/defaults/main.yml10
-rw-r--r--roles/openshift_logging_elasticsearch/tasks/main.yaml63
-rw-r--r--roles/openshift_logging_elasticsearch/templates/es.j249
-rw-r--r--roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j231
-rw-r--r--roles/openshift_logging_eventrouter/README.md20
-rw-r--r--roles/openshift_logging_eventrouter/defaults/main.yaml9
-rw-r--r--roles/openshift_logging_eventrouter/files/eventrouter-template.yaml103
-rw-r--r--roles/openshift_logging_eventrouter/tasks/delete_eventrouter.yaml40
-rw-r--r--roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml59
-rw-r--r--roles/openshift_logging_eventrouter/tasks/main.yaml6
-rw-r--r--roles/openshift_logging_eventrouter/templates/eventrouter-template.j2109
-rw-r--r--roles/openshift_logging_eventrouter/vars/main.yaml2
-rw-r--r--roles/openshift_logging_fluentd/defaults/main.yml2
-rw-r--r--roles/openshift_logging_fluentd/tasks/main.yaml3
-rw-r--r--roles/openshift_logging_fluentd/templates/fluent.conf.j22
-rw-r--r--roles/openshift_logging_fluentd/templates/fluentd.j264
-rw-r--r--roles/openshift_logging_mux/files/fluent.conf2
-rw-r--r--roles/openshift_logging_mux/templates/mux.j246
-rw-r--r--roles/openshift_manageiq/tasks/main.yaml5
-rw-r--r--roles/openshift_manageiq/vars/main.yml3
-rw-r--r--roles/openshift_management/README.md475
-rw-r--r--roles/openshift_management/defaults/main.yml90
-rw-r--r--roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml28
-rw-r--r--roles/openshift_management/files/templates/cloudforms/cfme-backup-pvc.yaml10
-rw-r--r--roles/openshift_management/files/templates/cloudforms/cfme-pv-backup-example.yaml13
-rw-r--r--roles/openshift_management/files/templates/cloudforms/cfme-pv-db-example.yaml38
-rw-r--r--roles/openshift_management/files/templates/cloudforms/cfme-pv-server-example.yaml38
-rw-r--r--roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml35
-rw-r--r--roles/openshift_management/files/templates/cloudforms/cfme-scc-sysadmin.yaml38
-rw-r--r--roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml763
-rw-r--r--roles/openshift_management/files/templates/cloudforms/cfme-template.yaml940
-rw-r--r--roles/openshift_management/files/templates/manageiq/miq-backup-job.yaml28
-rw-r--r--roles/openshift_management/files/templates/manageiq/miq-backup-pvc.yaml10
-rw-r--r--roles/openshift_management/files/templates/manageiq/miq-pv-backup-example.yaml13
-rw-r--r--roles/openshift_management/files/templates/manageiq/miq-pv-db-example.yaml38
-rw-r--r--roles/openshift_management/files/templates/manageiq/miq-pv-server-example.yaml38
-rw-r--r--roles/openshift_management/files/templates/manageiq/miq-restore-job.yaml35
-rw-r--r--roles/openshift_management/files/templates/manageiq/miq-template-ext-db.yaml771
-rw-r--r--roles/openshift_management/files/templates/manageiq/miq-template.yaml948
-rw-r--r--roles/openshift_management/handlers/main.yml0
-rw-r--r--roles/openshift_management/meta/main.yml (renamed from roles/openshift_cfme/meta/main.yml)1
-rw-r--r--roles/openshift_management/tasks/accounts.yml28
-rw-r--r--roles/openshift_management/tasks/main.yml79
-rw-r--r--roles/openshift_management/tasks/storage/create_nfs_pvs.yml69
-rw-r--r--roles/openshift_management/tasks/storage/nfs.yml67
-rw-r--r--roles/openshift_management/tasks/storage/storage.yml3
-rw-r--r--roles/openshift_management/tasks/template.yml128
-rw-r--r--roles/openshift_management/tasks/uninstall.yml23
-rw-r--r--roles/openshift_management/tasks/validate.yml90
-rw-r--r--roles/openshift_management/vars/main.yml76
-rw-r--r--roles/openshift_master/README.md2
-rw-r--r--roles/openshift_master/defaults/main.yml12
-rw-r--r--roles/openshift_master/meta/main.yml16
-rw-r--r--roles/openshift_master/tasks/check_master_api_is_ready.yml14
-rw-r--r--roles/openshift_master/tasks/configure_external_etcd.yml17
-rw-r--r--roles/openshift_master/tasks/main.yml44
-rw-r--r--roles/openshift_master/tasks/registry_auth.yml20
-rw-r--r--roles/openshift_master/tasks/systemd_units.yml33
-rw-r--r--roles/openshift_master/tasks/update_etcd_client_urls.yml8
-rw-r--r--roles/openshift_master/tasks/upgrade_facts.yml33
-rw-r--r--roles/openshift_master/templates/atomic-openshift-master.j22
-rw-r--r--roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j22
-rw-r--r--roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j22
-rw-r--r--roles/openshift_master/templates/master.yaml.v1.j24
-rw-r--r--roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j22
-rw-r--r--roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j22
-rw-r--r--roles/openshift_master/vars/main.yml19
-rw-r--r--roles/openshift_master_certificates/meta/main.yml4
-rw-r--r--roles/openshift_master_facts/defaults/main.yml2
-rw-r--r--roles/openshift_master_facts/filter_plugins/openshift_master.py30
l---------roles/openshift_master_facts/lookup_plugins/oo_option.py1
-rw-r--r--roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py17
-rw-r--r--roles/openshift_master_facts/tasks/main.yml2
-rw-r--r--roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py16
-rw-r--r--roles/openshift_metrics/README.md2
-rw-r--r--roles/openshift_metrics/defaults/main.yaml16
-rw-r--r--roles/openshift_metrics/tasks/install_hawkular.yaml1
-rw-r--r--roles/openshift_metrics/tasks/main.yaml8
-rw-r--r--roles/openshift_metrics/templates/hawkular_cassandra_rc.j22
-rw-r--r--roles/openshift_metrics/templates/hawkular_metrics_rc.j23
-rw-r--r--roles/openshift_metrics/templates/route.j23
-rw-r--r--roles/openshift_metrics/vars/default_images.yml4
-rw-r--r--roles/openshift_metrics/vars/openshift-enterprise.yml4
-rw-r--r--roles/openshift_named_certificates/defaults/main.yml6
-rw-r--r--roles/openshift_named_certificates/tasks/named_certificates.yml32
-rw-r--r--roles/openshift_named_certificates/vars/main.yml6
-rw-r--r--roles/openshift_nfs/README.md17
-rw-r--r--roles/openshift_nfs/defaults/main.yml8
-rw-r--r--roles/openshift_nfs/meta/main.yml (renamed from roles/openshift_etcd_ca/meta/main.yml)8
-rw-r--r--roles/openshift_nfs/tasks/create_export.yml34
-rw-r--r--roles/openshift_nfs/tasks/firewall.yml (renamed from roles/openshift_hosted/tasks/registry/firewall.yml)12
-rw-r--r--roles/openshift_nfs/tasks/setup.yml29
-rw-r--r--roles/openshift_node/README.md2
-rw-r--r--roles/openshift_node/defaults/main.yml13
-rw-r--r--roles/openshift_node/handlers/main.yml3
-rw-r--r--roles/openshift_node/meta/main.yml2
-rw-r--r--roles/openshift_node/tasks/bootstrap.yml2
-rw-r--r--roles/openshift_node/tasks/config.yml20
-rw-r--r--roles/openshift_node/tasks/config/configure-node-settings.yml2
-rw-r--r--roles/openshift_node/tasks/config/install-node-docker-service-file.yml8
-rw-r--r--roles/openshift_node/tasks/install.yml2
-rw-r--r--roles/openshift_node/tasks/main.yml36
-rw-r--r--roles/openshift_node/tasks/node_system_container.yml4
-rw-r--r--roles/openshift_node/tasks/registry_auth.yml19
-rw-r--r--roles/openshift_node/tasks/systemd_units.yml6
-rw-r--r--roles/openshift_node/templates/openshift.docker.node.dep.service2
-rw-r--r--roles/openshift_node/templates/openshift.docker.node.service2
-rw-r--r--roles/openshift_node_certificates/meta/main.yml3
-rwxr-xr-xroles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh4
-rw-r--r--roles/openshift_node_dnsmasq/handlers/main.yml1
-rw-r--r--roles/openshift_node_dnsmasq/tasks/no-network-manager.yml9
-rw-r--r--roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py (renamed from filter_plugins/openshift_node.py)6
-rw-r--r--roles/openshift_node_facts/tasks/main.yml11
-rw-r--r--roles/openshift_node_upgrade/README.md3
-rw-r--r--roles/openshift_node_upgrade/defaults/main.yml8
-rw-r--r--roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml2
-rw-r--r--roles/openshift_node_upgrade/tasks/main.yml2
-rw-r--r--roles/openshift_node_upgrade/tasks/registry_auth.yml24
-rw-r--r--roles/openshift_node_upgrade/tasks/systemd_units.yml2
-rw-r--r--roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service2
-rw-r--r--roles/openshift_node_upgrade/templates/openshift.docker.node.service18
-rw-r--r--roles/openshift_persistent_volumes/meta/main.yml3
-rw-r--r--roles/openshift_prometheus/defaults/main.yaml2
-rw-r--r--roles/openshift_prometheus/tasks/install_prometheus.yaml5
-rw-r--r--roles/openshift_repos/README.md10
-rw-r--r--roles/openshift_repos/tasks/centos_repos.yml25
-rw-r--r--roles/openshift_repos/tasks/main.yaml19
-rw-r--r--roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2 (renamed from roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo)2
-rw-r--r--roles/openshift_repos/templates/CentOS-OpenShift-Origin14.repo.j227
-rw-r--r--roles/openshift_repos/templates/CentOS-OpenShift-Origin15.repo.j227
-rw-r--r--roles/openshift_repos/templates/CentOS-OpenShift-Origin36.repo.j227
-rw-r--r--roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py44
-rw-r--r--roles/openshift_sanitize_inventory/library/conditional_set_fact.py68
-rw-r--r--roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml48
-rw-r--r--roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml17
-rw-r--r--roles/openshift_sanitize_inventory/tasks/deprecations.yml21
-rw-r--r--roles/openshift_sanitize_inventory/tasks/main.yml4
-rw-r--r--roles/openshift_sanitize_inventory/tasks/unsupported.yml22
-rw-r--r--roles/openshift_sanitize_inventory/vars/main.yml81
-rw-r--r--roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js1
-rw-r--r--roles/openshift_service_catalog/tasks/install.yml31
-rw-r--r--roles/openshift_service_catalog/vars/openshift-enterprise.yml2
-rw-r--r--roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml8
-rw-r--r--roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml61
-rw-r--r--roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml10
-rw-r--r--roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml27
-rw-r--r--roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml4
-rw-r--r--roles/openshift_storage_nfs/tasks/main.yml6
-rw-r--r--roles/openshift_storage_nfs/templates/exports.j26
-rw-r--r--roles/openshift_version/defaults/main.yml1
-rw-r--r--roles/openshift_version/tasks/main.yml15
-rw-r--r--roles/openshift_version/tasks/set_version_containerized.yml11
-rw-r--r--roles/os_firewall/tasks/iptables.yml2
-rw-r--r--roles/rhel_subscribe/tasks/enterprise.yml13
-rw-r--r--roles/rhel_subscribe/tasks/main.yml18
-rw-r--r--roles/template_service_broker/defaults/main.yml4
-rw-r--r--roles/template_service_broker/files/openshift-ansible-catalog-console.js1
-rw-r--r--roles/template_service_broker/files/remove-openshift-ansible-catalog-console.js2
-rw-r--r--roles/template_service_broker/meta/main.yml (renamed from roles/etcd_ca/meta/main.yml)7
-rw-r--r--roles/template_service_broker/tasks/install.yml77
-rw-r--r--roles/template_service_broker/tasks/main.yml8
-rw-r--r--roles/template_service_broker/tasks/remove.yml35
-rw-r--r--roles/template_service_broker/vars/default_images.yml4
-rw-r--r--roles/template_service_broker/vars/main.yml7
-rw-r--r--roles/template_service_broker/vars/openshift-enterprise.yml4
-rw-r--r--roles/tuned/defaults/main.yml3
-rw-r--r--roles/tuned/meta/main.yml (renamed from roles/etcd_common/meta/main.yml)8
-rw-r--r--roles/tuned/tasks/main.yml (renamed from roles/openshift_node/tasks/tuned.yml)2
-rw-r--r--roles/tuned/templates/openshift-control-plane/tuned.conf (renamed from roles/openshift_node/templates/tuned/openshift-control-plane/tuned.conf)0
-rw-r--r--roles/tuned/templates/openshift-node/tuned.conf (renamed from roles/openshift_node/templates/tuned/openshift-node/tuned.conf)0
-rw-r--r--roles/tuned/templates/openshift/tuned.conf (renamed from roles/openshift_node/templates/tuned/openshift/tuned.conf)0
-rw-r--r--roles/tuned/templates/recommend.conf (renamed from roles/openshift_node/templates/tuned/recommend.conf)9
-rw-r--r--setup.py2
-rw-r--r--test/integration/openshift_health_checker/common.go2
-rw-r--r--test/openshift_version_tests.py30
-rw-r--r--utils/docs/config.md1
590 files changed, 13217 insertions, 6005 deletions
diff --git a/.papr.inventory b/.papr.inventory
index 878d434e2..aa4324c21 100644
--- a/.papr.inventory
+++ b/.papr.inventory
@@ -11,6 +11,9 @@ openshift_image_tag="{{ lookup('env', 'OPENSHIFT_IMAGE_TAG') }}"
openshift_master_default_subdomain="{{ lookup('env', 'RHCI_ocp_node1_IP') }}.xip.io"
openshift_check_min_host_disk_gb=1.5
openshift_check_min_host_memory_gb=1.9
+osm_cluster_network_cidr=10.128.0.0/14
+openshift_portal_net=172.30.0.0/16
+osm_host_subnet_length=9
[masters]
ocp-master
diff --git a/.papr.sh b/.papr.sh
index decca625f..2d66fdacd 100755
--- a/.papr.sh
+++ b/.papr.sh
@@ -26,7 +26,8 @@ ansible-playbook -vvv -i .papr.inventory playbooks/byo/config.yml -e "openshift_
# check the cluster NB: we run it on the master since we may
# be in a different OSP network
ssh ocp-master docker run --rm --net=host --privileged \
- -v /etc/origin/master/admin.kubeconfig:/config fedora:25 sh -c \
+ -v /etc/origin/master/admin.kubeconfig:/config \
+ registry.fedoraproject.org/fedora:26 sh -c \
'"dnf install -y origin-tests && \
KUBECONFIG=/config /usr/libexec/origin/extended.test --ginkgo.v=1 \
--ginkgo.noColor --ginkgo.focus=\"Services.*NodePort|EmptyDir\""'
diff --git a/.papr.yml b/.papr.yml
index 6658720e4..119dd5fcf 100644
--- a/.papr.yml
+++ b/.papr.yml
@@ -14,28 +14,35 @@
cluster:
hosts:
- name: ocp-master
- distro: fedora/25/atomic
+ distro: fedora/26/atomic
+ specs:
+ ram: 4096
- name: ocp-node1
- distro: fedora/25/atomic
+ distro: fedora/26/atomic
- name: ocp-node2
- distro: fedora/25/atomic
+ distro: fedora/26/atomic
container:
- image: fedora:25
+ image: registry.fedoraproject.org/fedora:26
-packages:
- - gcc
- - python-pip
- - python-devel
- - libffi-devel
- - openssl-devel
- - redhat-rpm-config
+# temp workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1483553
+#packages:
+# - gcc
+# - python-pip
+# - python-devel
+# - libffi-devel
+# - openssl-devel
+# - redhat-rpm-config
-context: 'fedora/25/atomic'
+context: 'fedora/26/atomic'
env:
OPENSHIFT_IMAGE_TAG: v3.6.0
tests:
+ # temp workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1483553
+ - if (dnf distro-sync -y || :) |& grep -q -e BDB1539; then
+ rpm --rebuilddb; dnf distro-sync;
+ fi; dnf install -y gcc python-pip python-devel libffi-devel openssl-devel redhat-rpm-config
- ./.papr.sh
artifacts:
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 9a5acc500..ebbed5d92 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.7.0-0.126.0 ./
+3.7.0-0.150.0 ./
diff --git a/README_CONTAINERIZED_INSTALLATION.md b/README_CONTAINERIZED_INSTALLATION.md
index c697783e3..8eaa69deb 100644
--- a/README_CONTAINERIZED_INSTALLATION.md
+++ b/README_CONTAINERIZED_INSTALLATION.md
@@ -1,9 +1,8 @@
# Overview
Users may now deploy containerized versions of OpenShift Origin, OpenShift
-Enterprise, or Atomic Enterprise Platform on [Atomic
-Host](https://projectatomic.io) or RHEL, Centos, and Fedora. This includes
-OpenvSwitch based SDN.
+Enterprise on [Atomic Host](https://projectatomic.io) or RHEL, Centos, and
+Fedora. This includes OpenvSwitch based SDN.
## Installing on Atomic Host
@@ -54,13 +53,8 @@ before attempting to pull any of the following images.
openshift3/node
openshift3/openvswitch
registry.access.redhat.com/rhel7/etcd
- Atomic Enterprise Platform
- aep3/aep
- aep3/node
- aep3/openvswitch
- registry.access.redhat.com/rhel7/etcd
- * note openshift3/* and aep3/* images come from registry.access.redhat.com and
+ * note openshift3/* images come from registry.access.redhat.com and
rely on the --additional-repository flag being set appropriately.
### Starting and Stopping Containers
diff --git a/callback_plugins/default.py b/callback_plugins/default.py
deleted file mode 100644
index 97ad77724..000000000
--- a/callback_plugins/default.py
+++ /dev/null
@@ -1,70 +0,0 @@
-'''Plugin to override the default output logic.'''
-
-# upstream: https://gist.github.com/cliffano/9868180
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-# For some reason this has to be done
-import imp
-import os
-
-ANSIBLE_PATH = imp.find_module('ansible')[1]
-DEFAULT_PATH = os.path.join(ANSIBLE_PATH, 'plugins/callback/default.py')
-DEFAULT_MODULE = imp.load_source(
- 'ansible.plugins.callback.default',
- DEFAULT_PATH
-)
-
-try:
- from ansible.plugins.callback import CallbackBase
- BASECLASS = CallbackBase
-except ImportError: # < ansible 2.1
- BASECLASS = DEFAULT_MODULE.CallbackModule
-
-
-class CallbackModule(DEFAULT_MODULE.CallbackModule): # pylint: disable=too-few-public-methods,no-init
- '''
- Override for the default callback module.
-
- Render std err/out outside of the rest of the result which it prints with
- indentation.
- '''
- CALLBACK_VERSION = 2.0
- CALLBACK_TYPE = 'stdout'
- CALLBACK_NAME = 'default'
-
- def __init__(self, *args, **kwargs):
- # pylint: disable=non-parent-init-called
- BASECLASS.__init__(self, *args, **kwargs)
-
- def _dump_results(self, result):
- '''Return the text to output for a result.'''
- result['_ansible_verbose_always'] = True
-
- save = {}
- for key in ['stdout', 'stdout_lines', 'stderr', 'stderr_lines', 'msg']:
- if key in result:
- save[key] = result.pop(key)
-
- output = BASECLASS._dump_results(self, result) # pylint: disable=protected-access
-
- for key in ['stdout', 'stderr', 'msg']:
- if key in save and save[key]:
- output += '\n\n%s:\n\n%s\n' % (key.upper(), save[key])
-
- for key, value in save.items():
- result[key] = value
-
- return output
diff --git a/docs/proposals/playbook_consolidation.md b/docs/proposals/playbook_consolidation.md
new file mode 100644
index 000000000..98aedb021
--- /dev/null
+++ b/docs/proposals/playbook_consolidation.md
@@ -0,0 +1,178 @@
+# OpenShift-Ansible Playbook Consolidation
+
+## Description
+The `byo` designation is no longer meaningful because the playbooks in the
+`byo` directory can deploy to either physical hardware or cloud resources.
+Consolidating these directories will make the code base easier to maintain and
+the project more straightforward for users and developers.
+
+The main points of this proposal are:
+* Consolidate initialization playbooks into one set of playbooks in
+ `playbooks/init`.
+* Collapse `playbooks/byo` and `playbooks/common` into one set of
+ directories at `playbooks/openshift-*`.
+
+This consolidation effort may be more appropriate when the project moves to
+using a container as the default installation method.
+
+## Design
+
+### Initialization Playbook Consolidation
+Currently there are two separate sets of initialization playbooks:
+* `playbooks/byo/openshift-cluster/initialize_groups.yml`
+* `playbooks/common/openshift-cluster/std_include.yml`
+
+Although these playbooks are located in the `openshift-cluster` directory, they
+are shared by all of the `openshift-*` areas. These playbooks would be better
+organized in a `playbooks/init` directory collocated with all their related
+playbooks.
+
+In the example below, the following changes have been made:
+* `playbooks/byo/openshift-cluster/initialize_groups.yml` renamed to
+ `playbooks/init/initialize_host_groups.yml`
+* `playbooks/common/openshift-cluster/std_include.yml` renamed to
+ `playbooks/init/main.yml`
+* `- include: playbooks/init/initialize_host_groups.yml` has been added to the
+ top of `playbooks/init/main.yml`
+* All other related files for initialization have been moved to `playbooks/init`
+
+The `initialize_host_groups.yml` playbook contains only one play with a single
+task that imports variables for inventory group conversions. This task could be
+consolidated further with the play in `evaluate_groups.yml`.
+
+The new standard initialization playbook would be
+`playbooks/init/main.yml`.
+
+
+```
+
+> $ tree openshift-ansible/playbooks/init
+.
+├── evaluate_groups.yml
+├── initialize_facts.yml
+├── initialize_host_groups.yml
+├── initialize_openshift_repos.yml
+├── initialize_openshift_version.yml
+├── main.yml
+├── roles -> ../../roles
+├── validate_hostnames.yml
+└── vars
+ └── cluster_hosts.yml
+```
+
+```yaml
+# openshift-ansible/playbooks/init/main.yml
+---
+- include: initialize_host_groups.yml
+
+- include: evaluate_groups.yml
+
+- include: initialize_facts.yml
+
+- include: validate_hostnames.yml
+
+- include: initialize_openshift_repos.yml
+
+- include: initialize_openshift_version.yml
+```
+
+### `byo` and `common` Playbook Consolidation
+Historically, the `byo` directory coexisted with other platform directories
+which contained playbooks that then called into `common` playbooks to perform
+common installation steps for all platforms. Since the other platform
+directories have been removed, this separation is no longer necessary.
+
+In the example below, the following changes have been made:
+* `playbooks/byo/openshift-master` renamed to
+ `playbooks/openshift-master`
+* `playbooks/common/openshift-master` renamed to
+ `playbooks/openshift-master/private`
+* Original `byo` entry point playbooks have been updated to include their
+ respective playbooks from `private/`.
+* Symbolic links have been updated as necessary
+
+All user-consumable playbooks are in the root of `openshift-master`, and no
+entry point playbooks exist in the `private` directory. Maintaining the
+separation between entry point playbooks and private playbooks allows
+individual pieces of the deployment to be used as needed by other components.
+
+```
+openshift-ansible/playbooks/openshift-master
+> $ tree
+.
+├── config.yml
+├── private
+│   ├── additional_config.yml
+│   ├── config.yml
+│   ├── filter_plugins -> ../../../filter_plugins
+│   ├── library -> ../../../library
+│   ├── lookup_plugins -> ../../../lookup_plugins
+│   ├── restart_hosts.yml
+│   ├── restart_services.yml
+│   ├── restart.yml
+│   ├── roles -> ../../../roles
+│   ├── scaleup.yml
+│   └── validate_restart.yml
+├── restart.yml
+└── scaleup.yml
+```
+
+```yaml
+# openshift-ansible/playbooks/openshift-master/config.yml
+---
+- include: ../init/main.yml
+
+- include: private/config.yml
+```
+
+With the directory structure consolidated and component installs removed from
+`openshift-cluster`, that directory is no longer necessary. To deploy an entire
+OpenShift cluster, a single playbook would tie all of the different components
+together. The following example shows how multiple components would be combined
+to perform a complete install.
+
+```yaml
+# openshift-ansible/playbooks/deploy_cluster.yml
+---
+- include: init/main.yml
+
+- include: openshift-etcd/private/config.yml
+
+- include: openshift-nfs/private/config.yml
+
+- include: openshift-loadbalancer/private/config.yml
+
+- include: openshift-master/private/config.yml
+
+- include: openshift-node/private/config.yml
+
+- include: openshift-glusterfs/private/config.yml
+
+- include: openshift-hosted/private/config.yml
+
+- include: openshift-service-catalog/private/config.yml
+```
+
+## User Story
+As a developer of OpenShift-Ansible,
+I want to simplify the playbook directory structure
+so that users can easily find deployment playbooks and developers know where new
+features should be developed.
+
+## Implementation
+Given the size of this refactoring effort, it should be broken into smaller
+steps which can be completed independently while still maintaining a functional
+project.
+
+Steps:
+1. Update and merge consolidation of the initialization playbooks.
+2. Update and merge consolidation of each `openshift-*` component area.
+3. Update and merge consolidation of `openshift-cluster`.
+
+## Acceptance Criteria
+* Verify that all entry point playbooks install or configure as expected.
+* Verify that CI is updated for testing new playbook locations.
+* Verify that repo documentation is updated.
+* Verify that user documentation is updated.
+
+## References
diff --git a/files/origin-components/apiserver-config.yaml b/files/origin-components/apiserver-config.yaml
new file mode 100644
index 000000000..e4048d1da
--- /dev/null
+++ b/files/origin-components/apiserver-config.yaml
@@ -0,0 +1,4 @@
+kind: TemplateServiceBrokerConfig
+apiVersion: config.templateservicebroker.openshift.io/v1
+templateNamespaces:
+- openshift
diff --git a/files/origin-components/apiserver-template.yaml b/files/origin-components/apiserver-template.yaml
new file mode 100644
index 000000000..1b42597af
--- /dev/null
+++ b/files/origin-components/apiserver-template.yaml
@@ -0,0 +1,122 @@
+apiVersion: template.openshift.io/v1
+kind: Template
+metadata:
+ name: template-service-broker-apiserver
+parameters:
+- name: IMAGE
+ value: openshift/origin:latest
+- name: NAMESPACE
+ value: openshift-template-service-broker
+- name: LOGLEVEL
+ value: "0"
+- name: API_SERVER_CONFIG
+ value: |
+ kind: TemplateServiceBrokerConfig
+ apiVersion: config.templateservicebroker.openshift.io/v1
+ templateNamespaces:
+ - openshift
+objects:
+
+# to create the tsb server
+- apiVersion: extensions/v1beta1
+ kind: DaemonSet
+ metadata:
+ namespace: ${NAMESPACE}
+ name: apiserver
+ labels:
+ apiserver: "true"
+ spec:
+ template:
+ metadata:
+ name: apiserver
+ labels:
+ apiserver: "true"
+ spec:
+ serviceAccountName: apiserver
+ containers:
+ - name: c
+ image: ${IMAGE}
+ imagePullPolicy: IfNotPresent
+ command:
+ - "/usr/bin/openshift"
+ - "start"
+ - "template-service-broker"
+ - "--secure-port=8443"
+ - "--audit-log-path=-"
+ - "--tls-cert-file=/var/serving-cert/tls.crt"
+ - "--tls-private-key-file=/var/serving-cert/tls.key"
+ - "--loglevel=${LOGLEVEL}"
+ - "--config=/var/apiserver-config/apiserver-config.yaml"
+ ports:
+ - containerPort: 8443
+ volumeMounts:
+ - mountPath: /var/serving-cert
+ name: serving-cert
+ - mountPath: /var/apiserver-config
+ name: apiserver-config
+ readinessProbe:
+ httpGet:
+ path: /healthz
+ port: 8443
+ scheme: HTTPS
+ volumes:
+ - name: serving-cert
+ secret:
+ defaultMode: 420
+ secretName: apiserver-serving-cert
+ - name: apiserver-config
+ configMap:
+ defaultMode: 420
+ name: apiserver-config
+
+# to create the config for the TSB
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ namespace: ${NAMESPACE}
+ name: apiserver-config
+ data:
+ apiserver-config.yaml: ${API_SERVER_CONFIG}
+
+# to be able to assign powers to the process
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ namespace: ${NAMESPACE}
+ name: apiserver
+
+# to be able to expose TSB inside the cluster
+- apiVersion: v1
+ kind: Service
+ metadata:
+ namespace: ${NAMESPACE}
+ name: apiserver
+ annotations:
+ service.alpha.openshift.io/serving-cert-secret-name: apiserver-serving-cert
+ spec:
+ selector:
+ apiserver: "true"
+ ports:
+ - port: 443
+ targetPort: 8443
+
+# This service account will be granted permission to call the TSB.
+# The token for this SA will be provided to the service catalog for
+# use when calling the TSB.
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ namespace: ${NAMESPACE}
+ name: templateservicebroker-client
+
+# This secret will be populated with a copy of the templateservicebroker-client SA's
+# auth token. Since this secret has a static name, it can be referenced more
+# easily than the auto-generated secret for the service account.
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ namespace: ${NAMESPACE}
+ name: templateservicebroker-client
+ annotations:
+ kubernetes.io/service-account.name: templateservicebroker-client
+ type: kubernetes.io/service-account-token
diff --git a/files/origin-components/rbac-template.yaml b/files/origin-components/rbac-template.yaml
new file mode 100644
index 000000000..0937a9065
--- /dev/null
+++ b/files/origin-components/rbac-template.yaml
@@ -0,0 +1,92 @@
+apiVersion: template.openshift.io/v1
+kind: Template
+metadata:
+ name: template-service-broker-rbac
+parameters:
+- name: NAMESPACE
+ value: openshift-template-service-broker
+- name: KUBE_SYSTEM
+ value: kube-system
+objects:
+
+# Grant the service account permission to call the TSB
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: ClusterRoleBinding
+ metadata:
+ name: templateservicebroker-client
+ roleRef:
+ kind: ClusterRole
+ name: system:openshift:templateservicebroker-client
+ subjects:
+ - kind: ServiceAccount
+ namespace: ${NAMESPACE}
+ name: templateservicebroker-client
+
+# to delegate authentication and authorization
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: ClusterRoleBinding
+ metadata:
+ name: auth-delegator-${NAMESPACE}
+ roleRef:
+ kind: ClusterRole
+ name: system:auth-delegator
+ subjects:
+ - kind: ServiceAccount
+ namespace: ${NAMESPACE}
+ name: apiserver
+
+# to have the template service broker powers
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: ClusterRoleBinding
+ metadata:
+ name: tsb-${NAMESPACE}
+ roleRef:
+ kind: ClusterRole
+ name: system:openshift:controller:template-service-broker
+ subjects:
+ - kind: ServiceAccount
+ namespace: ${NAMESPACE}
+ name: apiserver
+
+# to read the config for terminating authentication
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: RoleBinding
+ metadata:
+ namespace: ${KUBE_SYSTEM}
+ name: extension-apiserver-authentication-reader-${NAMESPACE}
+ roleRef:
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - kind: ServiceAccount
+ namespace: ${NAMESPACE}
+ name: apiserver
+
+# allow the kube service catalog's SA to read the static secret defined
+# above, which will contain the token for the SA that can call the TSB.
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: Role
+ metadata:
+ name: templateservicebroker-auth-reader
+ namespace: ${NAMESPACE}
+ rules:
+ - apiGroups:
+ - ""
+ resourceNames:
+ - templateservicebroker-client
+ resources:
+ - secrets
+ verbs:
+ - get
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: RoleBinding
+ metadata:
+ namespace: ${NAMESPACE}
+ name: templateservicebroker-auth-reader
+ roleRef:
+ kind: Role
+ name: templateservicebroker-auth-reader
+ subjects:
+ - kind: ServiceAccount
+ namespace: kube-service-catalog
+ name: service-catalog-controller
diff --git a/files/origin-components/template-service-broker-registration.yaml b/files/origin-components/template-service-broker-registration.yaml
new file mode 100644
index 000000000..2086978f0
--- /dev/null
+++ b/files/origin-components/template-service-broker-registration.yaml
@@ -0,0 +1,25 @@
+apiVersion: template.openshift.io/v1
+kind: Template
+metadata:
+ name: template-service-broker-registration
+parameters:
+- name: TSB_NAMESPACE
+ value: openshift-template-service-broker
+- name: CA_BUNDLE
+ required: true
+objects:
+# register the tsb with the service catalog
+- apiVersion: servicecatalog.k8s.io/v1alpha1
+ kind: ServiceBroker
+ metadata:
+ name: template-service-broker
+ spec:
+ url: https://apiserver.${TSB_NAMESPACE}.svc:443/brokers/template.openshift.io
+ insecureSkipTLSVerify: false
+ caBundle: ${CA_BUNDLE}
+ authInfo:
+ bearer:
+ secretRef:
+ kind: Secret
+ name: templateservicebroker-client
+ namespace: ${TSB_NAMESPACE}
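
This registration template is what wires the broker into the service catalog: the ServiceBroker object points at the in-cluster apiserver service, authenticates with the token from the templateservicebroker-client secret, and verifies the broker's serving certificate against CA_BUNDLE, which has no default and must be supplied as a base64-encoded CA certificate. A minimal, hypothetical sketch of preparing that parameter (the CA file path is an assumption, not something this patch defines):

import base64

def ca_bundle_param(ca_path="/etc/origin/master/service-signer.crt"):
    """Base64-encode a PEM CA certificate for use as the CA_BUNDLE parameter."""
    with open(ca_path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")

# The resulting string would then be supplied when processing the template,
# e.g. via `oc process -p CA_BUNDLE=...` or an equivalent Ansible task.
if __name__ == "__main__":
    print(ca_bundle_param())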
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 277695f78..83a05370a 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -707,15 +707,104 @@ def oo_openshift_env(hostvars):
if regex.match(key):
facts[key] = hostvars[key]
- migrations = {'openshift_router_selector': 'openshift_hosted_router_selector',
- 'openshift_registry_selector': 'openshift_hosted_registry_selector'}
- for old_fact, new_fact in migrations.items():
- if old_fact in facts and new_fact not in facts:
- facts[new_fact] = facts[old_fact]
return facts
# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements
+def oo_component_persistent_volumes(hostvars, groups, component):
+ """ Generate list of persistent volumes based on oo_openshift_env
+ storage options set in host variables for a specific component.
+ """
+ if not issubclass(type(hostvars), dict):
+ raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
+ if not issubclass(type(groups), dict):
+ raise errors.AnsibleFilterError("|failed expects groups is a dict")
+
+ persistent_volume = None
+
+ if component in hostvars['openshift']:
+ if 'storage' in hostvars['openshift'][component]:
+ params = hostvars['openshift'][component]['storage']
+ kind = params['kind']
+ create_pv = params['create_pv']
+ if kind is not None and create_pv:
+ if kind == 'nfs':
+ host = params['host']
+ if host is None:
+ if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
+ host = groups['oo_nfs_to_config'][0]
+ else:
+ raise errors.AnsibleFilterError("|failed no storage host detected")
+ directory = params['nfs']['directory']
+ volume = params['volume']['name']
+ path = directory + '/' + volume
+ size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
+ access_modes = params['access']['modes']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ nfs=dict(
+ server=host,
+ path=path)))
+
+ elif kind == 'openstack':
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
+ access_modes = params['access']['modes']
+ filesystem = params['openstack']['filesystem']
+ volume_id = params['openstack']['volumeID']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ cinder=dict(
+ fsType=filesystem,
+ volumeID=volume_id)))
+
+ elif kind == 'glusterfs':
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
+ access_modes = params['access']['modes']
+ endpoints = params['glusterfs']['endpoints']
+ path = params['glusterfs']['path']
+ read_only = params['glusterfs']['readOnly']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ glusterfs=dict(
+ endpoints=endpoints,
+ path=path,
+ readOnly=read_only)))
+
+ elif not (kind == 'object' or kind == 'dynamic'):
+ msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
+ kind,
+ component)
+ raise errors.AnsibleFilterError(msg)
+ return persistent_volume
+
+
+# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements
def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
""" Generate list of persistent volumes based on oo_openshift_env
storage options set in host variables.
@@ -734,84 +823,122 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
if 'storage' in hostvars['openshift']['hosted'][component]:
params = hostvars['openshift']['hosted'][component]['storage']
kind = params['kind']
- create_pv = params['create_pv']
- if kind is not None and create_pv:
- if kind == 'nfs':
- host = params['host']
- if host is None:
- if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
- host = groups['oo_nfs_to_config'][0]
+ if 'create_pv' in params:
+ create_pv = params['create_pv']
+ if kind is not None and create_pv:
+ if kind == 'nfs':
+ host = params['host']
+ if host is None:
+ if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
+ host = groups['oo_nfs_to_config'][0]
+ else:
+ raise errors.AnsibleFilterError("|failed no storage host detected")
+ directory = params['nfs']['directory']
+ volume = params['volume']['name']
+ path = directory + '/' + volume
+ size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
else:
- raise errors.AnsibleFilterError("|failed no storage host detected")
- directory = params['nfs']['directory']
- volume = params['volume']['name']
- path = directory + '/' + volume
- size = params['volume']['size']
- if 'labels' in params:
- labels = params['labels']
- else:
- labels = dict()
- access_modes = params['access']['modes']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- labels=labels,
- access_modes=access_modes,
- storage=dict(
- nfs=dict(
- server=host,
- path=path)))
- persistent_volumes.append(persistent_volume)
- elif kind == 'openstack':
- volume = params['volume']['name']
- size = params['volume']['size']
- if 'labels' in params:
- labels = params['labels']
- else:
- labels = dict()
- access_modes = params['access']['modes']
- filesystem = params['openstack']['filesystem']
- volume_id = params['openstack']['volumeID']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- labels=labels,
- access_modes=access_modes,
- storage=dict(
- cinder=dict(
- fsType=filesystem,
- volumeID=volume_id)))
- persistent_volumes.append(persistent_volume)
- elif kind == 'glusterfs':
- volume = params['volume']['name']
- size = params['volume']['size']
- if 'labels' in params:
- labels = params['labels']
- else:
- labels = dict()
- access_modes = params['access']['modes']
- endpoints = params['glusterfs']['endpoints']
- path = params['glusterfs']['path']
- read_only = params['glusterfs']['readOnly']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- labels=labels,
- access_modes=access_modes,
- storage=dict(
- glusterfs=dict(
- endpoints=endpoints,
- path=path,
- readOnly=read_only)))
- persistent_volumes.append(persistent_volume)
- elif not (kind == 'object' or kind == 'dynamic'):
- msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
- kind,
- component)
- raise errors.AnsibleFilterError(msg)
+ labels = dict()
+ access_modes = params['access']['modes']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ nfs=dict(
+ server=host,
+ path=path)))
+ persistent_volumes.append(persistent_volume)
+ elif kind == 'openstack':
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
+ access_modes = params['access']['modes']
+ filesystem = params['openstack']['filesystem']
+ volume_id = params['openstack']['volumeID']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ cinder=dict(
+ fsType=filesystem,
+ volumeID=volume_id)))
+ persistent_volumes.append(persistent_volume)
+ elif kind == 'glusterfs':
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
+ access_modes = params['access']['modes']
+ endpoints = params['glusterfs']['endpoints']
+ path = params['glusterfs']['path']
+ read_only = params['glusterfs']['readOnly']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ glusterfs=dict(
+ endpoints=endpoints,
+ path=path,
+ readOnly=read_only)))
+ persistent_volumes.append(persistent_volume)
+ elif not (kind == 'object' or kind == 'dynamic'):
+ msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
+ kind,
+ component)
+ raise errors.AnsibleFilterError(msg)
+ if 'logging' in hostvars['openshift']:
+ persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'logging')
+ if persistent_volume is not None:
+ persistent_volumes.append(persistent_volume)
+ if 'loggingops' in hostvars['openshift']:
+ persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'loggingops')
+ if persistent_volume is not None:
+ persistent_volumes.append(persistent_volume)
+ if 'metrics' in hostvars['openshift']:
+ persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'metrics')
+ if persistent_volume is not None:
+ persistent_volumes.append(persistent_volume)
return persistent_volumes
+def oo_component_pv_claims(hostvars, component):
+ """ Generate list of persistent volume claims based on oo_openshift_env
+ storage options set in host variables for a specific component.
+ """
+ if not issubclass(type(hostvars), dict):
+ raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
+
+ if component in hostvars['openshift']:
+ if 'storage' in hostvars['openshift'][component]:
+ params = hostvars['openshift'][component]['storage']
+ kind = params['kind']
+ create_pv = params['create_pv']
+ create_pvc = params['create_pvc']
+ if kind not in [None, 'object'] and create_pv and create_pvc:
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ access_modes = params['access']['modes']
+ persistent_volume_claim = dict(
+ name="{0}-claim".format(volume),
+ capacity=size,
+ access_modes=access_modes)
+ return persistent_volume_claim
+ return None
+
+
def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):
""" Generate list of persistent volume claims based on oo_openshift_env
storage options set in host variables.
@@ -828,17 +955,31 @@ def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):
if 'storage' in hostvars['openshift']['hosted'][component]:
params = hostvars['openshift']['hosted'][component]['storage']
kind = params['kind']
- create_pv = params['create_pv']
- create_pvc = params['create_pvc']
- if kind not in [None, 'object'] and create_pv and create_pvc:
- volume = params['volume']['name']
- size = params['volume']['size']
- access_modes = params['access']['modes']
- persistent_volume_claim = dict(
- name="{0}-claim".format(volume),
- capacity=size,
- access_modes=access_modes)
- persistent_volume_claims.append(persistent_volume_claim)
+ if 'create_pv' in params:
+ if 'create_pvc' in params:
+ create_pv = params['create_pv']
+ create_pvc = params['create_pvc']
+ if kind not in [None, 'object'] and create_pv and create_pvc:
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ access_modes = params['access']['modes']
+ persistent_volume_claim = dict(
+ name="{0}-claim".format(volume),
+ capacity=size,
+ access_modes=access_modes)
+ persistent_volume_claims.append(persistent_volume_claim)
+ if 'logging' in hostvars['openshift']:
+ persistent_volume_claim = oo_component_pv_claims(hostvars, 'logging')
+ if persistent_volume_claim is not None:
+ persistent_volume_claims.append(persistent_volume_claim)
+ if 'loggingops' in hostvars['openshift']:
+ persistent_volume_claim = oo_component_pv_claims(hostvars, 'loggingops')
+ if persistent_volume_claim is not None:
+ persistent_volume_claims.append(persistent_volume_claim)
+ if 'metrics' in hostvars['openshift']:
+ persistent_volume_claim = oo_component_pv_claims(hostvars, 'metrics')
+ if persistent_volume_claim is not None:
+ persistent_volume_claims.append(persistent_volume_claim)
return persistent_volume_claims
@@ -877,10 +1018,8 @@ def oo_pods_match_component(pods, deployment_type, component):
raise errors.AnsibleFilterError("failed expects component to be a string")
image_prefix = 'openshift/origin-'
- if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
+ if deployment_type == 'openshift-enterprise':
image_prefix = 'openshift3/ose-'
- elif deployment_type == 'atomic-enterprise':
- image_prefix = 'aep3_beta/aep-'
matching_pods = []
image_regex = image_prefix + component + r'.*'
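
The refactoring above extracts a per-component helper, oo_component_persistent_volumes(), and has oo_persistent_volumes() call it for the logging, loggingops and metrics trees under hostvars['openshift'], while the hosted components keep their original in-line handling, now guarded by a create_pv key check. A made-up example of the expected input and output shape, assuming the filter module is importable as filter_plugins.oo_filters:

# Illustrative data only -- the values below are invented for the example.
from filter_plugins.oo_filters import oo_component_persistent_volumes

hostvars = {
    'openshift': {
        'logging': {
            'storage': {
                'kind': 'nfs',
                'create_pv': True,
                'host': None,
                'nfs': {'directory': '/exports'},
                'volume': {'name': 'logging', 'size': '10Gi'},
                'access': {'modes': ['ReadWriteOnce']},
                'labels': {'storage': 'logging'},
            }
        }
    }
}
groups = {'oo_nfs_to_config': ['nfs.example.com']}

pv = oo_component_persistent_volumes(hostvars, groups, 'logging')
# pv == {'name': 'logging-volume', 'capacity': '10Gi',
#        'labels': {'storage': 'logging'},
#        'access_modes': ['ReadWriteOnce'],
#        'storage': {'nfs': {'server': 'nfs.example.com',
#                            'path': '/exports/logging'}}}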
diff --git a/filter_plugins/openshift_version.py b/filter_plugins/openshift_version.py
index 809e82488..c515f1a71 100644
--- a/filter_plugins/openshift_version.py
+++ b/filter_plugins/openshift_version.py
@@ -33,10 +33,10 @@ def legacy_gte_function_builder(name, versions):
returns True/False
"""
version_gte = False
- if 'enterprise' in deployment_type:
+ if deployment_type == 'openshift-enterprise':
if str(version) >= LooseVersion(enterprise_version):
version_gte = True
- elif 'origin' in deployment_type:
+ else:
if str(version) >= LooseVersion(origin_version):
version_gte = True
return version_gte
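
With the legacy deployment types removed, the legacy *_gte_* version filters now treat anything other than openshift-enterprise as origin, and the comparison itself still relies on distutils LooseVersion ordering. A standalone illustration of that ordering (not repository code):

from distutils.version import LooseVersion

# '3.7.0' sorts after '3.6', and '3.5.5' sorts before it.
assert LooseVersion('3.7.0') >= LooseVersion('3.6')
assert not (LooseVersion('3.5.5') >= LooseVersion('3.6'))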
diff --git a/images/installer/Dockerfile.rhel7 b/images/installer/Dockerfile.rhel7
index 3110f409c..5245771d0 100644
--- a/images/installer/Dockerfile.rhel7
+++ b/images/installer/Dockerfile.rhel7
@@ -7,7 +7,7 @@ USER root
# Playbooks, roles, and their dependencies are installed from packages.
RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto openssl java-1.8.0-openjdk-headless httpd-tools" \
&& yum repolist > /dev/null \
- && yum-config-manager --enable rhel-7-server-ose-3.6-rpms \
+ && yum-config-manager --enable rhel-7-server-ose-3.7-rpms \
&& yum-config-manager --enable rhel-7-server-rh-common-rpms \
&& yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
&& rpm -q $INSTALL_PKGS \
diff --git a/images/installer/README_CONTAINER_IMAGE.md b/images/installer/README_CONTAINER_IMAGE.md
index bc1ebb4a8..bfe3661c0 100644
--- a/images/installer/README_CONTAINER_IMAGE.md
+++ b/images/installer/README_CONTAINER_IMAGE.md
@@ -45,4 +45,6 @@ These options may be set via the ``atomic`` ``--set`` flag. For defaults see ``r
* ANSIBLE_CONFIG - Full path for the ansible configuration file to use inside the container
-* INVENTORY_FILE - Full path for the inventory to use from the host
\ No newline at end of file
+* INVENTORY_FILE - Full path for the inventory to use from the host
+
+* INVENTORY_DIR - Full path for the inventory directory to use (e.g. for use with a hybrid dynamic/static inventory)
diff --git a/images/installer/root/exports/config.json.template b/images/installer/root/exports/config.json.template
index 739c0080f..1a009fa7b 100644
--- a/images/installer/root/exports/config.json.template
+++ b/images/installer/root/exports/config.json.template
@@ -24,7 +24,7 @@
"PLAYBOOK_FILE=$PLAYBOOK_FILE",
"ANSIBLE_CONFIG=$ANSIBLE_CONFIG"
],
- "cwd": "/opt/app-root/src/",
+ "cwd": "/usr/share/ansible/openshift-ansible",
"rlimits": [
{
"type": "RLIMIT_NOFILE",
diff --git a/images/installer/root/usr/local/bin/run b/images/installer/root/usr/local/bin/run
index 51ac566e5..cd38a6ff0 100755
--- a/images/installer/root/usr/local/bin/run
+++ b/images/installer/root/usr/local/bin/run
@@ -19,6 +19,9 @@ if [[ -v INVENTORY_FILE ]]; then
# Make a copy so that ALLOW_ANSIBLE_CONNECTION_LOCAL below
# does not attempt to modify the original
cp -a ${INVENTORY_FILE} ${INVENTORY}
+elif [[ -v INVENTORY_DIR ]]; then
+ INVENTORY="$(mktemp -d)"
+ cp -R ${INVENTORY_DIR}/* ${INVENTORY}
elif [[ -v INVENTORY_URL ]]; then
curl -o ${INVENTORY} ${INVENTORY_URL}
elif [[ -v DYNAMIC_SCRIPT_URL ]]; then
@@ -29,7 +32,7 @@ elif [[ -v GENERATE_INVENTORY ]]; then
/usr/local/bin/generate ${INVENTORY}
else
echo
- echo "One of INVENTORY_FILE, INVENTORY_URL, GENERATE_INVENTORY, or DYNAMIC_SCRIPT_URL must be provided."
+ echo "One of INVENTORY_FILE, INVENTORY_DIR, INVENTORY_URL, GENERATE_INVENTORY, or DYNAMIC_SCRIPT_URL must be provided."
exec /usr/local/bin/usage
fi
INVENTORY_ARG="-i ${INVENTORY}"
@@ -39,7 +42,7 @@ if [[ "$ALLOW_ANSIBLE_CONNECTION_LOCAL" = false ]]; then
fi
if [[ -v VAULT_PASS ]]; then
- VAULT_PASS_FILE=.vaultpass
+ VAULT_PASS_FILE="$(mktemp)"
echo ${VAULT_PASS} > ${VAULT_PASS_FILE}
VAULT_PASS_ARG="--vault-password-file ${VAULT_PASS_FILE}"
fi
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.example
index 0d60de6d2..0b6050891 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.example
@@ -27,24 +27,25 @@ ansible_ssh_user=root
debug_level=2
# Specify the deployment type. Valid values are origin and openshift-enterprise.
-openshift_deployment_type=openshift-enterprise
+openshift_deployment_type=origin
+#openshift_deployment_type=openshift-enterprise
# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
# rely on the version running on the first master. Works best for containerized installs where we can usually
# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
# release.
-openshift_release=v3.6
+openshift_release=v3.7
# Specify an exact container image tag to install or configure.
# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_image_tag=v3.6.0
+#openshift_image_tag=v3.7.0
# Specify an exact rpm version to install or configure.
# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_pkg_version=-3.6.0
+#openshift_pkg_version=-3.7.0
# This enables all the system containers except for docker:
#openshift_use_system_containers=False
@@ -58,9 +59,11 @@ openshift_release=v3.6
#openshift_use_etcd_system_container=False
#
# In either case, system_images_registry must be specified to be able to find the system images
+#system_images_registry="docker.io"
+# when openshift_deployment_type=='openshift-enterprise'
#system_images_registry="registry.access.redhat.com"
-# Install the openshift examples
+# Manage openshift example imagestreams and templates during install and upgrade
#openshift_install_examples=true
# Configure logoutURL in the master config for console customization
@@ -119,20 +122,20 @@ openshift_release=v3.6
# will be built off of the deployment type and ansible_distribution. Only
# use this option if you are sure you know what you are doing!
#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest"
-#openshift_crio_systemcontainer_image_registry_override="registry.example.com"
+#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest"
# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
# Default value: "--log-driver=journald"
#openshift_docker_options="-l warn --ipv6=false"
+# Specify exact version of Docker to configure or upgrade to.
+# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
+# docker_version="1.12.1"
+
# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True.
# Uncomment below to disable; for example if your kernel does not support the
# Docker overlay/overlay2 storage drivers with SELinux enabled.
#openshift_docker_selinux_enabled=False
-# Specify exact version of Docker to configure or upgrade to.
-# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
-# docker_version="1.12.1"
-
# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
# docker_upgrade=False
@@ -179,7 +182,7 @@ openshift_release=v3.6
#oreg_auth_credentials_replace: True
# OpenShift repository configuration
-#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
#openshift_repos_enable_testing=false
# htpasswd auth
@@ -237,9 +240,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# CloudForms Management Engine (ManageIQ) App Install
#
# Enables installation of MIQ server. Recommended for dedicated
-# clusters only. See roles/openshift_cfme/README.md for instructions
+# clusters only. See roles/openshift_management/README.md for instructions
# and requirements.
-#openshift_cfme_install_app=False
+#openshift_management_install_management=False
# Cloud Provider Configuration
#
@@ -346,7 +349,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# default storage plugin dependencies to install, by default the ceph and
# glusterfs plugin dependencies will be installed, if available.
-#osn_storage_plugin_deps=['ceph','glusterfs']
+#osn_storage_plugin_deps=['ceph','glusterfs','iscsi']
# OpenShift Router Options
#
@@ -432,6 +435,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# path using these options would be "/exports/registry"
#openshift_hosted_registry_storage_kind=nfs
#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
+# nfs_directory must conform to DNS-1123 subdomain: it must consist of lower case
+# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character
#openshift_hosted_registry_storage_nfs_directory=/exports
#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_registry_storage_volume_name=registry
@@ -444,6 +449,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_registry_storage_kind=nfs
#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
#openshift_hosted_registry_storage_host=nfs.example.com
+# nfs_directory must conform to DNS-1123 subdomain: it must consist of lower case
+# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character
#openshift_hosted_registry_storage_nfs_directory=/exports
#openshift_hosted_registry_storage_volume_name=registry
#openshift_hosted_registry_storage_volume_size=10Gi
@@ -457,7 +464,6 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_registry_storage_volume_size=10Gi
#
# AWS S3
-#
# S3 bucket must already exist.
#openshift_hosted_registry_storage_kind=object
#openshift_hosted_registry_storage_provider=s3
@@ -499,10 +505,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
#
# By default metrics are not automatically deployed, set this to enable them
-# openshift_hosted_metrics_deploy=true
+#openshift_metrics_install_metrics=true
#
# Storage Options
-# If openshift_hosted_metrics_storage_kind is unset then metrics will be stored
+# If openshift_metrics_storage_kind is unset then metrics will be stored
# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
# Storage options A & B currently support only one cassandra pod which is
# generally enough for up to 1000 pods. Additional volumes can be created
@@ -512,29 +518,29 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/metrics"
-#openshift_hosted_metrics_storage_kind=nfs
-#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_metrics_storage_nfs_directory=/exports
-#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
-#openshift_hosted_metrics_storage_volume_name=metrics
-#openshift_hosted_metrics_storage_volume_size=10Gi
-#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
+#openshift_metrics_storage_kind=nfs
+#openshift_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_metrics_storage_nfs_directory=/exports
+#openshift_metrics_storage_nfs_options='*(rw,root_squash)'
+#openshift_metrics_storage_volume_name=metrics
+#openshift_metrics_storage_volume_size=10Gi
+#openshift_metrics_storage_labels={'storage': 'metrics'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/metrics"
-#openshift_hosted_metrics_storage_kind=nfs
-#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_metrics_storage_host=nfs.example.com
-#openshift_hosted_metrics_storage_nfs_directory=/exports
-#openshift_hosted_metrics_storage_volume_name=metrics
-#openshift_hosted_metrics_storage_volume_size=10Gi
-#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
+#openshift_metrics_storage_kind=nfs
+#openshift_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_metrics_storage_host=nfs.example.com
+#openshift_metrics_storage_nfs_directory=/exports
+#openshift_metrics_storage_volume_name=metrics
+#openshift_metrics_storage_volume_size=10Gi
+#openshift_metrics_storage_labels={'storage': 'metrics'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
-#openshift_hosted_metrics_storage_kind=dynamic
+#openshift_metrics_storage_kind=dynamic
#
# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
# list of options please see roles/openshift_metrics/README.md
@@ -543,10 +549,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics
# Currently, you may only alter the hostname portion of the url, altering the
# `/hawkular/metrics` path will break installation of metrics.
-#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
+#openshift_metrics_hawkular_hostname=hawkular-metrics.example.com
# Configure the prefix and version for the component images
-#openshift_hosted_metrics_deployer_prefix=registry.example.com:8888/openshift3/
-#openshift_hosted_metrics_deployer_version=3.6.0
+#openshift_metrics_image_prefix=docker.io/openshift/origin-
+#openshift_metrics_image_version=v3.7
+# when openshift_deployment_type=='openshift-enterprise'
+#openshift_metrics_image_prefix=registry.access.redhat.com/openshift3/
+#openshift_metrics_image_version=v3.7
#
# StorageClass
# openshift_storageclass_name=gp2
@@ -556,36 +565,36 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Logging deployment
#
# Currently logging deployment is disabled by default, enable it by setting this
-#openshift_hosted_logging_deploy=true
+#openshift_logging_install_logging=true
#
# Logging storage config
# Option A - NFS Host Group
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/logging"
-#openshift_hosted_logging_storage_kind=nfs
-#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_logging_storage_nfs_directory=/exports
-#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
-#openshift_hosted_logging_storage_volume_name=logging
-#openshift_hosted_logging_storage_volume_size=10Gi
-#openshift_hosted_logging_storage_labels={'storage': 'logging'}
+#openshift_logging_storage_kind=nfs
+#openshift_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_logging_storage_nfs_directory=/exports
+#openshift_logging_storage_nfs_options='*(rw,root_squash)'
+#openshift_logging_storage_volume_name=logging
+#openshift_logging_storage_volume_size=10Gi
+#openshift_logging_storage_labels={'storage': 'logging'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/logging"
-#openshift_hosted_logging_storage_kind=nfs
-#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_logging_storage_host=nfs.example.com
-#openshift_hosted_logging_storage_nfs_directory=/exports
-#openshift_hosted_logging_storage_volume_name=logging
-#openshift_hosted_logging_storage_volume_size=10Gi
-#openshift_hosted_logging_storage_labels={'storage': 'logging'}
+#openshift_logging_storage_kind=nfs
+#openshift_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_logging_storage_host=nfs.example.com
+#openshift_logging_storage_nfs_directory=/exports
+#openshift_logging_storage_volume_name=logging
+#openshift_logging_storage_volume_size=10Gi
+#openshift_logging_storage_labels={'storage': 'logging'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
-#openshift_hosted_logging_storage_kind=dynamic
+#openshift_logging_storage_kind=dynamic
#
# Option D - none -- Logging will use emptydir volumes which are destroyed when
# pods are deleted
@@ -595,13 +604,16 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#
# Configure loggingPublicURL in the master config for aggregate logging, defaults
# to kibana.{{ openshift_master_default_subdomain }}
-#openshift_hosted_logging_hostname=logging.apps.example.com
+#openshift_logging_kibana_hostname=logging.apps.example.com
# Configure the number of elastic search nodes, unless you're using dynamic provisioning
# this value must be 1
-#openshift_hosted_logging_elasticsearch_cluster_size=1
+#openshift_logging_es_cluster_size=1
# Configure the prefix and version for the component images
-#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
-#openshift_hosted_logging_deployer_version=3.6.0
+#openshift_logging_image_prefix=docker.io/openshift/origin-
+#openshift_logging_image_version=v3.7.0
+# when openshift_deployment_type=='openshift-enterprise'
+#openshift_logging_image_prefix=registry.access.redhat.com/openshift3/
+#openshift_logging_image_version=3.7.0
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -621,7 +633,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting
# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS
# environment variable located in /etc/sysconfig/docker-network.
-# When upgrading these must be specificed!
+# When upgrading or scaling up the following must match what's in your master config!
+# Inventory: master yaml field
+# osm_cluster_network_cidr: clusterNetworkCIDR
+# openshift_portal_net: serviceNetworkCIDR
+# When installing, osm_cluster_network_cidr and openshift_portal_net must be set.
+# Sane examples are provided below.
#osm_cluster_network_cidr=10.128.0.0/14
#openshift_portal_net=172.30.0.0/16
@@ -641,17 +658,22 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# the CIDRs reserved for external IPs, nodes, pods, or services.
#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
-# Configure number of bits to allocate to each host’s subnet e.g. 9
+# Configure number of bits to allocate to each host's subnet e.g. 9
# would mean a /23 network on the host.
-# When upgrading this must be specificed!
+# When upgrading or scaling up the following must match what's in your master config!
+# Inventory: master yaml field
+# osm_host_subnet_length: hostSubnetLength
+# When installing, osm_host_subnet_length must be set. A sane example is provided below.
#osm_host_subnet_length=9
# Configure master API and console ports.
#openshift_master_api_port=8443
#openshift_master_console_port=8443
-# set RPM version for debugging purposes
-#openshift_pkg_version=-3.1.0.0
+# set exact RPM version (include - prefix)
+#openshift_pkg_version=-3.6.0
+# you may also specify version and release, ie:
+#openshift_pkg_version=-3.7.0-0.126.0.git.0.9351aae.el7
# Configure custom ca certificate
#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'}
@@ -663,6 +685,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Configure custom named certificates (SNI certificates)
#
+# https://docs.openshift.org/latest/install_config/certificate_customization.html
# https://docs.openshift.com/enterprise/latest/install_config/certificate_customization.html
#
# NOTE: openshift_master_named_certificates is cached on masters and is an
@@ -707,11 +730,6 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# interface other than the default network interface.
#openshift_set_node_ip=True
-# Force setting of system hostname when configuring OpenShift
-# This works around issues related to installations that do not have valid dns
-# entries for the interfaces attached to the host.
-#openshift_set_hostname=True
-
# Configure dnsIP in the node config
#openshift_dns_ip=172.30.0.1
@@ -732,6 +750,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail
# in versions >= 3.6
#openshift_use_dnsmasq=False
+
# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf
# This is useful for POC environments where DNS may not actually be available yet or to set
# options like 'strict-order' to alter dnsmasq configuration.
@@ -814,7 +833,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
#openshift_node_env_vars={"ENABLE_HTTP2": "true"}
-# Enable API service auditing, available as of 3.2
+# Enable API service auditing
#openshift_master_audit_config={"enabled": true}
#
# In case you want more advanced setup for the auditlog you can
@@ -823,6 +842,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# exist
#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
+# Enable origin repos that point at Centos PAAS SIG, defaults to true, only used
+# by deployment_type=origin
+#openshift_enable_origin_repo=false
+
# Validity of the auto-generated OpenShift certificates in days.
# See also openshift_hosted_registry_cert_expire_days above.
#
@@ -871,9 +894,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# You may wish to disable these or make them non fatal
#
# openshift_upgrade_pre_storage_migration_enabled=true
-# openshift_upgrade_pre_storage_migration_fatal==true
+# openshift_upgrade_pre_storage_migration_fatal=true
# openshift_upgrade_post_storage_migration_enabled=true
-# openshift_upgrade_post_storage_migration_fatal==false
+# openshift_upgrade_post_storage_migration_fatal=false
# host group for masters
[masters]
@@ -893,3 +916,61 @@ ose3-lb-ansible.test.example.com containerized=false
[nodes]
ose3-master[1:3]-ansible.test.example.com
ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
+
+# CloudForms/ManageIQ (CFME/MIQ) Configuration
+
+# See the readme for full descriptions and getting started
+# instructions: ../../roles/openshift_management/README.md or go directly to
+# their definitions: ../../roles/openshift_management/defaults/main.yml
+# ../../roles/openshift_management/vars/main.yml
+#
+# Namespace for the CFME project
+#openshift_management_project: openshift-management
+
+# Namespace/project description
+#openshift_management_project_description: CloudForms Management Engine
+
+# Choose 'miq-template' for a podified database install
+# Choose 'miq-template-ext-db' for an external database install
+#
+# If you are using the miq-template-ext-db template then you must add
+# the required database parameters to the
+# openshift_management_template_parameters variable.
+#openshift_management_app_template: miq-template
+
+# Allowed options: nfs, nfs_external, preconfigured, cloudprovider.
+#openshift_management_storage_class: nfs
+
+# [OPTIONAL] - If you are using an EXTERNAL NFS server, such as a
+# netapp appliance, then you must set the hostname here. Leave the
+# value as 'false' if you are not using external NFS.
+#openshift_management_storage_nfs_external_hostname: false
+
+# [OPTIONAL] - If you are using external NFS then you must set the base
+# path to the exports location here.
+#
+# Additionally: EXTERNAL NFS REQUIRES that YOU CREATE the nfs exports
+# that will back the application PV and optionally the database
+# pv. Export path definitions, relative to
+# {{ openshift_management_storage_nfs_base_dir }}
+#
+# LOCAL NFS NOTE:
+#
+# You may also change this value if you want to change the default
+# path used for local NFS exports.
+#openshift_management_storage_nfs_base_dir: /exports
+
+# LOCAL NFS NOTE:
+#
+# You may override the automatically selected LOCAL NFS server by
+# setting this variable. Useful for testing specific task files.
+#openshift_management_storage_nfs_local_hostname: false
+
+# A hash of parameters you want to override or set in the
+# miq-template.yaml or miq-template-ext-db.yaml templates. Set this in
+# your inventory file as a simple hash. Acceptable values are defined
+# under the .parameters list in files/miq-template{-ext-db}.yaml
+# Example:
+#
+# openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'}
+#openshift_management_template_parameters: {}
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
deleted file mode 100644
index dbe57bbd2..000000000
--- a/inventory/byo/hosts.origin.example
+++ /dev/null
@@ -1,892 +0,0 @@
-# This is an example of a bring your own (byo) host inventory
-
-# Create an OSEv3 group that contains the masters and nodes groups
-[OSEv3:children]
-masters
-nodes
-etcd
-lb
-nfs
-
-# Set variables common for all OSEv3 hosts
-[OSEv3:vars]
-# Enable unsupported configurations, things that will yield a partially
-# functioning cluster but would not be supported for production use
-#openshift_enable_unsupported_configurations=false
-
-# SSH user, this user should allow ssh based auth without requiring a
-# password. If using ssh key based auth, then the key should be managed by an
-# ssh agent.
-ansible_ssh_user=root
-
-# If ansible_ssh_user is not root, ansible_become must be set to true and the
-# user must be configured for passwordless sudo
-#ansible_become=yes
-
-# Debug level for all OpenShift components (Defaults to 2)
-debug_level=2
-
-# Specify the deployment type. Valid values are origin and openshift-enterprise.
-openshift_deployment_type=origin
-
-# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
-# rely on the version running on the first master. Works best for containerized installs where we can usually
-# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
-# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
-# release.
-openshift_release=v3.6
-
-# Specify an exact container image tag to install or configure.
-# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
-# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_image_tag=v3.6.0
-
-# Specify an exact rpm version to install or configure.
-# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
-# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_pkg_version=-3.6.0
-
-# This enables all the system containers except for docker:
-#openshift_use_system_containers=False
-#
-# But you can choose separately each component that must be a
-# system container:
-#
-#openshift_use_openvswitch_system_container=False
-#openshift_use_node_system_container=False
-#openshift_use_master_system_container=False
-#openshift_use_etcd_system_container=False
-#
-# In either case, system_images_registry must be specified to be able to find the system images
-#system_images_registry="docker.io"
-
-# Install the openshift examples
-#openshift_install_examples=true
-
-# Configure logoutURL in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
-#openshift_master_logout_url=http://example.com
-
-# Configure extensionScripts in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
-#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js']
-
-# Configure extensionStylesheets in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
-#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css']
-
-# Configure extensions in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
-#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}]
-
-# Configure extensions in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
-#openshift_master_oauth_template=/path/to/login-template.html
-
-# Configure imagePolicyConfig in the master config
-# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
-#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
-
-# Configure master API rate limits for external clients
-#openshift_master_external_ratelimit_qps=200
-#openshift_master_external_ratelimit_burst=400
-# Configure master API rate limits for loopback clients
-#openshift_master_loopback_ratelimit_qps=300
-#openshift_master_loopback_ratelimit_burst=600
-
-# Docker Configuration
-# Add additional, insecure, and blocked registries to global docker configuration
-# For enterprise deployment types we ensure that registry.access.redhat.com is
-# included if you do not include it
-#openshift_docker_additional_registries=registry.example.com
-#openshift_docker_insecure_registries=registry.example.com
-#openshift_docker_blocked_registries=registry.hacker.com
-# Disable pushing to dockerhub
-#openshift_docker_disable_push_dockerhub=True
-# Use Docker inside a System Container. Note that this is a tech preview and should
-# not be used to upgrade!
-# The following options for docker are ignored:
-# - docker_version
-# - docker_upgrade
-# The following options must not be used
-# - openshift_docker_options
-#openshift_docker_use_system_container=False
-# Instead of using docker, replacec it with cri-o
-# NOTE: This uses openshift_docker_systemcontainer_image_registry_override as it's override
-# just as container-engine does.
-#openshift_use_crio=False
-# Force the registry to use for the docker/crio system container. By default the registry
-# will be built off of the deployment type and ansible_distribution. Only
-# use this option if you are sure you know what you are doing!
-#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest"
-#openshift_crio_systemcontainer_image_registry_override="registry.example.com"
-# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
-# Default value: "--log-driver=journald"
-#openshift_docker_options="-l warn --ipv6=false"
-
-# Specify exact version of Docker to configure or upgrade to.
-# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
-# docker_version="1.12.1"
-
-# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True.
-# Uncomment below to disable; for example if your kernel does not support the
-# Docker overlay/overlay2 storage drivers with SELinux enabled.
-#openshift_docker_selinux_enabled=False
-
-# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
-# docker_upgrade=False
-
-# Specify exact version of etcd to configure or upgrade to.
-# etcd_version="3.1.0"
-# Enable etcd debug logging, defaults to false
-# etcd_debug=true
-# Set etcd log levels by package
-# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG"
-
-# Upgrade Hooks
-#
-# Hooks are available to run custom tasks at various points during a cluster
-# upgrade. Each hook should point to a file with Ansible tasks defined. Suggest using
-# absolute paths, if not the path will be treated as relative to the file where the
-# hook is actually used.
-#
-# Tasks to run before each master is upgraded.
-# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml
-#
-# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible
-# upgrade steps, but before we restart system/services.
-# openshift_master_upgrade_hook=/usr/share/custom/master.yml
-#
-# Tasks to run after each master is upgraded and system/services have been restarted.
-# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml
-
-
-# Alternate image format string, useful if you've got your own registry mirror
-# Configure this setting just on node or master
-#oreg_url_master=example.com/openshift3/ose-${component}:${version}
-#oreg_url_node=example.com/openshift3/ose-${component}:${version}
-# For setting the configuration globally
-#oreg_url=example.com/openshift3/ose-${component}:${version}
-# If oreg_url points to a registry other than registry.access.redhat.com we can
-# modify image streams to point at that registry by setting the following to true
-#openshift_examples_modify_imagestreams=true
-
-# OpenShift repository configuration
-#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
-#openshift_repos_enable_testing=false
-
-# htpasswd auth
-openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
-# Defining htpasswd users
-#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
-# or
-#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
-
-# Allow all auth
-#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
-
-# LDAP auth
-#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
-#
-# Configure LDAP CA certificate
-# Specify either the ASCII contents of the certificate or the path to
-# the local file that will be copied to the remote host. CA
-# certificate contents will be copied to master systems and saved
-# within /etc/origin/master/ with a filename matching the "ca" key set
-# within the LDAPPasswordIdentityProvider.
-#
-#openshift_master_ldap_ca=<ca text>
-# or
-#openshift_master_ldap_ca_file=<path to local ca file to use>
-
-# OpenID auth
-#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}]
-#
-# Configure OpenID CA certificate
-# Specify either the ASCII contents of the certificate or the path to
-# the local file that will be copied to the remote host. CA
-# certificate contents will be copied to master systems and saved
-# within /etc/origin/master/ with a filename matching the "ca" key set
-# within the OpenIDIdentityProvider.
-#
-#openshift_master_openid_ca=<ca text>
-# or
-#openshift_master_openid_ca_file=<path to local ca file to use>
-
-# Request header auth
-#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}]
-#
-# Configure request header CA certificate
-# Specify either the ASCII contents of the certificate or the path to
-# the local file that will be copied to the remote host. CA
-# certificate contents will be copied to master systems and saved
-# within /etc/origin/master/ with a filename matching the "clientCA"
-# key set within the RequestHeaderIdentityProvider.
-#
-#openshift_master_request_header_ca=<ca text>
-# or
-#openshift_master_request_header_ca_file=<path to local ca file to use>
-
-# CloudForms Management Engine (ManageIQ) App Install
-#
-# Enables installation of MIQ server. Recommended for dedicated
-# clusters only. See roles/openshift_cfme/README.md for instructions
-# and requirements.
-#openshift_cfme_install_app=False
-
-# Cloud Provider Configuration
-#
-# Note: You may make use of environment variables rather than store
-# sensitive configuration within the ansible inventory.
-# For example:
-#openshift_cloudprovider_aws_access_key="{{ lookup('env','AWS_ACCESS_KEY_ID') }}"
-#openshift_cloudprovider_aws_secret_key="{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}"
-#
-# AWS
-#openshift_cloudprovider_kind=aws
-# Note: IAM profiles may be used instead of storing API credentials on disk.
-#openshift_cloudprovider_aws_access_key=aws_access_key_id
-#openshift_cloudprovider_aws_secret_key=aws_secret_access_key
-#
-# Openstack
-#openshift_cloudprovider_kind=openstack
-#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/
-#openshift_cloudprovider_openstack_username=username
-#openshift_cloudprovider_openstack_password=password
-#openshift_cloudprovider_openstack_domain_id=domain_id
-#openshift_cloudprovider_openstack_domain_name=domain_name
-#openshift_cloudprovider_openstack_tenant_id=tenant_id
-#openshift_cloudprovider_openstack_tenant_name=tenant_name
-#openshift_cloudprovider_openstack_region=region
-#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id
-#
-# GCE
-#openshift_cloudprovider_kind=gce
-
-# Project Configuration
-#osm_project_request_message=''
-#osm_project_request_template=''
-#osm_mcs_allocator_range='s0:/2'
-#osm_mcs_labels_per_project=5
-#osm_uid_allocator_range='1000000000-1999999999/10000'
-
-# Configure additional projects
-#openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}}
-
-# Enable cockpit
-#osm_use_cockpit=true
-#
-# Set cockpit plugins
-#osm_cockpit_plugins=['cockpit-kubernetes']
-
-# Native high availability cluster method with optional load balancer.
-# If no lb group is defined, the installer assumes that a load balancer has
-# been preconfigured. For installation the value of
-# openshift_master_cluster_hostname must resolve to the load balancer
-# or to one or all of the masters defined in the inventory if no load
-# balancer is present.
-#openshift_master_cluster_method=native
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-
-# Pacemaker high availability cluster method.
-# Pacemaker HA environment must be able to self provision the
-# configured VIP. For installation openshift_master_cluster_hostname
-# must resolve to the configured VIP.
-#openshift_master_cluster_method=pacemaker
-#openshift_master_cluster_password=openshift_cluster
-#openshift_master_cluster_vip=192.168.133.25
-#openshift_master_cluster_public_vip=192.168.133.25
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-
-# Override the default controller lease ttl
-#osm_controller_lease_ttl=30
-
-# Configure controller arguments
-#osm_controller_args={'resource-quota-sync-period': ['10s']}
-
-# Configure api server arguments
-#osm_api_server_args={'max-requests-inflight': ['400']}
-
-# default subdomain to use for exposed routes
-#openshift_master_default_subdomain=apps.test.example.com
-
-# additional cors origins
-#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
-
-# default project node selector
-#osm_default_node_selector='region=primary'
-
-# Override the default pod eviction timeout
-#openshift_master_pod_eviction_timeout=5m
-
-# Override the default oauth tokenConfig settings:
-# openshift_master_access_token_max_seconds=86400
-# openshift_master_auth_token_max_seconds=500
-
-# Override master servingInfo.maxRequestsInFlight
-#openshift_master_max_requests_inflight=500
-
-# Override master and node servingInfo.minTLSVersion and .cipherSuites
-# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12
-# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants
-#openshift_master_min_tls_version=VersionTLS12
-#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
-#
-#openshift_node_min_tls_version=VersionTLS12
-#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
-
-# Default storage plugin dependencies to install. By default, the ceph and
-# glusterfs plugin dependencies will be installed, if available.
-#osn_storage_plugin_deps=['ceph','glusterfs','iscsi']
-
-# OpenShift Router Options
-#
-# An OpenShift router will be created during install if there are
-# nodes present with labels matching the default router selector,
-# "region=infra". Set openshift_node_labels per node as needed in
-# order to label nodes.
-#
-# Example:
-# [nodes]
-# node.example.com openshift_node_labels="{'region': 'infra'}"
-#
-# Router selector (optional)
-# Router will only be created if nodes matching this label are present.
-# Default value: 'region=infra'
-#openshift_hosted_router_selector='region=infra'
-#
-# Router replicas (optional)
-# Unless specified, openshift-ansible will calculate the replica count
-# based on the number of nodes matching the openshift router selector.
-#openshift_hosted_router_replicas=2
-#
-# Router force subdomain (optional)
-# A router path format to force on all routes used by this router
-# (will ignore the route host value)
-#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com'
-#
-# Router certificate (optional)
-# Provide local certificate paths which will be configured as the
-# router's default certificate.
-#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
-#
-# Manage the OpenShift Router
-#openshift_hosted_manage_router=true
-#
-# Router sharding support has been added and can be achieved by supplying the correct
-# data to the inventory. The variable to house the data is openshift_hosted_routers
-# and is in the form of a list. If no data is passed then a default router will be
-# created. There are multiple combinations of router sharding. The one described
-# below supports routers on separate nodes.
-#
-#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}]
-
-# OpenShift Registry Console Options
-# Override the console image prefix for enterprise deployments, not used in origin
-# default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console"
-#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/
-# Override image version, defaults to latest for origin, matches the product version for enterprise
-#openshift_cockpit_deployer_version=1.4.1
-
-# OpenShift Registry Options
-#
-# An OpenShift registry will be created during install if there are
-# nodes present with labels matching the default registry selector,
-# "region=infra". Set openshift_node_labels per node as needed in
-# order to label nodes.
-#
-# Example:
-# [nodes]
-# node.example.com openshift_node_labels="{'region': 'infra'}"
-#
-# Registry selector (optional)
-# Registry will only be created if nodes matching this label are present.
-# Default value: 'region=infra'
-#openshift_hosted_registry_selector='region=infra'
-#
-# Registry replicas (optional)
-# Unless specified, openshift-ansible will calculate the replica count
-# based on the number of nodes matching the openshift registry selector.
-#openshift_hosted_registry_replicas=2
-#
-# Validity of the auto-generated certificate in days (optional)
-#openshift_hosted_registry_cert_expire_days=730
-#
-# Manage the OpenShift Registry
-#openshift_hosted_manage_registry=true
-
-# Registry Storage Options
-#
-# NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/registry"
-#openshift_hosted_registry_storage_kind=nfs
-#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
-#openshift_hosted_registry_storage_nfs_directory=/exports
-#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
-#openshift_hosted_registry_storage_volume_name=registry
-#openshift_hosted_registry_storage_volume_size=10Gi
-#
-# External NFS Host
-# NFS volume must already exist with path "nfs_directory/volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/registry"
-#openshift_hosted_registry_storage_kind=nfs
-#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
-#openshift_hosted_registry_storage_host=nfs.example.com
-#openshift_hosted_registry_storage_nfs_directory=/exports
-#openshift_hosted_registry_storage_volume_name=registry
-#openshift_hosted_registry_storage_volume_size=10Gi
-#
-# Openstack
-# Volume must already exist.
-#openshift_hosted_registry_storage_kind=openstack
-#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_registry_storage_openstack_filesystem=ext4
-#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
-#openshift_hosted_registry_storage_volume_size=10Gi
-#
-# AWS S3
-# S3 bucket must already exist.
-#openshift_hosted_registry_storage_kind=object
-#openshift_hosted_registry_storage_provider=s3
-#openshift_hosted_registry_storage_s3_encrypt=false
-#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id
-#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
-#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
-#openshift_hosted_registry_storage_s3_bucket=bucket_name
-#openshift_hosted_registry_storage_s3_region=bucket_region
-#openshift_hosted_registry_storage_s3_chunksize=26214400
-#openshift_hosted_registry_storage_s3_rootdirectory=/registry
-#openshift_hosted_registry_pullthrough=true
-#openshift_hosted_registry_acceptschema2=true
-#openshift_hosted_registry_enforcequota=true
-#
-# Any S3 service (Minio, ExoScale, ...): Basically the same as above
-# but with regionendpoint configured
-# S3 bucket must already exist.
-#openshift_hosted_registry_storage_kind=object
-#openshift_hosted_registry_storage_provider=s3
-#openshift_hosted_registry_storage_s3_accesskey=access_key_id
-#openshift_hosted_registry_storage_s3_secretkey=secret_access_key
-#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/
-#openshift_hosted_registry_storage_s3_bucket=bucket_name
-#openshift_hosted_registry_storage_s3_region=bucket_region
-#openshift_hosted_registry_storage_s3_chunksize=26214400
-#openshift_hosted_registry_storage_s3_rootdirectory=/registry
-#openshift_hosted_registry_pullthrough=true
-#openshift_hosted_registry_acceptschema2=true
-#openshift_hosted_registry_enforcequota=true
-#
-# Additional CloudFront Options. When using CloudFront, all three
-# of the following variables must be defined.
-#openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/
-#openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem
-#openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid
-
-# Metrics deployment
-# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
-#
-# By default metrics are not automatically deployed, set this to enable them
-# openshift_hosted_metrics_deploy=true
-#
-# Storage Options
-# If openshift_hosted_metrics_storage_kind is unset then metrics will be stored
-# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
-# Storage options A & B currently support only one cassandra pod which is
-# generally enough for up to 1000 pods. Additional volumes can be created
-# manually after the fact and metrics scaled per the docs.
-#
-# Option A - NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/metrics"
-#openshift_hosted_metrics_storage_kind=nfs
-#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_metrics_storage_nfs_directory=/exports
-#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
-#openshift_hosted_metrics_storage_volume_name=metrics
-#openshift_hosted_metrics_storage_volume_size=10Gi
-#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
-#
-# Option B - External NFS Host
-# NFS volume must already exist with path "nfs_directory/volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/metrics"
-#openshift_hosted_metrics_storage_kind=nfs
-#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_metrics_storage_host=nfs.example.com
-#openshift_hosted_metrics_storage_nfs_directory=/exports
-#openshift_hosted_metrics_storage_volume_name=metrics
-#openshift_hosted_metrics_storage_volume_size=10Gi
-#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
-#
-# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
-# your cloud platform use this.
-#openshift_hosted_metrics_storage_kind=dynamic
-#
-# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
-# list of options please see roles/openshift_metrics/README.md
-#
-# Override metricsPublicURL in the master config for cluster metrics
-# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics
-# Currently, you may only alter the hostname portion of the url; altering the
-# `/hawkular/metrics` path will break installation of metrics.
-#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
-# Configure the prefix and version for the component images
-#openshift_hosted_metrics_deployer_prefix=docker.io/openshift/origin-
-#openshift_hosted_metrics_deployer_version=v3.6.0
-#
-# StorageClass
-# openshift_storageclass_name=gp2
-# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'}
-#
-
-# Logging deployment
-#
-# Currently logging deployment is disabled by default, enable it by setting this
-#openshift_hosted_logging_deploy=true
-#
-# Logging storage config
-# Option A - NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/logging"
-#openshift_hosted_logging_storage_kind=nfs
-#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_logging_storage_nfs_directory=/exports
-#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
-#openshift_hosted_logging_storage_volume_name=logging
-#openshift_hosted_logging_storage_volume_size=10Gi
-#openshift_hosted_logging_storage_labels={'storage': 'logging'}
-#
-# Option B - External NFS Host
-# NFS volume must already exist with path "nfs_directory/volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/logging"
-#openshift_hosted_logging_storage_kind=nfs
-#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_logging_storage_host=nfs.example.com
-#openshift_hosted_logging_storage_nfs_directory=/exports
-#openshift_hosted_logging_storage_volume_name=logging
-#openshift_hosted_logging_storage_volume_size=10Gi
-#openshift_hosted_logging_storage_labels={'storage': 'logging'}
-#
-# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
-# your cloud platform use this.
-#openshift_hosted_logging_storage_kind=dynamic
-#
-# Option D - none -- Logging will use emptydir volumes which are destroyed when
-# pods are deleted
-#
-# Other Logging Options -- Common items you may wish to reconfigure, for the complete
-# list of options please see roles/openshift_logging/README.md
-#
-# Configure loggingPublicURL in the master config for aggregate logging, defaults
-# to kibana.{{ openshift_master_default_subdomain }}
-#openshift_hosted_logging_hostname=logging.apps.example.com
-# Configure the number of Elasticsearch nodes; unless you're using dynamic provisioning
-# this value must be 1
-#openshift_hosted_logging_elasticsearch_cluster_size=1
-# Configure the prefix and version for the component images
-#openshift_hosted_logging_deployer_prefix=docker.io/openshift/origin-
-#openshift_hosted_logging_deployer_version=v3.6.0
-
-# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
-# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
-
-# Disable the OpenShift SDN plugin
-# openshift_use_openshift_sdn=False
-
-# Configure SDN cluster network and kubernetes service CIDR blocks. These
-# network blocks should be private and should not conflict with network blocks
-# in your infrastructure that pods may require access to. Can not be changed
-# after deployment.
-#
-# WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of
-# 172.17.0.0/16. Your installation will fail and/or your configuration change will
-# cause the Pod SDN or Cluster SDN to fail.
-#
-# WORKAROUND : If you must use an overlapping subnet, you can configure a non-conflicting
-# docker0 CIDR range by adding '--bip=192.168.2.1/24' to the DOCKER_NETWORK_OPTIONS
-# environment variable located in /etc/sysconfig/docker-network.
-# When upgrading, these must be specified!
-#osm_cluster_network_cidr=10.128.0.0/14
-#openshift_portal_net=172.30.0.0/16
-
-# ExternalIPNetworkCIDRs controls what values are acceptable for the
-# service external IP field. If empty, no externalIP may be set. It
-# may contain a list of CIDRs which are checked for access. If a CIDR
-# is prefixed with !, IPs in that CIDR will be rejected. Rejections
-# will be applied first, then the IP checked against one of the
-# allowed CIDRs. You should ensure this range does not overlap with
-# your nodes, pods, or service CIDRs for security reasons.
-#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
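For intuition, the reject-then-allow matching described above can be sketched in a few lines of Python. This is purely illustrative: the helper name external_ip_allowed is made up here, and this is not the code OpenShift itself uses.

```python
import ipaddress

def external_ip_allowed(ip, cidrs):
    """Illustrative only: '!'-prefixed CIDRs reject first,
    then the IP must fall inside one of the allowed CIDRs."""
    addr = ipaddress.ip_address(ip)
    rejected = [c[1:] for c in cidrs if c.startswith("!")]
    allowed = [c for c in cidrs if not c.startswith("!")]
    if any(addr in ipaddress.ip_network(c) for c in rejected):
        return False
    return any(addr in ipaddress.ip_network(c) for c in allowed)

print(external_ip_allowed("192.0.2.10", ["!192.0.2.0/24", "0.0.0.0/0"]))   # False, rejected
print(external_ip_allowed("198.51.100.7", ["!192.0.2.0/24", "0.0.0.0/0"]))  # True, allowed
```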
-
-# IngressIPNetworkCIDR controls the range to assign ingress IPs from for
-# services of type LoadBalancer on bare metal. If empty, ingress IPs will not
-# be assigned. It may contain a single CIDR that will be allocated from. For
-# security reasons, you should ensure that this range does not overlap with
-# the CIDRs reserved for external IPs, nodes, pods, or services.
-#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
-
-# Configure the number of bits to allocate to each host's subnet, e.g. 9
-# would mean a /23 network on the host (32 - 9 = 23).
-# When upgrading, this must be specified!
-#osm_host_subnet_length=9
-
-# Configure master API and console ports.
-#openshift_master_api_port=8443
-#openshift_master_console_port=8443
-
-# set RPM version for debugging purposes
-#openshift_pkg_version=-1.1
-
-# Configure custom ca certificate
-#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'}
-#
-# NOTE: The CA certificate will not be replaced on existing clusters.
-# This option may only be specified when creating a new cluster or
-# when redeploying cluster certificates with the redeploy-certificates
-# playbook.
-
-# Configure custom named certificates (SNI certificates)
-#
-# https://docs.openshift.org/latest/install_config/certificate_customization.html
-#
-# NOTE: openshift_master_named_certificates is cached on masters and is an
-# additive fact, meaning that each run with a different set of certificates
-# will add the newly provided certificates to the cached set of certificates.
-#
-# An optional CA may be specified for each named certificate. CAs will
-# be added to the OpenShift CA bundle which allows for the named
-# certificate to be served for internal cluster communication.
-#
-# If you would like openshift_master_named_certificates to be overwritten with
-# the provided value, specify openshift_master_overwrite_named_certificates.
-#openshift_master_overwrite_named_certificates=true
-#
-# Provide local certificate paths which will be deployed to masters
-#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]
-#
-# Detected names may be overridden by specifying the "names" key
-#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}]
-
-# Session options
-#openshift_master_session_name=ssn
-#openshift_master_session_max_seconds=3600
-
-# An authentication and encryption secret will be generated if secrets
-# are not provided. If provided, openshift_master_session_auth_secrets
-# and openshift_master_session_encryption_secrets must be of equal length.
-#
-# Signing secrets, used to authenticate sessions using
-# HMAC. Recommended to use secrets with 32 or 64 bytes.
-#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
-#
-# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
-# characters long, to select AES-128, AES-192, or AES-256.
-#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
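If you want to supply your own values, random secrets of the lengths recommended above can be generated with a short Python sketch. This is only one possible approach, not a required tool.

```python
import base64
import os

# base64 of 24 random bytes yields exactly 32 characters, which satisfies both
# the 32-byte signing recommendation and the 32-character (AES-256) encryption length.
auth_secret = base64.b64encode(os.urandom(24)).decode()
encryption_secret = base64.b64encode(os.urandom(24)).decode()

print("openshift_master_session_auth_secrets=['{0}']".format(auth_secret))
print("openshift_master_session_encryption_secrets=['{0}']".format(encryption_secret))
```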
-
-# configure how often node iptables rules are refreshed
-#openshift_node_iptables_sync_period=5s
-
-# Configure nodeIP in the node config
-# This is needed in cases where node traffic is desired to go over an
-# interface other than the default network interface.
-#openshift_set_node_ip=True
-
-# Force setting of system hostname when configuring OpenShift
-# This works around issues related to installations that do not have valid dns
-# entries for the interfaces attached to the host.
-#openshift_set_hostname=True
-
-# Configure dnsIP in the node config
-#openshift_dns_ip=172.30.0.1
-
-# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
-#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']}
-
-# Configure logrotate scripts
-# See: https://github.com/nickhammond/ansible-logrotate
-#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
-
-# openshift-ansible will wait indefinitely for your input when it detects that the
-# value of openshift_hostname resolves to an IP address not bound to any local
-# interfaces. This mis-configuration is problematic for any pod leveraging host
-# networking and liveness or readiness probes.
-# Setting this variable to true will override that check.
-#openshift_override_hostname_check=true
-
-# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail
-# in versions >= 3.6
-#openshift_use_dnsmasq=False
-
-# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf
-# This is useful for POC environments where DNS may not actually be available yet or to set
-# options like 'strict-order' to alter dnsmasq configuration.
-#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf
-
-# Global Proxy Configuration
-# These options configure the HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment
-# variables for docker and master services.
-#
-# Hosts in the openshift_no_proxy list will NOT use any globally
-# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains
-# (.example.com), hosts (example.com), and IP addresses.
-#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
-#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
-#openshift_no_proxy='.hosts.example.com,some-host.com'
-#
-# Most environments don't require a proxy between openshift masters, nodes, and
-# etcd hosts, so those hostnames are automatically added to the openshift_no_proxy list.
-# If all of your hosts share a common domain you may wish to disable this and
-# specify that domain above instead.
-#
-# For example, having hosts with FQDNs: m1.ex.com, n1.ex.com, and
-# n2.ex.com, one would simply add '.ex.com' to the openshift_no_proxy
-# variable (above) and set this value to False
-#openshift_generate_no_proxy_hosts=True
-#
-# These options configure the BuildDefaults admission controller which injects
-# configuration into Builds. Proxy related values will default to the global proxy
-# config values. You only need to set these if they differ from the global proxy settings.
-# See BuildDefaults documentation at
-# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
-#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_no_proxy=mycorp.com
-#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_git_no_proxy=mycorp.com
-#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
-#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'}
-#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'}
-#openshift_builddefaults_resources_requests_cpu=100m
-#openshift_builddefaults_resources_requests_memory=256Mi
-#openshift_builddefaults_resources_limits_cpu=1000m
-#openshift_builddefaults_resources_limits_memory=512Mi
-
-# Or you may optionally define your own build defaults configuration serialized as json
-#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}'
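Because the inventory value has to stay on a single line, it can be easier to build and serialize it programmatically. A minimal sketch that reproduces the example value above, for illustration only:

```python
import json

build_defaults = {
    "BuildDefaults": {
        "configuration": {
            "apiVersion": "v1",
            "kind": "BuildDefaultsConfig",
            "env": [
                {"name": "HTTP_PROXY", "value": "http://proxy.example.com.redhat.com:3128"},
                {"name": "NO_PROXY", "value": "ose3-master.example.com"},
            ],
            "gitHTTPProxy": "http://proxy.example.com:3128",
            "gitNoProxy": "ose3-master.example.com",
        }
    }
}

# Serialize to the single-line JSON string expected by openshift_builddefaults_json.
print("openshift_builddefaults_json='{0}'".format(json.dumps(build_defaults, separators=(",", ":"))))
```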
-
-# These options configure the BuildOverrides admission controller which injects
-# configuration into Builds.
-# See BuildOverrides documentation at
-# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
-#openshift_buildoverrides_force_pull=true
-#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
-#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
-#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}
-
-# Or you may optionally define your own build overrides configuration serialized as json
-#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
-
-# Enable the template service broker by specifying one or more namespaces whose
-# templates will be served by the broker.
-#openshift_template_service_broker_namespaces=['openshift']
-
-# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
-#openshift_master_dynamic_provisioning_enabled=False
-
-# Admission plugin config
-#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}}
-
-# Configure usage of openshift_clock role.
-#openshift_clock_enabled=true
-
-# OpenShift Per-Service Environment Variables
-# Environment variables are added to /etc/sysconfig files for
-# each OpenShift service: node, master (api and controllers).
-# API and controllers environment variables are merged in single
-# master environments.
-#openshift_master_api_env_vars={"ENABLE_HTTP2": "true"}
-#openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
-#openshift_node_env_vars={"ENABLE_HTTP2": "true"}
-
-# Enable API service auditing, available as of 1.3
-#openshift_master_audit_config={"enabled": true}
-#
-# For a more advanced audit log setup you can use the line below.
-# The directory in "auditFilePath" will be created if it does not
-# exist.
-#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
-
-# Enable origin repos that point at the CentOS PaaS SIG, defaults to true, only used
-# by deployment_type=origin
-#openshift_enable_origin_repo=false
-
-# Validity of the auto-generated OpenShift certificates in days.
-# See also openshift_hosted_registry_cert_expire_days above.
-#
-#openshift_ca_cert_expire_days=1825
-#openshift_node_cert_expire_days=730
-#openshift_master_cert_expire_days=730
-
-# Validity of the auto-generated external etcd certificates in days.
-# Controls validity for etcd CA, peer, server and client certificates.
-#
-#etcd_ca_default_days=1825
-#
-# ServiceAccountConfig:LimitSecretReferences rejects pods that reference secrets their service accounts do not reference
-# openshift_master_saconfig_limitsecretreferences=false
-
-# Upgrade Control
-#
-# By default, nodes are upgraded serially, one at a time, and all failures
-# are fatal. There is one set of variables for normal nodes and one set for
-# nodes that are part of the control plane, as the number of hosts may be different
-# in those two groups.
-#openshift_upgrade_nodes_serial=1
-#openshift_upgrade_nodes_max_fail_percentage=0
-#openshift_upgrade_control_plane_nodes_serial=1
-#openshift_upgrade_control_plane_nodes_max_fail_percentage=0
-#
-# You can specify the number of nodes to upgrade at once. We do not currently
-# attempt to verify that you have capacity to drain this many nodes at once
-# so please be careful when specifying these values. You should also verify that
-# the expected number of nodes are all schedulable and ready before starting an
-# upgrade. If it's not possible to drain the requested nodes the upgrade will
-# stall indefinitely until the drain is successful.
-#
-# If you're upgrading more than one node at a time you can specify the maximum
-# percentage of failure within the batch before the upgrade is aborted. Any
-# nodes that do fail are ignored for the rest of the playbook run, and you should
-# take care to investigate the failure and return the node to service so that
-# your cluster regains its full capacity.
-#
-# The failure percentage must exceed the configured value for the upgrade to abort.
-# This would fail on two failures (two of four nodes is 50%, which exceeds 49):
-# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
-# whereas this would not (50% does not exceed 50):
-# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
-#
-# Multiple data migrations take place during the upgrade and, if they fail, they will fail the upgrade.
-# You may wish to disable these or make them non-fatal.
-#
-# openshift_upgrade_pre_storage_migration_enabled=true
-# openshift_upgrade_pre_storage_migration_fatal=true
-# openshift_upgrade_post_storage_migration_enabled=true
-# openshift_upgrade_post_storage_migration_fatal=false
-
-# host group for masters
-[masters]
-ose3-master[1:3]-ansible.test.example.com
-
-[etcd]
-ose3-etcd[1:3]-ansible.test.example.com
-
-# NOTE: Containerized load balancer hosts are not yet supported; if using a global
-# containerized=true host variable, it must be set to false for the [lb] hosts.
-[lb]
-ose3-lb-ansible.test.example.com containerized=false
-
-# NOTE: Currently we require that masters be part of the SDN, which requires that they also be nodes.
-# However, in order to ensure that your masters are not burdened with running pods you should
-# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
-[nodes]
-ose3-master[1:3]-ansible.test.example.com
-ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/lookup_plugins/README.md b/lookup_plugins/README.md
new file mode 100644
index 000000000..f05d608e5
--- /dev/null
+++ b/lookup_plugins/README.md
@@ -0,0 +1 @@
+openshift-ansible lookup plugins.
diff --git a/lookup_plugins/oo_option.py b/lookup_plugins/oo_option.py
deleted file mode 100644
index 4581cb6b8..000000000
--- a/lookup_plugins/oo_option.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python2
-# -*- coding: utf-8 -*-
-'''
-oo_option lookup plugin for openshift-ansible
-
-Usage:
-
- - debug:
- msg: "{{ lookup('oo_option', '<key>') | default('<default_value>', True) }}"
-
-This returns, by order of priority:
-
-* if it exists, the `cli_<key>` ansible variable. This variable is set by `bin/cluster --option <key>=<value> …`
-* if it exists, the environment variable named `<key>`
-* if none of the above conditions are met, an empty string is returned
-'''
-
-
-import os
-
-# pylint: disable=no-name-in-module,import-error,unused-argument,unused-variable,super-init-not-called,too-few-public-methods,missing-docstring
-try:
- # ansible-2.0
- from ansible.plugins.lookup import LookupBase
-except ImportError:
- # ansible-1.9.x
- class LookupBase(object):
- def __init__(self, basedir=None, runner=None, **kwargs):
- self.runner = runner
- self.basedir = self.runner.basedir
-
- def get_basedir(self, variables):
- return self.basedir
-
-
-# Reason: disable too-few-public-methods because the `run` method is the only
-# one required by the Ansible API
-# Status: permanently disabled
-# pylint: disable=too-few-public-methods
-class LookupModule(LookupBase):
- ''' oo_option lookup plugin main class '''
-
- # Reason: disable unused-argument because Ansible is calling us with many
- # parameters we are not interested in.
- # The lookup plugins of Ansible have this kwargs “catch-all” parameter
- # which is not used
- # Status: permanently disabled unless Ansible API evolves
- # pylint: disable=unused-argument
- def __init__(self, basedir=None, **kwargs):
- ''' Constructor '''
- self.basedir = basedir
-
- # Reason: disable unused-argument because Ansible is calling us with many
- # parameters we are not interested in.
- # The lookup plugins of Ansible have this kwargs “catch-all” parameter
- # which is not used
- # Status: permanently disabled unless Ansible API evolves
- # pylint: disable=unused-argument
- def run(self, terms, variables, **kwargs):
- ''' Main execution path '''
-
- ret = []
-
- for term in terms:
- option_name = term.split()[0]
- cli_key = 'cli_' + option_name
- if 'vars' in variables and cli_key in variables['vars']:
- ret.append(variables['vars'][cli_key])
- elif option_name in os.environ:
- ret.append(os.environ[option_name])
- else:
- ret.append('')
-
- return ret
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 3be13145e..5232d90f2 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.7.0
-Release: 0.126.0%{?dist}
+Release: 0.150.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -84,10 +84,6 @@ touch %{buildroot}%{_datadir}/ansible/%{name}/roles/contiv/.empty_dir
pushd %{buildroot}%{_datadir}/ansible/%{name}/roles/openshift_master_facts/filter_plugins
ln -sf ../../../../../ansible_plugins/filter_plugins/oo_filters.py oo_filters.py
popd
-# openshift_master_facts symlinks lookup_plugins/oo_option.py from ansible_plugins/lookup_plugins
-pushd %{buildroot}%{_datadir}/ansible/%{name}/roles/openshift_master_facts/lookup_plugins
-ln -sf ../../../../../ansible_plugins/lookup_plugins/oo_option.py oo_option.py
-popd
# openshift-ansible-filter-plugins install
cp -rp filter_plugins %{buildroot}%{_datadir}/ansible_plugins/
@@ -280,6 +276,367 @@ Atomic OpenShift Utilities includes
%changelog
+* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.150.0
+- Ensure upgrade playbook exits on health check failures (rteague@redhat.com)
+- Ensure docker is installed for containerized load balancers
+ (mgugino@redhat.com)
+- Fix containerized node service unit placement order (mgugino@redhat.com)
+- Provisioning Documentation Updates (mgugino@redhat.com)
+
+* Thu Oct 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.149.0
+- Fix broken debug_level (mgugino@redhat.com)
+- Ensure host was reached for proper conditional validation
+ (rteague@redhat.com)
+- Ensure docker service status actually changes (mgugino@redhat.com)
+- Display warnings at the end of the control plane upgrade (sdodson@redhat.com)
+- Force reconciliation of role for 3.6 (simo@redhat.com)
+- Remove etcd health check (sdodson@redhat.com)
+- migrate embedded etcd to external etcd (jchaloup@redhat.com)
+
+* Wed Oct 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.148.0
+- Bug 1490647 - logging-fluentd deployed with openshift_logging_use_mux=false
+ fails to start due to missing (nhosoi@redhat.com)
+- Fix typo in inventory example (rteague@redhat.com)
+- Separate tuned daemon setup into a role. (jmencak@redhat.com)
+- crio, docker: expect openshift_release to have 'v' (gscrivan@redhat.com)
+- rebase on master (maxamillion@fedoraproject.org)
+- Add fedora compatibility (maxamillion@fedoraproject.org)
+- Allow checkpoint status to work across all groups (rteague@redhat.com)
+- Add valid search when search does not exist on resolv.conf
+ (nakayamakenjiro@gmail.com)
+
+* Tue Oct 10 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.147.0
+- Add PartOf to docker systemd service unit. (mgugino@redhat.com)
+- crio: use systemd manager (gscrivan@redhat.com)
+- Ensure servingInfo.clientCA is set as ca.crt rather than ca-bundle.crt.
+ (abutcher@redhat.com)
+- crio, docker: use openshift_release when openshift_image_tag is not used
+ (gscrivan@redhat.com)
+- crio: fix typo (gscrivan@redhat.com)
+- Update registry_config.j2 (jialiu@redhat.com)
+- Update registry_config.j2 (jialiu@redhat.com)
+
+* Mon Oct 09 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.146.0
+- docker_image_availability: credentials to skopeo (mgugino@redhat.com)
+- Rename openshift_cfme role to openshift_management (tbielawa@redhat.com)
+
+* Mon Oct 09 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.145.0
+- add missing restart node handler to flannel (jchaloup@redhat.com)
+- Switch to configmap leader election on 3.7 upgrade (mkhan@redhat.com)
+- crio.conf.j2: sync from upstream (gscrivan@redhat.com)
+- cri-o: use overlay instead of overlay2 (gscrivan@redhat.com)
+- Ensure docker is restarted when iptables is restarted (mgugino@redhat.com)
+- Stop including origin and ose hosts example file (sdodson@redhat.com)
+- node: make node service PartOf=openvswitch.service when openshift-sdn is used
+ (dcbw@redhat.com)
+
+* Fri Oct 06 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.144.0
+- fix typo for default in etcd (mgugino@redhat.com)
+- Bumping version of service catalog image for 3.7 (ewolinet@redhat.com)
+- remove duplicate [OSEv3:children] group (jfchevrette@gmail.com)
+- Fix lint error (tbielawa@redhat.com)
+- Update hosts.ose.example (ephillipe@gmail.com)
+- Remove the no-longer-used App/DB pv size override variables from inventories
+ (tbielawa@redhat.com)
+- openshift_checks: lb and nfs do not need docker (lmeyer@redhat.com)
+- openshift_checks: use oo group names everywhere (lmeyer@redhat.com)
+- Add notes about SA token. Improve NFS validation. (tbielawa@redhat.com)
+- Hooks for installing CFME during full openshift installation
+ (tbielawa@redhat.com)
+- Documentation (tbielawa@redhat.com)
+- Import upstream templates. Do the work. Validate parameters.
+ (tbielawa@redhat.com)
+- CFME 4.6 work begins. CFME 4.5 references added to the release-3.6 branch
+ (tbielawa@redhat.com)
+- Update hosts.origin.example (ephillipe@gmail.com)
+- Add logging es prometheus endpoint (jcantril@redhat.com)
+- bug 1497401. Default logging and metrics images to 3.7 (jcantril@redhat.com)
+- Ensure docker service started prior to credentials (mgugino@redhat.com)
+- Adding support for an inventory directory/hybrid inventory
+ (esauer@redhat.com)
+- Remove unused tasks file in openshift_named_certificates (rteague@redhat.com)
+- Move node cert playbook into node config path (rteague@redhat.com)
+- Move master cert playbooks into master config path (rteague@redhat.com)
+- Move etcd cert playbooks into etcd config path (rteague@redhat.com)
+- Fix hosted selector variable migration (mgugino@redhat.com)
+- Bug 1496271 - Perserve SCC for ES local persistent storage
+ (jcantril@redhat.com)
+- Limit hosts that run openshift_version role (mgugino@redhat.com)
+- Update ansible-service-broker config to track latest broker
+ (fabian@fabianism.us)
+- fix master-facts for provisioning (mgugino@redhat.com)
+- Make provisioning steps more reusable (mgugino@redhat.com)
+- logging: honor openshift_logging_es_cpu_limit (jwozniak@redhat.com)
+- Addressing tox issues (ewolinet@redhat.com)
+- bug 1482661. Preserve ES dc nodeSelector and supplementalGroups
+ (jcantril@redhat.com)
+- Checking if any openshift_*_storage_kind variables are set to dynamic without
+ enabling dynamic provisioning (ewolinet@redhat.com)
+- Removing setting pvc size and dynamic to remove looped var setting
+ (ewolinet@redhat.com)
+
+* Wed Oct 04 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.143.0
+- Limit base-package install during master upgrades (mgugino@redhat.com)
+- Fix provisiong scale group and elb logic (mgugino@redhat.com)
+
+* Tue Oct 03 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.142.0
+- Document that nfs_directory must conform to DNS-1123 (sdodson@redhat.com)
+- Move node aws credentials to config.yml (mgugino@redhat.com)
+- Use etcd_ip when communicating with the cluster as a peer in etcd scaleup.
+ (abutcher@redhat.com)
+- Ensure openshift.common.portal_net updated during scaleup.
+ (abutcher@redhat.com)
+- docker: fix some tox warnings (gscrivan@redhat.com)
+- Require openshift_image_tag in the inventory with openshift-enterprise
+ (gscrivan@redhat.com)
+- crio: use the image_tag on RHEL (gscrivan@redhat.com)
+- docker: use the image_tag on RHEL (gscrivan@redhat.com)
+
+* Tue Oct 03 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.141.0
+- Restore registires to /etc/sysconfig/docker (mgugino@redhat.com)
+- Fix Prometheus byo entry point (rteague@redhat.com)
+- Update to the openshift_aws style scheme for variables (ccoleman@redhat.com)
+
+* Tue Oct 03 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.140.0
+- openshift_checks: Fix incorrect list cast (smilner@redhat.com)
+- lib/base: Allow for empty option value (jarrpa@redhat.com)
+
+* Mon Oct 02 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.139.0
+- Fix mispelling in error message yammlint -> yamllint (simo@redhat.com)
+- Separate certificate playbooks. (abutcher@redhat.com)
+- Reverting using uninstall variables for logging and metrics
+ (ewolinet@redhat.com)
+- Add --image flag to setup-openshift-heketi-storage (ttindell@isenpai.com)
+
+* Mon Oct 02 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.138.0
+- Fix typo in openshift_default_storage_class/README (hansmi@vshn.ch)
+- GlusterFS: make ServiceAccounts privileged when either glusterfs or heketi is
+ native (jarrpa@redhat.com)
+- Fix some provisioning variables (mgugino@redhat.com)
+
+* Mon Oct 02 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.137.0
+- openshift_node: Add MASTER_SERVICE on system container install
+ (smilner@redhat.com)
+- openshift_node: Set DOCKER_SERVICE for system container (smilner@redhat.com)
+
+* Sun Oct 01 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.136.0
+- Include openshift_hosted when redeploying router certificates to handle auto-
+ generated wildcard certificate or custom openshift_hosted_router_certificate.
+ (abutcher@redhat.com)
+- Check for router service annotations when redeploying router certificates.
+ (abutcher@redhat.com)
+- Remove oo_option symlink from specfile. (abutcher@redhat.com)
+- Add a README.md to lookup_plugins/ (abutcher@redhat.com)
+- Remove oo_option facts. (abutcher@redhat.com)
+- block 3.6->3.7 upgrade if storage backend is not set to etcd3
+ (jchaloup@redhat.com)
+- Changes necessary to support AMI building (mgugino@redhat.com)
+
+* Sat Sep 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.135.0
+- fix whitespace for centos repos (jdetiber@redhat.com)
+- Fix registry auth variable (mgugino@redhat.com)
+- move health-checks and control-plane-verification before excluders
+ (jchaloup@redhat.com)
+- Fix typo in files (Docker registries) (william17.burton@gmail.com)
+- Registering the broker for TSB (ewolinet@redhat.com)
+- Quick formatting updates to the logging README. (steveqtran@gmail.com)
+- openshift_facts: coerce docker_use_system_container to bool
+ (smilner@redhat.com)
+- Migrate enterprise registry logic to docker role (mgugino@redhat.com)
+- minor update to README and removed dead file (steveqtran@gmail.com)
+- Added new variables for logging role for remote-syslog plugin
+ (steveqtran@gmail.com)
+- Remove some reminants of Atomic Enterprise (sdodson@redhat.com)
+- Allow examples management to be disabled (sdodson@redhat.com)
+- rename vars to avoid double negatives and ensuing confusion
+ (jsanda@redhat.com)
+- set prometheus endpoint properties to false by default (jsanda@redhat.com)
+- add options to disable prometheus endpoints (jsanda@redhat.com)
+- Enable JMX reporting of internal metrics (jsanda@redhat.com)
+
+* Thu Sep 28 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.134.0
+- OpenShift-Ansible Installer Checkpointing (rteague@redhat.com)
+- evaluate etcd_backup_tag variable (jchaloup@redhat.com)
+
+* Thu Sep 28 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.133.0
+- papr: use F26 container for extended tests (jlebon@redhat.com)
+- Fix typo in drop_etcdctl.yml (hansmi@vshn.ch)
+- Rename filter_plugins to unique names (rteague@redhat.com)
+- Fix missing quotes on openshift_aws_build_ami_ssh_user default
+ (mgugino@redhat.com)
+- papr: Workaround for RHBZ#1483553 (smilner@redhat.com)
+- Adding default for volume size if not set (ewolinet@redhat.com)
+- Fix origin repo deployment (mgugino@redhat.com)
+- More variables in AWS provisioning plays (mgugino@redhat.com)
+- Support installation of NetworkManager for provisioned nodes
+ (mgugino@redhat.com)
+- Set the etcd backend quota to 4GB by default (jchaloup@redhat.com)
+- logging: introducing event router (jwozniak@redhat.com)
+- logging: fix kibana and kibana-ops defaults (jwozniak@redhat.com)
+- papr: Use Fedora 26 (smilner@redhat.com)
+
+* Wed Sep 27 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.132.0
+- make difference filter output a list for Python3 (jchaloup@redhat.com)
+- Updating to check for netnamespace kube-service-catalog to be ready
+ (ewolinet@redhat.com)
+- consolidate etcd_common role (jchaloup@redhat.com)
+- Fluentd: one output tag, one output plugin (nhosoi@redhat.com)
+
+* Tue Sep 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.131.0
+- Generate aggregator api client config in temporary directory.
+ (abutcher@redhat.com)
+
+* Tue Sep 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.130.0
+- Passing in image parameter for tsb template (ewolinet@redhat.com)
+
+* Tue Sep 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.129.0
+- Refactor openshift_hosted plays and role (mgugino@redhat.com)
+- Remove logging ES_COPY feature (jcantril@redhat.com)
+
+* Tue Sep 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.128.0
+- check if the storage backend is set to etcd3 before upgrading to 3.7
+ (jchaloup@redhat.com)
+- crio: detect the correct version of the images (gscrivan@redhat.com)
+- crio: set the correct image name with OSE (gscrivan@redhat.com)
+- resolve #5428: python-dbus not found (ltheisen@mitre.org)
+- Updating default behavior for installing metrics and logging. Separating out
+ uninstall to own variable (ewolinet@redhat.com)
+- Add booleans to prevent unwanted install of nuage roles. (mgugino@redhat.com)
+- Set master facts prior to adding new etcd client urls to master config.
+ (abutcher@redhat.com)
+- Remove debugging statements and pause module (sdodson@redhat.com)
+- Fix registry_auth logic for upgrades (mgugino@redhat.com)
+- crio: skip installation on lbs and nfs nodes (gscrivan@redhat.com)
+- Remove override default.py callback plugin (rteague@redhat.com)
+- consolidate etcd_migrate role (jchaloup@redhat.com)
+- Add python3-PyYAML for Fedora installs (mgugino@redhat.com)
+- Do a full stop/start when etcd certificates had expired.
+ (abutcher@redhat.com)
+- Move additional/block/insecure registires to /etc/containers/registries.conf
+ (mgugino@redhat.com)
+- Improve CA playbook restart logic and skip restarts when related services had
+ previously expired certificates. (abutcher@redhat.com)
+- health checks: add diagnostics check (lmeyer@redhat.com)
+- Remove unused openshift_hosted_logging role (mgugino@redhat.com)
+- consolidate etcd_upgrade role (jchaloup@redhat.com)
+- disable excluders after all pre-checks (jchaloup@redhat.com)
+- Fixed AnsibleUnsafeText by converting to int (edu@redhat.com)
+- Ensure that hostname is lowercase (sdodson@redhat.com)
+- Fix deprecated subscription-manager command
+ (bliemli@users.noreply.github.com)
+- Returning actual results of yedit query. Empty list was returning empty
+ dict. (kwoodson@redhat.com)
+- Default openshift_pkg_version to full version-release during upgrades
+ (sdodson@redhat.com)
+- Creating structure to warn for use of deprecated variables and set them in a
+ single location before they are no longer honored (ewolinet@redhat.com)
+- Remove default value for oreg_url (mgugino@redhat.com)
+- Creating initial tsb role to consume and apply templates provided for tsb
+ (ewolinet@redhat.com)
+- Set network facts using first master's config during scaleup.
+ (abutcher@redhat.com)
+- Use 3.7 RPM repo (ahaile@redhat.com)
+- Changes for Nuage atomic ansible install
+ (rohan.s.parulekar@nuagenetworks.net)
+- Add 3.7 scheduler predicates (jsafrane@redhat.com)
+- Consolidate etcd certs roles (jchaloup@redhat.com)
+- GlusterFS can now be run more than once. Ability to add devices to nodes
+ (ttindell@isenpai.com)
+- Ensure valid search on resolv.conf (mateus.caruccio@getupcloud.com)
+- move (and rename) get_dns_ip filter into openshift_node_facts
+ (jdiaz@redhat.com)
+- cri-o: Allow full image override (smilner@redhat.com)
+
+* Thu Sep 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.127.0
+- Updating to always configure api aggregation with installation
+ (ewolinet@redhat.com)
+- Do not reconcile in >= 3.7 (simo@redhat.com)
+- Cleanup old deployment types (mgugino@redhat.com)
+- crio: ensure no default CNI configuration files are left
+ (gscrivan@redhat.com)
+- node: specify the DNS domain (gscrivan@redhat.com)
+- more retries on repoquery_cmd (lmeyer@redhat.com)
+- fix etcd back message error (jchaloup@redhat.com)
+- openshift_checks: enable providing file outputs (lmeyer@redhat.com)
+- Fix registry auth task ordering (mgugino@redhat.com)
+- Prometheus role fixes (zgalor@redhat.com)
+- papr: Update inventory to include required vars (smilner@redhat.com)
+- testing: Skip net vars on integration tests (smilner@redhat.com)
+- inventory: Update network variable doc (smilner@redhat.com)
+- installer image: use tmp file for vaultpass (lmeyer@redhat.com)
+- system container: use ansible root as cwd (lmeyer@redhat.com)
+- openshift_sanitize_inventory: Check for required vars (smilner@redhat.com)
+- No conversion to boolean and no quoting for include_granted_scopes.
+ (jpazdziora@redhat.com)
+- Correct firewall install for openshift-nfs (rteague@redhat.com)
+- inventory: Update versions to 3.7 (smilner@redhat.com)
+- Port origin-gce roles for cluster setup to copy AWS provisioning
+ (ccoleman@redhat.com)
+- Bug 1491636 - honor openshift_logging_es_ops_nodeselector
+ (jwozniak@redhat.com)
+- Setup tuned after the node has been restarted. (jmencak@redhat.com)
+- Only attempt to start iptables on hosts in the current batch
+ (sdodson@redhat.com)
+- Removing setting of pod presets (ewolinet@redhat.com)
+- cri-o: Fix Fedora image name (smilner@redhat.com)
+- add retry on repoquery_cmd (lmeyer@redhat.com)
+- add retries to repoquery module (lmeyer@redhat.com)
+- Rework openshift-cluster into deploy_cluster.yml (rteague@redhat.com)
+- inventory generate: fix config doc (lmeyer@redhat.com)
+- inventory generate: remove refs to openshift_cluster_user (lmeyer@redhat.com)
+- inventory generate: always use kubeconfig, no login (lmeyer@redhat.com)
+- Scaffold out the entire build defaults hash (tbielawa@redhat.com)
+- Use openshift.common.ip rather than ansible_default_ipv4 in etcd migration
+ playbook. (abutcher@redhat.com)
+- Add IMAGE_VERSION to the image stream tag source (sdodson@redhat.com)
+- Add loadbalancer config entry point (rteague@redhat.com)
+- pull openshift_master deps out into a play (jchaloup@redhat.com)
+- Don't assume storage_migration control variables are already boolean
+ (mchappel@redhat.com)
+- upgrade: Updates warning on missing required variables (smilner@redhat.com)
+- Update master config with new client urls during etcd scaleup.
+ (abutcher@redhat.com)
+- Increase rate limiting in journald.conf (maszulik@redhat.com)
+- Correct logic for openshift_hosted_*_wait (rteague@redhat.com)
+- Adding mangagement-admin SC to admin role for management-infra project
+ (ewolinet@redhat.com)
+- Only install base openshift package on masters and nodes (mgugino@redhat.com)
+- Workaround Ansible Jinja2 delimiter warning (rteague@redhat.com)
+- openshift-checks: add role symlink (lmeyer@redhat.com)
+- double the required disk space for etcd backup (jchaloup@redhat.com)
+- openshift_health_check: allow disabling all checks (lmeyer@redhat.com)
+- docker_image_availability: fix local image search (lmeyer@redhat.com)
+- docker_image_availability: probe registry connectivity (lmeyer@redhat.com)
+- openshift_checks: add retries in python (lmeyer@redhat.com)
+- add inventory-generator under new sub pkg (jvallejo@redhat.com)
+- Re-enabling new tuned profile hierarchy (PR5089) (jmencak@redhat.com)
+- Add `openshift_node_open_ports` to allow arbitrary firewall exposure
+ (ccoleman@redhat.com)
+- Fix: authenticated registry support for containerized hosts
+ (mgugino@redhat.com)
+- [Proposal] OpenShift-Ansible Proposal Process (rteague@redhat.com)
+- Improve searching when conditions for Jinja2 delimiters (rteague@redhat.com)
+- Clarify requirement of having etcd group (sdodson@redhat.com)
+- add health checks 3_6,3_7 upgrade path (jvallejo@redhat.com)
+- container-engine: Allow full image override (smilner@redhat.com)
+- Add openshift_public_hostname length check (mgugino@redhat.com)
+- Skip failure dedup instead of crashing (rhcarvalho@gmail.com)
+- Properly quote "true" and "false" strings for include_granted_scopes.
+ (jpazdziora@redhat.com)
+- Move sysctl.conf customizations to a separate file (jdesousa@redhat.com)
+- Fix new_master or new_node fail check (denverjanke@gmail.com)
+- [Proposal] OpenShift-Ansible Playbook Consolidation (rteague@redhat.com)
+- GlusterFS: Allow option to use or ignore default node selectors
+ (jarrpa@redhat.com)
+- GlusterFS: Clarify heketi URL documentation (jarrpa@redhat.com)
+- GlusterFS: Add files/templates for v3.7 (jarrpa@redhat.com)
+- Support setting annotations on Hawkular route (hansmi@vshn.ch)
+- add additional preflight checks to upgrade path (jvallejo@redhat.com)
+- hot fix for env variable resolve (m.judeikis@gmail.com)
+- GlusterFS: Correct firewall port names (jarrpa@redhat.com)
+- Make RH subscription more resilient to temporary failures
+ (lhuard@amadeus.com)
+
* Mon Sep 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.126.0
- Fix rpm version logic for hosts (mgugino@redhat.com)
- Revert back to hostnamectl and previous default of not setting hostname
diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
deleted file mode 100644
index 3c157bbf3..000000000
--- a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-# This deletes *ALL* Docker images, and uninstalls OpenShift and
-# Atomic Enterprise RPMs. It is primarily intended for use
-# with the tutorial as well as for developers to reset state.
-#
----
-- include: uninstall.yml
-
-- hosts:
- - OSEv3:children
-
- become: yes
-
- tasks:
- - shell: docker ps -a -q | xargs docker stop
- changed_when: False
- failed_when: False
-
- - shell: docker ps -a -q| xargs docker rm
- changed_when: False
- failed_when: False
-
- - shell: docker images -q |xargs docker rmi
- changed_when: False
- failed_when: False
-
- - user: name={{ item }} state=absent remove=yes
- with_items:
- - alice
- - joe
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 5072d10fa..07f10d48c 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -1,5 +1,5 @@
-# This deletes *ALL* Origin, Atomic Enterprise Platform and OpenShift
-# Enterprise content installed by ansible. This includes:
+# This deletes *ALL* Origin and OpenShift Enterprise content installed by
+# ansible. This includes:
#
# configuration
# containers
@@ -41,7 +41,6 @@
- name: Stop services
service: name={{ item }} state=stopped
with_items:
- - atomic-enterprise-node
- atomic-openshift-node
- openshift-node
- openvswitch
@@ -54,7 +53,6 @@
- name: Stop services
service: name={{ item }} state=stopped
with_items:
- - atomic-enterprise-master
- atomic-openshift-master
- atomic-openshift-master-api
- atomic-openshift-master-controllers
@@ -104,9 +102,6 @@
- name: Remove packages
package: name={{ item }} state=absent
with_items:
- - atomic-enterprise
- - atomic-enterprise-node
- - atomic-enterprise-sdn-ovs
- atomic-openshift
- atomic-openshift-clients
- atomic-openshift-excluder
@@ -129,8 +124,6 @@
- origin-clients
- origin-node
- origin-sdn-ovs
- - tuned-profiles-atomic-enterprise-node
- - tuned-profiles-atomic-openshift-node
- tuned-profiles-openshift-node
- tuned-profiles-origin-node
@@ -165,7 +158,6 @@
failed_when: False
with_items:
- openshift-enterprise
- - atomic-enterprise
- origin
- shell: atomic uninstall "{{ item }}"-master-controllers
@@ -173,7 +165,6 @@
failed_when: False
with_items:
- openshift-enterprise
- - atomic-enterprise
- origin
- shell: atomic uninstall "{{ item }}"-master
@@ -181,7 +172,6 @@
failed_when: False
with_items:
- openshift-enterprise
- - atomic-enterprise
- origin
- shell: atomic uninstall "{{ item }}"-node
@@ -189,7 +179,6 @@
failed_when: False
with_items:
- openshift-enterprise
- - atomic-enterprise
- origin
- shell: atomic uninstall "{{ item }}"
@@ -202,18 +191,11 @@
- shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
changed_when: False
- - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
- changed_when: False
-
- - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
- changed_when: False
-
- shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
changed_when: False
failed_when: False
with_items:
- openshift-enterprise
- - atomic-enterprise
- origin
- shell: docker ps -a | grep Exited | egrep "{{ item }}" | awk '{print $1}'
@@ -221,9 +203,6 @@
failed_when: False
register: exited_containers_to_delete
with_items:
- - aep3.*/aep
- - aep3.*/node
- - aep3.*/openvswitch
- openshift3/ose
- openshift3/node
- openshift3/openvswitch
@@ -242,7 +221,6 @@
register: images_to_delete
with_items:
- registry\.access\..*redhat\.com/openshift3
- - registry\.access\..*redhat\.com/aep3
- registry\.qe\.openshift\.com/.*
- registry\.access\..*redhat\.com/rhel7/etcd
- docker.io/openshift
@@ -290,10 +268,8 @@
file: path={{ item }} state=absent
with_items:
- /etc/ansible/facts.d/openshift.fact
- - /etc/atomic-enterprise
- /etc/openshift
- /etc/openshift-sdn
- - /etc/sysconfig/atomic-enterprise-node
- /etc/sysconfig/atomic-openshift-node
- /etc/sysconfig/atomic-openshift-node-dep
- /etc/sysconfig/openshift-node-dep
@@ -308,8 +284,6 @@
- /etc/systemd/system/origin-node-dep.service
- /etc/systemd/system/origin-node.service
- /etc/systemd/system/origin-node.service.wants
- - /var/lib/atomic-enterprise
- - /var/lib/openshift
- shell: systemctl daemon-reload
changed_when: False
@@ -347,8 +321,6 @@
package: name={{ item }} state=absent
when: not is_atomic | bool and openshift_remove_all | default(True) | bool
with_items:
- - atomic-enterprise
- - atomic-enterprise-master
- atomic-openshift
- atomic-openshift-clients
- atomic-openshift-excluder
@@ -417,7 +389,6 @@
file: path={{ item }} state=absent
with_items:
- /etc/ansible/facts.d/openshift.fact
- - /etc/atomic-enterprise
- /etc/corosync
- /etc/openshift
- /etc/openshift-sdn
@@ -428,9 +399,6 @@
- /etc/systemd/system/origin-master-api.service
- /etc/systemd/system/origin-master-controllers.service
- /etc/systemd/system/openvswitch.service
- - /etc/sysconfig/atomic-enterprise-master
- - /etc/sysconfig/atomic-enterprise-master-api
- - /etc/sysconfig/atomic-enterprise-master-controllers
- /etc/sysconfig/atomic-openshift-master-api
- /etc/sysconfig/atomic-openshift-master-controllers
- /etc/sysconfig/origin-master
@@ -441,8 +409,6 @@
- /etc/sysconfig/origin-master-api
- /etc/sysconfig/origin-master-controllers
- /usr/share/openshift/examples
- - /var/lib/atomic-enterprise
- - /var/lib/openshift
- /var/lib/pacemaker
- /var/lib/pcsd
- /usr/lib/systemd/system/atomic-openshift-master-api.service
diff --git a/playbooks/aws/BUILD_AMI.md b/playbooks/aws/BUILD_AMI.md
new file mode 100644
index 000000000..468264a9a
--- /dev/null
+++ b/playbooks/aws/BUILD_AMI.md
@@ -0,0 +1,21 @@
+# Build AMI
+
+When seeking to deploy a working openshift cluster using these plays, a custom
+AMI must be built first.
+
+The build_ami.yml play performs the following steps:
+
+1. Create an instance, using a specified ssh key.
+2. Run the openshift-ansible setup roles to ensure packages and services are correctly configured.
+3. Create the AMI.
+4. If encryption is desired:
+ - A KMS key is created with the name of $clusterid.
+ - An encrypted AMI is produced with the $clusterid KMS key.
+5. Terminate the instance used to configure the AMI.
+
+More AMI-specific options can be found in ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml). When creating an encrypted AMI, please specify use_encryption:
+```
+# openshift_aws_ami_encrypt: True # defaults to false
+```
+
+**Note**: When enabled, this takes the recently created AMI and encrypts it for later use. If encryption is not desired, leave the value set to false (the default). The AMI id will be fetched and used according to its most recent creation date.
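+
+As a minimal sketch of enabling encryption (the variable names appear in
+provisioning_vars.yml.example; the base AMI id is an illustrative value), the
+relevant settings in provisioning_vars.yml might look like:
+
+```yaml
+# provisioning_vars.yml (sketch)
+openshift_aws_base_ami: ami-12345678   # base image to build the AMI from
+openshift_aws_ami_encrypt: True        # encrypt the resulting AMI with the $clusterid KMS key
+```
+
+The AMI is then built as described in README.md Step 1, e.g.
+`ansible-playbook -i inventory.yml build_ami.yml -e @provisioning_vars.yml`.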
diff --git a/playbooks/aws/PREREQUISITES.md b/playbooks/aws/PREREQUISITES.md
new file mode 100644
index 000000000..4f428dcc3
--- /dev/null
+++ b/playbooks/aws/PREREQUISITES.md
@@ -0,0 +1,40 @@
+# Prerequisites
+
+When seeking to deploy a working openshift cluster using these plays, a few
+items must be in place.
+
+These are:
+
+1) A VPC
+2) A security group to build the AMI in
+3) SSH keys to log into the instances
+
+These items can be provisioned ahead of time, or you can utilize the plays here
+to create these items.
+
+If you wish to provision these items yourself, or you already have these items
+provisioned and wish to utilize existing components, please refer to
+provisioning_vars.yml.example.
+
+If you wish to have these items created for you, continue with this document.
+
+# Running prerequisites.yml
+
+Warning: Running these plays will provision items in your AWS account (if not
+present), and you may incur billing charges. These plays are not suitable
+for the free-tier.
+
+## Step 1:
+Ensure you have specified all the necessary provisioning variables. See
+provisioning_vars.yml.example and README.md for more information.
+
+## Step 2:
+```
+$ ansible-playbook -i inventory.yml prerequisites.yml -e @provisioning_vars.yml
+```
+
+This will create a VPC, a security group, and an SSH key. These plays are idempotent,
+and multiple runs should result in no additional provisioning of these components.
+
+You can also verify that you will successfully utilize existing components with
+these plays.
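+
+As a rough sketch of reusing pre-existing components (the variable names come
+from provisioning_vars.yml.example; the values shown are illustrative), the
+relevant provisioning_vars.yml entries might look like:
+
+```yaml
+# provisioning_vars.yml (sketch for existing infrastructure)
+openshift_aws_create_vpc: false                 # do not provision a new vpc
+openshift_aws_vpc_name: my-existing-vpc         # name of the pre-existing vpc
+openshift_aws_subnet_name: my-existing-subnet   # subnet within that vpc to use
+openshift_aws_create_security_groups: false     # reuse existing security groups
+openshift_aws_ssh_key_name: myuser_key          # existing ec2 key pair
+```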
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index 2b3d4329e..fbab61189 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -8,6 +8,13 @@ With recent desire for provisioning from customers and developers alike, the AWS
deploy highly scalable Openshift clusters utilizing AWS auto scale groups and
custom AMIs.
+To speed up the provisioning of medium and large clusters, openshift-node
+instances are created using a pre-built AMI. A list of pre-built AMIs will
+be available soon.
+
+If the deployer wishes to build their own AMI for provisioning, instructions
+to do so are provided here.
+
### Where do I start?
Before any provisioning may occur, AWS account credentials must be present in the environment. This can be done in two ways:
@@ -31,8 +38,13 @@ Before any provisioning may occur, AWS account credentials must be present in th
### Let's Provision!
-The newly added playbooks are the following:
-- build_ami.yml - Builds a custom AMI. This currently requires the user to supply a valid AMI with access to repositories that contain openshift repositories.
+Warning: Running these plays will provision items in your AWS account (if not
+present), and you may incur billing charges. These plays are not suitable
+for the free-tier.
+
+#### High-level overview
+- prerequisites.yml - Provision VPC, Security Groups, SSH keys, if needed. See PREREQUISITES.md for more information.
+- build_ami.yml - Builds a custom AMI. See BUILD_AMI.md for more information.
- provision.yml - Create a vpc, elbs, security groups, launch config, asg's, etc.
- install.yml - Calls the openshift-ansible installer on the newly created instances
- provision_nodes.yml - Creates the infra and compute node scale groups
@@ -41,87 +53,38 @@ The newly added playbooks are the following:
The current expected work flow should be to provide an AMI with access to Openshift repositories. There should be a repository specified in the `openshift_additional_repos` parameter of the inventory file. The next expectation is a minimal set of values in the `provisioning_vars.yml` file to configure the desired settings for cluster instances. These settings are AWS specific and should be tailored to the consumer's AWS custom account settings.
+Values specified in provisioning_vars.yml may instead be specified in your inventory group_vars
+under the appropriate groups. Most variables can exist in the 'all' group.
+
```yaml
---
-# when creating an AMI set this to True
-# when installing a cluster set this to False
-openshift_node_bootstrap: True
-
-# specify a clusterid
-# openshift_aws_clusterid: default
-
-# specify a region
-# openshift_aws_region: us-east-1
-
-# must specify a base_ami when building an AMI
-# openshift_aws_base_ami: # base image for AMI to build from
-# specify when using a custom AMI
-# openshift_aws_ami:
-
-# when creating an encrypted AMI please specify use_encryption
-# openshift_aws_ami_encrypt: False
-
-# custom certificates are required for the ELB
-# openshift_aws_iam_cert_path: '/path/to/cert/wildcard.<clusterid>.<domain>.com.crt'
-# openshift_aws_iam_cert_key_path: '/path/to/key/wildcard.<clusterid>.<domain>.com.key'
-# openshift_aws_iam_cert_chain_path: '/path/to/ca_cert_file/ca.crt'
-
-# This is required for any ec2 instances
-# openshift_aws_ssh_key_name: myuser_key
-
-# This will ensure these users are created
-#openshift_aws_users:
-#- key_name: myuser_key
-# username: myuser
-# pub_key: |
-# ssh-rsa AAAA
+# Minimum mandatory provisioning variables. See provisioning_vars.yml.example
+# for more information.
+openshift_deployment_type: # 'origin' or 'openshift-enterprise'
+openshift_release: # example: v3.7
+openshift_pkg_version: # example: -3.7.0
+openshift_aws_ssh_key_name: # example: myuser_key
+openshift_aws_base_ami: # example: ami-12345678
+openshift_aws_iam_cert_path: # example: '/path/to/wildcard.<clusterid>.example.com.crt'
+openshift_aws_iam_key_path: # example: '/path/to/wildcard.<clusterid>.example.com.key'
```
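+
+As a sketch of the group_vars alternative mentioned above (the file path and
+values are illustrative), the same variables could instead be placed in your
+inventory group_vars:
+
+```yaml
+# group_vars/all.yml (sketch)
+openshift_deployment_type: origin
+openshift_release: v3.7
+openshift_aws_ssh_key_name: myuser_key
+openshift_aws_base_ami: ami-12345678
+```
+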
If customization is required for the instances, scale groups, or any other configurable option please see the ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml) for variables and overrides. These overrides can be placed in the `provisioning_vars.yml`, `inventory`, or `group_vars`.
-In order to create the bootstrap-able AMI we need to create an openshift-ansible inventory file. This file enables us to create the AMI using the openshift-ansible node roles. The exception here is that there will be no hosts specified by the inventory file. Here is an example:
-
-```ini
-[OSEv3:children]
-masters
-nodes
-etcd
-
-[OSEv3:children]
-masters
-nodes
-etcd
-
-[OSEv3:vars]
-################################################################################
-# Ensure these variables are set for bootstrap
-################################################################################
-# openshift_deployment_type is required for installation
-openshift_deployment_type=origin
-
-# required when building an AMI. This will
-# be dependent on the version provided by the yum repository
-openshift_pkg_version=-3.6.0
-
-openshift_master_bootstrap_enabled=True
+In order to create the bootstrap-able AMI we need to create a basic openshift-ansible inventory. This enables us to create the AMI using the openshift-ansible node roles. This inventory should not include any hosts, but certain variables should be defined in the appropriate groups, just as when deploying a cluster
+using the normal openshift-ansible method. See provisioning-inventory.example.ini for an example.
-openshift_hosted_router_wait=False
-openshift_hosted_registry_wait=False
-
-# Repository for installation
-openshift_additional_repos=[{'name': 'openshift-repo', 'id': 'openshift-repo', 'baseurl': 'https://mirror.openshift.com/enterprise/enterprise-3.6/latest/x86_64/os/', 'enabled': 'yes', 'gpgcheck': 0, 'sslverify': 'no', 'sslclientcert': '/var/lib/yum/client-cert.pem', 'sslclientkey': '/var/lib/yum/client-key.pem', 'gpgkey': 'https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-release https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-beta https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-openshifthosted'}]
-
-################################################################################
-# cluster specific settings maybe be placed here
+There are more examples of cluster inventory settings [`here`](../../inventory/byo/).
-[masters]
+#### Step 0 (optional)
-[etcd]
+You may provision a VPC, Security Group, and SSH keypair to build the AMI.
-[nodes]
+```
+$ ansible-playbook -i inventory.yml prerequisites.yml -e @provisioning_vars.yml
```
-There are more examples of cluster inventory settings [`here`](../../inventory/byo/).
+See PREREQUISITES.md for more information.
#### Step 1
@@ -131,24 +94,6 @@ Once the `inventory` and the `provisioning_vars.yml` file has been updated with
$ ansible-playbook -i inventory.yml build_ami.yml -e @provisioning_vars.yml
```
-1. This script will build a VPC. Default name will be clusterid if not specified.
-2. Create an ssh key required for the instance.
-3. Create a security group.
-4. Create an instance using the key from step 2 or a specified key.
-5. Run openshift-ansible setup roles to ensure packages and services are correctly configured.
-6. Create the AMI.
-7. If encryption is desired
- - A KMS key is created with the name of $clusterid
- - An encrypted AMI will be produced with $clusterid KMS key
-8. Terminate the instance used to configure the AMI.
-
-More AMI specific options can be found in ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml). When creating an encrypted AMI please specify use_encryption:
-```
-# openshift_aws_ami_encrypt: True # defaults to false
-```
-
-**Note**: This will ensure to take the recently created AMI and encrypt it to be used later. If encryption is not desired then set the value to false (defaults to false). The AMI id will be fetched and used according to its most recent creation date.
-
#### Step 2
Now that we have created an AMI for our Openshift installation, there are two ways to use the AMI.
@@ -172,16 +117,14 @@ $ ansible-playbook provision.yml -e @provisioning_vars.yml
```
This playbook runs through the following steps:
-1. Ensures a VPC is created.
-2. Ensures a SSH key exists.
-3. Creates an s3 bucket for the registry named $clusterid-docker-registry
-4. Create master security groups.
-5. Create a master launch config.
-6. Create the master auto scaling groups.
-7. If certificates are desired for ELB, they will be uploaded.
-8. Create internal and external master ELBs.
-9. Add newly created masters to the correct groups.
-10. Set a couple of important facts for the masters.
+1. Creates an s3 bucket for the registry named $clusterid-docker-registry.
+2. Creates master security groups.
+3. Creates a master launch config.
+4. Creates the master auto scaling groups.
+5. Uploads certificates for the ELBs, if desired.
+6. Creates internal and external master ELBs.
+7. Adds newly created masters to the correct groups.
+8. Sets a couple of important facts for the masters.
At this point we have successfully created the infrastructure including the master nodes.
@@ -200,13 +143,13 @@ Once this playbook completes, the cluster masters should be installed and config
#### Step 5
-Now that we have a cluster deployed it will be more interesting to create some node types. This can be done easily with the following playbook:
+Now that we have the cluster masters deployed, we need to deploy our infrastructure and compute nodes:
```
$ ansible-playbook provision_nodes.yml -e @provisioning_vars.yml
```
-Once this playbook completes, it should create the compute and infra node scale groups. These nodes will attempt to register themselves to the cluster. These requests must be approved by an administrator.
+Once this playbook completes, it should create the compute and infra node scale groups. These nodes will attempt to register themselves to the cluster. These requests must be approved by an administrator in Step 6.
#### Step 6
diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml
index fc11205d8..559a37cbe 100644
--- a/playbooks/aws/openshift-cluster/build_ami.yml
+++ b/playbooks/aws/openshift-cluster/build_ami.yml
@@ -17,62 +17,21 @@
- name: openshift_aws_region
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
- - name: create an instance and prepare for ami
- include_role:
- name: openshift_aws
- tasks_from: build_ami.yml
- vars:
- openshift_aws_node_group_type: compute
-
- - name: fetch newly created instances
- ec2_remote_facts:
- region: "{{ openshift_aws_region | default('us-east-1') }}"
- filters:
- "tag:Name": "{{ openshift_aws_base_ami_name | default('ami_base') }}"
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until: instancesout.instances|length > 0
-
- - name: wait for ssh to become available
- wait_for:
- port: 22
- host: "{{ instancesout.instances[0].public_ip_address }}"
- timeout: 300
- search_regex: OpenSSH
-
- - name: add host to nodes
- add_host:
- groups: nodes
- name: "{{ instancesout.instances[0].public_dns_name }}"
+- include: provision_instance.yml
+ vars:
+ openshift_aws_node_group_type: compute
+- hosts: nodes
+ gather_facts: False
+ tasks:
- name: set the user to perform installation
set_fact:
- ansible_ssh_user: root
-
-- name: normalize groups
- include: ../../byo/openshift-cluster/initialize_groups.yml
-
-- name: run the std_include
- include: ../../common/openshift-cluster/evaluate_groups.yml
+ ansible_ssh_user: "{{ openshift_aws_build_ami_ssh_user | default('root') }}"
-- name: run the std_include
- include: ../../common/openshift-cluster/initialize_facts.yml
+# This is the part that installs all of the software and configs for the instance
+# to become a node.
+- include: ../../common/openshift-node/image_prep.yml
-- name: run the std_include
- include: ../../common/openshift-cluster/initialize_openshift_repos.yml
-
-- name: install node config
- include: ../../common/openshift-node/config.yml
-
-- hosts: localhost
- connection: local
- become: no
- tasks:
- - name: seal the ami
- include_role:
- name: openshift_aws
- tasks_from: seal_ami.yml
- vars:
- openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
+- include: seal_ami.yml
+ vars:
+ openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
index 86d58a68e..4d0bf9531 100644
--- a/playbooks/aws/openshift-cluster/install.yml
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -1,68 +1,19 @@
---
-- name: Setup the vpc and the master node group
+- name: Setup the master node group
hosts: localhost
tasks:
- - name: Alert user to variables needed - clusterid
- debug:
- msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
-
- - name: Alert user to variables needed - region
- debug:
- msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
-
- - name: fetch newly created instances
- ec2_remote_facts:
- region: "{{ openshift_aws_region | default('us-east-1') }}"
- filters:
- "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
- "tag:host-type": master
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until: instancesout.instances|length > 0
-
- - name: add new master to masters group
- add_host:
- groups: "masters,etcd,nodes"
- name: "{{ item.public_ip_address }}"
- hostname: "{{ openshift_aws_clusterid | default('default') }}-master-{{ item.id[:-5] }}"
- with_items: "{{ instancesout.instances }}"
-
- - name: wait for ssh to become available
- wait_for:
- port: 22
- host: "{{ item.public_ip_address }}"
- timeout: 300
- search_regex: OpenSSH
- with_items: "{{ instancesout.instances }}"
+ - include_role:
+ name: openshift_aws
+ tasks_from: setup_master_group.yml
- name: set the master facts for hostname to elb
hosts: masters
gather_facts: no
remote_user: root
tasks:
- - name: fetch elbs
- ec2_elb_facts:
- region: "{{ openshift_aws_region | default('us-east-1') }}"
- names:
- - "{{ item }}"
- with_items:
- - "{{ openshift_aws_clusterid | default('default') }}-master-external"
- - "{{ openshift_aws_clusterid | default('default') }}-master-internal"
- delegate_to: localhost
- register: elbs
-
- - debug: var=elbs
-
- - name: set fact
- set_fact:
- openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}"
- osm_custom_cors_origins:
- - "{{ elbs.results[1].elbs[0].dns_name }}"
- - "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
- - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
- with_items: "{{ groups['masters'] }}"
+ - include_role:
+ name: openshift_aws
+ tasks_from: master_facts.yml
- name: normalize groups
include: ../../byo/openshift-cluster/initialize_groups.yml
diff --git a/playbooks/aws/openshift-cluster/prerequisites.yml b/playbooks/aws/openshift-cluster/prerequisites.yml
new file mode 100644
index 000000000..df77fe3bc
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/prerequisites.yml
@@ -0,0 +1,8 @@
+---
+- include: provision_vpc.yml
+
+- include: provision_ssh_keypair.yml
+
+- include: provision_sec_group.yml
+ vars:
+ openshift_aws_node_group_type: compute
diff --git a/playbooks/aws/openshift-cluster/provision.yml b/playbooks/aws/openshift-cluster/provision.yml
index db7afac6f..4b5bd22ea 100644
--- a/playbooks/aws/openshift-cluster/provision.yml
+++ b/playbooks/aws/openshift-cluster/provision.yml
@@ -1,5 +1,5 @@
---
-- name: Setup the vpc and the master node group
+- name: Setup the elb and the master node group
hosts: localhost
tasks:
@@ -11,7 +11,7 @@
debug:
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
- - name: create default vpc
+ - name: provision cluster
include_role:
name: openshift_aws
tasks_from: provision.yml
diff --git a/playbooks/aws/openshift-cluster/provision_instance.yml b/playbooks/aws/openshift-cluster/provision_instance.yml
new file mode 100644
index 000000000..6e843453c
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_instance.yml
@@ -0,0 +1,12 @@
+---
+# If running this play directly, be sure the variable
+# 'openshift_aws_node_group_type' is set correctly for your usage.
+# See build_ami.yml for an example.
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: create an instance and prepare for ami
+ include_role:
+ name: openshift_aws
+ tasks_from: provision_instance.yml
diff --git a/playbooks/aws/openshift-cluster/provision_sec_group.yml b/playbooks/aws/openshift-cluster/provision_sec_group.yml
new file mode 100644
index 000000000..039357adb
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_sec_group.yml
@@ -0,0 +1,13 @@
+---
+# If running this play directly, be sure the variable
+# 'openshift_aws_node_group_type' is set correctly for your usage.
+# See build_ami.yml for an example.
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: create the security groups
+ include_role:
+ name: openshift_aws
+ tasks_from: security_group.yml
+ when: openshift_aws_create_security_groups | default(True) | bool
diff --git a/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml
new file mode 100644
index 000000000..3ec683958
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml
@@ -0,0 +1,12 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: create the ssh keypairs
+ include_role:
+ name: openshift_aws
+ tasks_from: ssh_keys.yml
+ vars:
+ openshift_aws_node_group_type: compute
+ when: openshift_aws_users | default([]) | length > 0
diff --git a/playbooks/aws/openshift-cluster/provision_vpc.yml b/playbooks/aws/openshift-cluster/provision_vpc.yml
new file mode 100644
index 000000000..0a23a6d32
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_vpc.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: create a vpc
+ include_role:
+ name: openshift_aws
+ tasks_from: vpc.yml
+ when: openshift_aws_create_vpc | default(True) | bool
diff --git a/playbooks/aws/openshift-cluster/provisioning_vars.example.yml b/playbooks/aws/openshift-cluster/provisioning_vars.example.yml
deleted file mode 100644
index 28eb9c993..000000000
--- a/playbooks/aws/openshift-cluster/provisioning_vars.example.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-# when creating an AMI set this option to True
-# when installing the cluster, set this to False
-openshift_node_bootstrap: True
-
-# specify a clusterid
-#openshift_aws_clusterid: default
-
-# must specify a base_ami when building an AMI
-#openshift_aws_base_ami:
-
-# when creating an encrypted AMI please specify use_encryption
-#openshift_aws_ami_encrypt: False
-
-# custom certificates are required for the ELB
-#openshift_aws_iam_cert_path: '/path/to/wildcard.<clusterid>.example.com.crt'
-#openshift_aws_iam_key_path: '/path/to/wildcard.<clusterid>.example.com.key'
-#openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt'
-
-# This is required for any ec2 instances
-#openshift_aws_ssh_key_name: myuser_key
-
-# This will ensure these users are created
-#openshift_aws_users:
-#- key_name: myuser_key
-# username: myuser
-# pub_key: |
-# ssh-rsa AAAA
diff --git a/playbooks/aws/openshift-cluster/seal_ami.yml b/playbooks/aws/openshift-cluster/seal_ami.yml
new file mode 100644
index 000000000..8239a64fb
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/seal_ami.yml
@@ -0,0 +1,12 @@
+---
+# If running this play directly, be sure the variable
+# 'openshift_aws_ami_name' is set correctly for your usage.
+# See build_ami.yml for an example.
+- hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - name: seal the ami
+ include_role:
+ name: openshift_aws
+ tasks_from: seal_ami.yml
diff --git a/playbooks/aws/provisioning-inventory.example.ini b/playbooks/aws/provisioning-inventory.example.ini
new file mode 100644
index 000000000..238a7eb2f
--- /dev/null
+++ b/playbooks/aws/provisioning-inventory.example.ini
@@ -0,0 +1,25 @@
+[OSEv3:children]
+masters
+nodes
+etcd
+
+[OSEv3:vars]
+################################################################################
+# Ensure these variables are set for bootstrap
+################################################################################
+# openshift_deployment_type is required for installation
+openshift_deployment_type=origin
+
+openshift_master_bootstrap_enabled=True
+
+openshift_hosted_router_wait=False
+openshift_hosted_registry_wait=False
+
+################################################################################
+# cluster-specific settings may be placed here
+
+[masters]
+
+[etcd]
+
+[nodes]
diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example
new file mode 100644
index 000000000..aa91363ae
--- /dev/null
+++ b/playbooks/aws/provisioning_vars.yml.example
@@ -0,0 +1,120 @@
+---
+# Variables that are commented in this file are optional; uncommented variables
+# are mandatory.
+
+# Default values for each variable are provided, as applicable.
+# Example values for mandatory variables are provided as a comment at the end
+# of the line.
+
+# ------------------------ #
+# Common/Cluster Variables #
+# ------------------------ #
+# Variables in this section affect all areas of the cluster
+
+# Deployment type must be specified.
+openshift_deployment_type: # 'origin' or 'openshift-enterprise'
+
+# openshift_release must be specified. Use any version of openshift that is
+# supported by openshift-ansible.
+openshift_release: # v3.7
+
+# This will be dependent on the version provided by the yum repository
+openshift_pkg_version: # -3.7.0
+
+# specify a clusterid
+# This value is also used as the default value for many other components.
+#openshift_aws_clusterid: default
+
+# AWS region
+# This value will instruct the plays where all items should be created.
+# Multi-region deployments are not supported using these plays at this time.
+#openshift_aws_region: us-east-1
+
+#openshift_aws_create_launch_config: true
+#openshift_aws_create_scale_group: true
+
+# --- #
+# VPC #
+# --- #
+
+# openshift_aws_create_vpc defaults to true. If you don't wish to provision
+# a vpc, set this to false.
+#openshift_aws_create_vpc: true
+
+# Name of the vpc. Needs to be set if using a pre-existing vpc.
+#openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
+
+# Name of the subnet in the vpc to use. Needs to be set if using a pre-existing
+# vpc + subnet.
+#openshift_aws_subnet_name:
+
+# -------------- #
+# Security Group #
+# -------------- #
+
+# openshift_aws_create_security_groups defaults to true. If you wish to use
+# an existing security group, set this to false.
+#openshift_aws_create_security_groups: true
+
+# openshift_aws_build_ami_group is the name of the security group to build the
+# ami in. This defaults to the value of openshift_aws_clusterid.
+#openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"
+
+# openshift_aws_launch_config_security_groups specifies the security groups to
+# apply to the launch config. The launch config security groups will be what
+# the cluster actually is deployed in.
+#openshift_aws_launch_config_security_groups: see roles/openshift_aws/defaults.yml
+
+# openshift_aws_node_security_groups are created when
+# openshift_aws_create_security_groups is set to true.
+#openshift_aws_node_security_groups: see roles/openshift_aws/defaults.yml
+
+# -------- #
+# ssh keys #
+# -------- #
+
+# Specify the key pair name here to connect to the provisioned instances. This
+# can be an existing key, or it can be one of the keys specified in
+# openshift_aws_users
+openshift_aws_ssh_key_name: # myuser_key
+
+# This will ensure these user and public keys are created.
+#openshift_aws_users:
+#- key_name: myuser_key
+# username: myuser
+# pub_key: |
+# ssh-rsa AAAA
+
+# When building the AMI, specify the user to ssh to the instance as.
+# openshift_aws_build_ami_ssh_user: root
+
+# --------- #
+# AMI Build #
+# --------- #
+# Variables in this section apply to building a node AMI for use in your
+# openshift cluster.
+
+# must specify a base_ami when building an AMI
+openshift_aws_base_ami: # ami-12345678
+
+# when creating an encrypted AMI please specify use_encryption
+#openshift_aws_ami_encrypt: False
+
+# -- #
+# S3 #
+# -- #
+
+# Create an s3 bucket.
+#openshift_aws_create_s3: True
+
+# --- #
+# ELB #
+# --- #
+
+# openshift_aws_elb_name will be the base-name of the ELBs.
+#openshift_aws_elb_name: "{{ openshift_aws_clusterid }}"
+
+# custom certificates are required for the ELB
+openshift_aws_iam_cert_path: # '/path/to/wildcard.<clusterid>.example.com.crt'
+openshift_aws_iam_key_path: # '/path/to/wildcard.<clusterid>.example.com.key'
+#openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt'
diff --git a/playbooks/byo/openshift-cluster/openshift-hosted.yml b/playbooks/byo/openshift-cluster/openshift-hosted.yml
new file mode 100644
index 000000000..edd4c8d7b
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/openshift-hosted.yml
@@ -0,0 +1,6 @@
+---
+- include: initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-cluster/openshift_hosted.yml
diff --git a/playbooks/byo/openshift-cluster/openshift-prometheus.yml b/playbooks/byo/openshift-cluster/openshift-prometheus.yml
index 15917078d..4d3f7f42c 100644
--- a/playbooks/byo/openshift-cluster/openshift-prometheus.yml
+++ b/playbooks/byo/openshift-cluster/openshift-prometheus.yml
@@ -1,4 +1,6 @@
---
- include: initialize_groups.yml
+- include: ../../common/openshift-cluster/std_include.yml
+
- include: ../../common/openshift-cluster/openshift_prometheus.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
index a3894e243..255b0dbf7 100644
--- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
@@ -7,13 +7,31 @@
tags:
- always
-- include: ../../common/openshift-cluster/redeploy-certificates/etcd.yml
+- include: ../../common/openshift-cluster/redeploy-certificates/check-expiry.yml
+ vars:
+ g_check_expiry_hosts: 'oo_etcd_to_config'
-- include: ../../common/openshift-cluster/redeploy-certificates/masters.yml
+- include: ../../common/openshift-cluster/redeploy-certificates/etcd-backup.yml
-- include: ../../common/openshift-cluster/redeploy-certificates/nodes.yml
+- include: ../../common/openshift-etcd/certificates.yml
+ vars:
+ etcd_certificates_redeploy: true
+
+- include: ../../common/openshift-cluster/redeploy-certificates/masters-backup.yml
+
+- include: ../../common/openshift-master/certificates.yml
+ vars:
+ openshift_certificates_redeploy: true
+
+- include: ../../common/openshift-cluster/redeploy-certificates/nodes-backup.yml
+
+- include: ../../common/openshift-node/certificates.yml
+ vars:
+ openshift_certificates_redeploy: true
- include: ../../common/openshift-etcd/restart.yml
+ vars:
+ g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}"
- include: ../../common/openshift-master/restart.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml
index 8516baee8..f4f2ce00d 100644
--- a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml
@@ -7,8 +7,18 @@
tags:
- always
-- include: ../../common/openshift-cluster/redeploy-certificates/etcd.yml
+- include: ../../common/openshift-cluster/redeploy-certificates/check-expiry.yml
+ vars:
+ g_check_expiry_hosts: 'oo_etcd_to_config'
+
+- include: ../../common/openshift-cluster/redeploy-certificates/etcd-backup.yml
+
+- include: ../../common/openshift-etcd/certificates.yml
+ vars:
+ etcd_certificates_redeploy: true
- include: ../../common/openshift-etcd/restart.yml
+ vars:
+ g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}"
- include: ../../common/openshift-master/restart.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml
index 566e8b261..049bad8e7 100644
--- a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml
@@ -7,6 +7,10 @@
tags:
- always
-- include: ../../common/openshift-cluster/redeploy-certificates/masters.yml
+- include: ../../common/openshift-cluster/redeploy-certificates/masters-backup.yml
+
+- include: ../../common/openshift-master/certificates.yml
+ vars:
+ openshift_certificates_redeploy: true
- include: ../../common/openshift-master/restart.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml
index 42777e5e6..345b0c689 100644
--- a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml
@@ -7,6 +7,10 @@
tags:
- always
-- include: ../../common/openshift-cluster/redeploy-certificates/nodes.yml
+- include: ../../common/openshift-cluster/redeploy-certificates/nodes-backup.yml
+
+- include: ../../common/openshift-node/certificates.yml
+ vars:
+ openshift_certificates_redeploy: true
- include: ../../common/openshift-node/restart.yml
diff --git a/playbooks/byo/openshift-etcd/certificates.yml b/playbooks/byo/openshift-etcd/certificates.yml
new file mode 100644
index 000000000..e35cf243f
--- /dev/null
+++ b/playbooks/byo/openshift-etcd/certificates.yml
@@ -0,0 +1,8 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-etcd/ca.yml
+
+- include: ../../common/openshift-etcd/certificates.yml
diff --git a/playbooks/byo/openshift-etcd/embedded2external.yml b/playbooks/byo/openshift-etcd/embedded2external.yml
new file mode 100644
index 000000000..6690a7624
--- /dev/null
+++ b/playbooks/byo/openshift-etcd/embedded2external.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-etcd/embedded2external.yml
diff --git a/playbooks/byo/openshift-loadbalancer/config.yml b/playbooks/byo/openshift-loadbalancer/config.yml
new file mode 100644
index 000000000..32c828f97
--- /dev/null
+++ b/playbooks/byo/openshift-loadbalancer/config.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-loadbalancer/config.yml
diff --git a/playbooks/byo/openshift-cfme/config.yml b/playbooks/byo/openshift-management/config.yml
index 0e8e7a94d..33a555cc1 100644
--- a/playbooks/byo/openshift-cfme/config.yml
+++ b/playbooks/byo/openshift-management/config.yml
@@ -5,4 +5,4 @@
- include: ../../common/openshift-cluster/evaluate_groups.yml
-- include: ../../common/openshift-cfme/config.yml
+- include: ../../common/openshift-management/config.yml
diff --git a/playbooks/byo/openshift-cfme/uninstall.yml b/playbooks/byo/openshift-management/uninstall.yml
index c8ed16859..ebd6fb261 100644
--- a/playbooks/byo/openshift-cfme/uninstall.yml
+++ b/playbooks/byo/openshift-management/uninstall.yml
@@ -3,4 +3,4 @@
# tags:
# - always
-- include: ../../common/openshift-cfme/uninstall.yml
+- include: ../../common/openshift-management/uninstall.yml
diff --git a/playbooks/byo/openshift-master/certificates.yml b/playbooks/byo/openshift-master/certificates.yml
new file mode 100644
index 000000000..e147dcba1
--- /dev/null
+++ b/playbooks/byo/openshift-master/certificates.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-master/certificates.yml
diff --git a/playbooks/byo/openshift-nfs/config.yml b/playbooks/byo/openshift-nfs/config.yml
new file mode 100644
index 000000000..93b24411e
--- /dev/null
+++ b/playbooks/byo/openshift-nfs/config.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-nfs/config.yml
diff --git a/playbooks/byo/openshift-node/certificates.yml b/playbooks/byo/openshift-node/certificates.yml
new file mode 100644
index 000000000..3d2de74a9
--- /dev/null
+++ b/playbooks/byo/openshift-node/certificates.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-node/certificates.yml
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index 1b14ff32e..bc3109a31 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -8,9 +8,9 @@
hosts: OSEv3
roles:
- role: rhel_subscribe
- when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
- - openshift_repos
- - os_update_latest
+ when:
+ - deployment_type == 'openshift-enterprise'
+ - ansible_distribution == "RedHat"
+ - lookup('env', 'rhel_skip_subscription') | default(rhsub_skip, True) | default('no', True) | lower in ['no', 'false']
+ - role: openshift_repos
+ - role: os_update_latest
diff --git a/playbooks/common/openshift-cfme/config.yml b/playbooks/common/openshift-cfme/config.yml
deleted file mode 100644
index 533a35d9e..000000000
--- a/playbooks/common/openshift-cfme/config.yml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-# TODO: Make this work. The 'name' variable below is undefined
-# presently because it's part of the cfme role. This play can't run
-# until that's re-worked.
-#
-# - name: Pre-Pull manageiq-pods docker images
-# hosts: nodes
-# tasks:
-# - name: Ensure the latest manageiq-pods docker image is pulling
-# docker_image:
-# name: "{{ openshift_cfme_container_image }}"
-# # Fire-and-forget method, never timeout
-# async: 99999999999
-# # F-a-f, never check on this. True 'background' task.
-# poll: 0
-
-- name: Configure Masters for CFME Bulk Image Imports
- hosts: oo_masters_to_config
- serial: 1
- tasks:
- - name: Run master cfme tuning playbook
- include_role:
- name: openshift_cfme
- tasks_from: tune_masters
-
-- name: Setup CFME
- hosts: oo_first_master
- vars:
- r_openshift_cfme_miq_template_content: "{{ lookup('file', 'roles/openshift_cfme/files/miq-template.yaml') | from_yaml}}"
- pre_tasks:
- - name: Create a temporary place to evaluate the PV templates
- command: mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: r_openshift_cfme_mktemp
- changed_when: false
- - name: Ensure the server template was read from disk
- debug:
- msg="{{ r_openshift_cfme_miq_template_content | from_yaml }}"
-
- tasks:
- - name: Run the CFME Setup Role
- include_role:
- name: openshift_cfme
- vars:
- template_dir: "{{ hostvars[groups.masters.0].r_openshift_cfme_mktemp.stdout }}"
diff --git a/playbooks/common/openshift-cluster/cockpit-ui.yml b/playbooks/common/openshift-cluster/cockpit-ui.yml
new file mode 100644
index 000000000..5ddafdb07
--- /dev/null
+++ b/playbooks/common/openshift-cluster/cockpit-ui.yml
@@ -0,0 +1,6 @@
+---
+- name: Create Hosted Resources - cockpit-ui
+ hosts: oo_first_master
+ roles:
+ - role: cockpit-ui
+ when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index bbd5a0185..dbe09dce2 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -8,7 +8,10 @@
vars:
- r_openshift_health_checker_playbook_context: install
post_tasks:
- - action: openshift_health_check
+
+ - name: Verify Requirements - EL
+ when: ansible_distribution != "Fedora"
+ action: openshift_health_check
args:
checks:
- disk_availability
@@ -17,48 +20,48 @@
- package_version
- docker_image_availability
- docker_storage
-
-- include: initialize_oo_option_facts.yml
- tags:
- - always
-
-- name: Set hostname
- hosts: oo_masters_to_config:oo_nodes_to_config
- tasks:
- # TODO: switch back to hostname module once we depend on ansible-2.4
- # https://github.com/ansible/ansible/pull/25906
- - name: Set hostname
- command: "hostnamectl set-hostname {{ openshift.common.hostname }}"
- when: openshift_set_hostname | default(false,true) | bool
+ - name: Verify Requirements - Fedora
+ when: ansible_distribution == "Fedora"
+ action: openshift_health_check
+ args:
+ checks:
+ - docker_image_availability
- include: ../openshift-etcd/config.yml
- include: ../openshift-nfs/config.yml
- tags:
- - nfs
+ when: groups.oo_nfs_to_config | default([]) | count > 0
- include: ../openshift-loadbalancer/config.yml
- tags:
- - loadbalancer
+ when: groups.oo_lb_to_config | default([]) | count > 0
- include: ../openshift-master/config.yml
- include: ../openshift-master/additional_config.yml
- include: ../openshift-node/config.yml
- tags:
- - node
- include: ../openshift-glusterfs/config.yml
- tags:
- - glusterfs
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
- include: openshift_hosted.yml
- tags:
- - hosted
+
+- include: openshift_metrics.yml
+ when: openshift_metrics_install_metrics | default(false) | bool
+
+- include: openshift_logging.yml
+ when: openshift_logging_install_logging | default(false) | bool
- include: service_catalog.yml
- when:
- - openshift_enable_service_catalog | default(false) | bool
- tags:
- - servicecatalog
+ when: openshift_enable_service_catalog | default(false) | bool
+
+- include: openshift_management.yml
+ when: openshift_management_install_management | default(false) | bool
+
+- name: Print deprecated variable warning message if necessary
+ hosts: oo_first_master
+ gather_facts: no
+ tasks:
+ - debug: msg="{{__deprecation_message}}"
+ when:
+ - __deprecation_message | default ('') | length > 0
diff --git a/playbooks/common/openshift-cluster/create_persistent_volumes.yml b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
new file mode 100644
index 000000000..8a60a30b8
--- /dev/null
+++ b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
@@ -0,0 +1,9 @@
+---
+- name: Create Hosted Resources - persistent volumes
+ hosts: oo_first_master
+ vars:
+ persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
+ persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
+ roles:
+ - role: openshift_persistent_volumes
+ when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index 16a733899..e55b2f964 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -43,11 +43,14 @@
- name: Evaluate groups - Fail if no etcd hosts group is defined
fail:
msg: >
- No etcd hosts defined. Running an all-in-one master is deprecated and
- will no longer be supported in a future upgrade.
+ Running etcd as an embedded service is no longer supported. If this is a
+ new install please define an 'etcd' group with either one or three
+ hosts. These hosts may be the same hosts as your masters. If this is an
+ upgrade you may set openshift_master_unsupported_embedded_etcd=true
+ until a migration playbook becomes available.
when:
- - g_etcd_hosts | default([]) | length == 0
- - not openshift_master_unsupported_all_in_one | default(False)
+ - g_etcd_hosts | default([]) | length not in [3,1]
+ - not openshift_master_unsupported_embedded_etcd | default(False)
- not openshift_node_bootstrap | default(False)
- name: Evaluate oo_all_hosts
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 0723575c2..be2f8b5f4 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -93,8 +93,8 @@
state: present
with_items:
- iproute
- - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'python-dbus' }}"
- - PyYAML
+ - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
+ - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
- yum-utils
- name: Ensure various deps for running system containers are installed
diff --git a/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml b/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml
deleted file mode 100644
index ac3c702a0..000000000
--- a/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-- name: Set oo_option facts
- hosts: oo_all_hosts
- tags:
- - always
- tasks:
- - set_fact:
- openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
- when: openshift_docker_additional_registries is not defined
- - set_fact:
- openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
- when: openshift_docker_insecure_registries is not defined
- - set_fact:
- openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
- when: openshift_docker_blocked_registries is not defined
- - set_fact:
- openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
- when: openshift_docker_options is not defined
- - set_fact:
- openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
- when: openshift_docker_log_driver is not defined
- - set_fact:
- openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
- when: openshift_docker_log_options is not defined
- - set_fact:
- openshift_docker_selinux_enabled: "{{ lookup('oo_option', 'docker_selinux_enabled') }}"
- when: openshift_docker_selinux_enabled is not defined
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index 7af6b25bc..e6400ea61 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,4 +1,15 @@
---
+# openshift_install_base_package_group may be set in a play variable to limit
+# the host groups the base package is installed on. This is currently used
+# for master/control-plane upgrades.
+- name: Set version_install_base_package true on masters and nodes
+ hosts: "{{ openshift_install_base_package_group | default('oo_masters_to_config:oo_nodes_to_config') }}"
+ tasks:
+ - name: Set version_install_base_package true
+ set_fact:
+ version_install_base_package: True
+ when: version_install_base_package is not defined
+
# NOTE: requires openshift_facts be run
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
@@ -8,8 +19,8 @@
# NOTE: We set this even on etcd hosts as they may also later run as masters,
# and we don't want to install wrong version of docker and have to downgrade
# later.
-- name: Set openshift_version for all hosts
- hosts: oo_all_hosts:!oo_first_master
+- name: Set openshift_version for etcd, node, and master hosts
+ hosts: oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master
vars:
openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
pre_tasks:
diff --git a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
new file mode 100644
index 000000000..4b4f19690
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
@@ -0,0 +1,6 @@
+---
+- name: Create Hosted Resources - openshift_default_storage_class
+ hosts: oo_first_master
+ roles:
+ - role: openshift_default_storage_class
+ when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 75339f6df..c1536eb36 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -1,75 +1,35 @@
---
-- name: Create persistent volumes
- hosts: oo_first_master
- tags:
- - hosted
- vars:
- persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
- persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
- roles:
- - role: openshift_persistent_volumes
- when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
+- name: Hosted Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Hosted install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_hosted: "In Progress"
+ aggregate: false
-- name: Create Hosted Resources
- hosts: oo_first_master
- tags:
- - hosted
- pre_tasks:
- - set_fact:
- openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
- openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
- when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
- - set_fact:
- logging_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
- logging_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
- logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default(openshift.master.public_api_url) }}"
- logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
- logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
- roles:
- - role: openshift_default_storage_class
- when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
- - role: openshift_hosted
- - role: openshift_metrics
- when: openshift_hosted_metrics_deploy | default(false) | bool
- - role: openshift_logging
- when: openshift_hosted_logging_deploy | default(false) | bool
- openshift_hosted_logging_hostname: "{{ logging_hostname }}"
- openshift_hosted_logging_ops_hostname: "{{ logging_ops_hostname }}"
- openshift_hosted_logging_master_public_url: "{{ logging_master_public_url }}"
- openshift_hosted_logging_elasticsearch_cluster_size: "{{ logging_elasticsearch_cluster_size }}"
- openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
- openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}"
- openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
- openshift_hosted_logging_elasticsearch_ops_cluster_size: "{{ logging_elasticsearch_ops_cluster_size }}"
- openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_hosted_loggingops_storage_kind | default(none) == 'dynamic' else '' }}"
- openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs' ] else '' }}"
- openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es-ops' if openshift_hosted_loggingops_storage_kind | default(none) =='dynamic' else '' }}"
+- include: create_persistent_volumes.yml
- - role: cockpit-ui
- when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
+- include: openshift_default_storage_class.yml
- - role: openshift_prometheus
- when: openshift_hosted_prometheus_deploy | default(false) | bool
+- include: openshift_hosted_create_projects.yml
-- name: Update master-config for publicLoggingURL
- hosts: oo_masters_to_config:!oo_first_master
- tags:
- - hosted
- pre_tasks:
- - set_fact:
- openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
- - set_fact:
- openshift_metrics_hawkular_hostname: "{{ g_metrics_hostname | default('hawkular-metrics.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
- tasks:
+- include: openshift_hosted_router.yml
+
+- include: openshift_hosted_registry.yml
- - block:
- - include_role:
- name: openshift_logging
- tasks_from: update_master_config
- when: openshift_hosted_logging_deploy | default(false) | bool
+- include: cockpit-ui.yml
- - block:
- - include_role:
- name: openshift_metrics
- tasks_from: update_master_config
- when: openshift_hosted_metrics_deploy | default(false) | bool
+- include: openshift_prometheus.yml
+ when: openshift_hosted_prometheus_deploy | default(False) | bool
+
+- name: Hosted Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Hosted install 'Complete'
+ set_stats:
+ data:
+ installer_phase_hosted: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml b/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml
new file mode 100644
index 000000000..d5ca5185c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml
@@ -0,0 +1,7 @@
+---
+- name: Create Hosted Resources - openshift projects
+ hosts: oo_first_master
+ tasks:
+ - include_role:
+ name: openshift_hosted
+ tasks_from: create_projects.yml
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_registry.yml b/playbooks/common/openshift-cluster/openshift_hosted_registry.yml
new file mode 100644
index 000000000..2a91a827c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_hosted_registry.yml
@@ -0,0 +1,13 @@
+---
+- name: Create Hosted Resources - registry
+ hosts: oo_first_master
+ tasks:
+ - set_fact:
+ openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
+ - include_role:
+ name: openshift_hosted
+ tasks_from: registry.yml
+ when:
+ - openshift_hosted_manage_registry | default(True) | bool
+ - openshift_hosted_registry_registryurl is defined
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_router.yml b/playbooks/common/openshift-cluster/openshift_hosted_router.yml
new file mode 100644
index 000000000..bcb5a34a4
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_hosted_router.yml
@@ -0,0 +1,13 @@
+---
+- name: Create Hosted Resources - router
+ hosts: oo_first_master
+ tasks:
+ - set_fact:
+ openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
+ - include_role:
+ name: openshift_hosted
+ tasks_from: router.yml
+ when:
+ - openshift_hosted_manage_router | default(True) | bool
+ - openshift_hosted_router_registryurl is defined
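
The router, registry, and prometheus includes above are gated on inventory toggles; a hedged sketch of those variables in YAML form, with the defaults taken from the when: clauses (the values shown are only illustrative):

openshift_hosted_manage_router: true        # default true in openshift_hosted_router.yml
openshift_hosted_manage_registry: true      # default true in openshift_hosted_registry.yml
openshift_hosted_prometheus_deploy: false   # default false in config.yml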
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
index c1a5d83cd..529a4c939 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/common/openshift-cluster/openshift_logging.yml
@@ -1,4 +1,14 @@
---
+- name: Logging Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Logging install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_logging: "In Progress"
+ aggregate: false
+
- name: OpenShift Aggregated Logging
hosts: oo_first_master
roles:
@@ -11,4 +21,13 @@
- include_role:
name: openshift_logging
tasks_from: update_master_config
- when: openshift_logging_install_logging | default(false) | bool
+
+- name: Logging Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Logging install 'Complete'
+ set_stats:
+ data:
+ installer_phase_logging: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-cluster/openshift_management.yml b/playbooks/common/openshift-cluster/openshift_management.yml
new file mode 100644
index 000000000..6e582920b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_management.yml
@@ -0,0 +1,25 @@
+---
+- name: Management Install Checkpoint Start
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set Management install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_Management: "In Progress"
+ aggregate: false
+
+- name: Management
+ include: ../openshift-management/config.yml
+
+- name: Management Install Checkpoint End
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set Management install 'Complete'
+ set_stats:
+ data:
+ installer_phase_Management: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
index 1dc180c26..9c0bd489b 100644
--- a/playbooks/common/openshift-cluster/openshift_metrics.yml
+++ b/playbooks/common/openshift-cluster/openshift_metrics.yml
@@ -1,8 +1,18 @@
---
+- name: Metrics Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Metrics install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_metrics: "In Progress"
+ aggregate: false
+
- name: OpenShift Metrics
hosts: oo_first_master
roles:
- - openshift_metrics
+ - role: openshift_metrics
- name: OpenShift Metrics
hosts: oo_masters:!oo_first_master
@@ -12,3 +22,13 @@
include_role:
name: openshift_metrics
tasks_from: update_master_config.yaml
+
+- name: Metrics Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Metrics install 'Complete'
+ set_stats:
+ data:
+ installer_phase_metrics: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml
index a979c0c00..ac2d250a3 100644
--- a/playbooks/common/openshift-cluster/openshift_prometheus.yml
+++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml
@@ -1,9 +1,5 @@
---
-- include: std_include.yml
-
-- name: OpenShift Prometheus
+- name: Create Hosted Resources - openshift_prometheus
hosts: oo_first_master
roles:
- - openshift_prometheus
- vars:
- openshift_prometheus_state: present
+ - role: openshift_prometheus
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml
new file mode 100644
index 000000000..4a9fbf7eb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml
@@ -0,0 +1,12 @@
+---
+- name: Check certificate expiry
+ hosts: "{{ g_check_expiry_hosts }}"
+ vars:
+ openshift_certificate_expiry_show_all: yes
+ roles:
+ # Sets 'check_results' per host which contains health status for
+ # etcd, master and node certificates. We will use 'check_results'
+ # to determine if any certificates were expired prior to running
+ # this playbook. Service restarts will be skipped if any
+ # certificates were previously expired.
+ - role: openshift_certificate_expiry
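
The restart guards later in this diff read the per-host fact set by this role; a rough, unverified sketch of the structure those oo_collect('check_results.check_results...') expressions assume (field names inferred from the guards, paths illustrative):

check_results:
  check_results:
    etcd:
      - health: ok                                    # 'expired' here would skip the etcd restart
    ocp_certs:
      - path: /etc/origin/master/master.server.crt    # illustrative path
        health: ok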
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml
new file mode 100644
index 000000000..d738c8207
--- /dev/null
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml
@@ -0,0 +1,19 @@
+---
+- name: Backup and remove generated etcd certificates
+ hosts: oo_first_etcd
+ any_errors_fatal: true
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup_generated_certificates
+ - include_role:
+ name: etcd
+ tasks_from: remove_generated_certificates
+
+- name: Backup deployed etcd certificates
+ hosts: oo_etcd_to_config
+ any_errors_fatal: true
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup_server_certificates
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
index 6964e8567..044875d1c 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
@@ -13,34 +13,15 @@
- name: Backup existing etcd CA certificate directories
hosts: oo_etcd_to_config
- roles:
- - role: etcd_common
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
tasks:
- - name: Determine if CA certificate directory exists
- stat:
- path: "{{ etcd_ca_dir }}"
- register: etcd_ca_certs_dir_stat
- - name: Backup generated etcd certificates
- command: >
- tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz
- {{ etcd_ca_dir }}
- args:
- warn: no
- when: etcd_ca_certs_dir_stat.stat.exists | bool
- - name: Remove CA certificate directory
- file:
- path: "{{ etcd_ca_dir }}"
- state: absent
- when: etcd_ca_certs_dir_stat.stat.exists | bool
+ - include_role:
+ name: etcd
+ tasks_from: backup_ca_certificates
+ - include_role:
+ name: etcd
+ tasks_from: remove_ca_certificates
-- name: Generate new etcd CA
- hosts: oo_first_etcd
- roles:
- - role: openshift_etcd_ca
- etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+- include: ../../openshift-etcd/ca.yml
- name: Create temp directory for syncing certs
hosts: localhost
@@ -55,52 +36,14 @@
- name: Distribute etcd CA to etcd hosts
hosts: oo_etcd_to_config
- vars:
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- roles:
- - role: etcd_common
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
tasks:
- - name: Create a tarball of the etcd ca certs
- command: >
- tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz
- -C {{ etcd_ca_dir }} .
- args:
- creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
- warn: no
- delegate_to: "{{ etcd_ca_host }}"
- run_once: true
- - name: Retrieve etcd ca cert tarball
- fetch:
- src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- delegate_to: "{{ etcd_ca_host }}"
- run_once: true
- - name: Ensure ca directory exists
- file:
- path: "{{ etcd_ca_dir }}"
- state: directory
- - name: Unarchive etcd ca cert tarballs
- unarchive:
- src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ etcd_ca_dir }}"
- - name: Read current etcd CA
- slurp:
- src: "{{ etcd_conf_dir }}/ca.crt"
- register: g_current_etcd_ca_output
- - name: Read new etcd CA
- slurp:
- src: "{{ etcd_ca_dir }}/ca.crt"
- register: g_new_etcd_ca_output
- - copy:
- content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}"
- dest: "{{ item }}/ca.crt"
- with_items:
- - "{{ etcd_conf_dir }}"
- - "{{ etcd_ca_dir }}"
+ - include_role:
+ name: etcd
+ tasks_from: distribute_ca
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- include: ../../openshift-etcd/restart.yml
# Do not restart etcd when etcd certificates were previously expired.
@@ -111,17 +54,13 @@
- name: Retrieve etcd CA certificate
hosts: oo_first_etcd
- roles:
- - role: etcd_common
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
tasks:
- - name: Retrieve etcd CA certificate
- fetch:
- src: "{{ etcd_conf_dir }}/ca.crt"
- dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
+ - include_role:
+ name: etcd
+ tasks_from: retrieve_ca_certificates
+ vars:
+ etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- name: Distribute etcd CA to masters
hosts: oo_masters_to_config
@@ -146,13 +85,19 @@
changed_when: false
- include: ../../openshift-master/restart.yml
- # Do not restart masters when master certificates were previously expired.
- when: ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
- and
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ # Do not restart masters when master or etcd certificates were previously expired.
+ when:
+ # masters
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ # etcd
+ - ('expired' not in (hostvars
+ | oo_select_keys(groups['etcd'])
+ | oo_collect('check_results.check_results.etcd')
+ | oo_collect('health')))
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml
deleted file mode 100644
index 6b5c805e6..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml
+++ /dev/null
@@ -1,70 +0,0 @@
----
-- name: Backup and remove generated etcd certificates
- hosts: oo_first_etcd
- any_errors_fatal: true
- roles:
- - role: etcd_common
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- post_tasks:
- - name: Determine if generated etcd certificates exist
- stat:
- path: "{{ etcd_conf_dir }}/generated_certs"
- register: etcd_generated_certs_dir_stat
- - name: Backup generated etcd certificates
- command: >
- tar -czf {{ etcd_conf_dir }}/etcd-generated-certificate-backup-{{ ansible_date_time.epoch }}.tgz
- {{ etcd_conf_dir }}/generated_certs
- args:
- warn: no
- when: etcd_generated_certs_dir_stat.stat.exists | bool
- - name: Remove generated etcd certificates
- file:
- path: "{{ item }}"
- state: absent
- with_items:
- - "{{ etcd_conf_dir }}/generated_certs"
-
-- name: Backup and removed deployed etcd certificates
- hosts: oo_etcd_to_config
- any_errors_fatal: true
- roles:
- - role: etcd_common
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- post_tasks:
- - name: Backup etcd certificates
- command: >
- tar -czvf /etc/etcd/etcd-server-certificate-backup-{{ ansible_date_time.epoch }}.tgz
- {{ etcd_conf_dir }}/ca.crt
- {{ etcd_conf_dir }}/server.crt
- {{ etcd_conf_dir }}/server.key
- {{ etcd_conf_dir }}/peer.crt
- {{ etcd_conf_dir }}/peer.key
- args:
- warn: no
-
-- name: Redeploy etcd certificates
- hosts: oo_etcd_to_config
- any_errors_fatal: true
- roles:
- - role: openshift_etcd_server_certificates
- etcd_certificates_redeploy: true
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
-
-- name: Redeploy etcd client certificates for masters
- hosts: oo_masters_to_config
- any_errors_fatal: true
- roles:
- - role: openshift_etcd_client_certificates
- etcd_certificates_redeploy: true
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
- etcd_cert_prefix: "master.etcd-"
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml b/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml
new file mode 100644
index 000000000..4dbc041b0
--- /dev/null
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml
@@ -0,0 +1,38 @@
+---
+- name: Backup and remove master certificates
+ hosts: oo_masters_to_config
+ any_errors_fatal: true
+ vars:
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}"
+ pre_tasks:
+ - stat:
+ path: "{{ openshift.common.config_base }}/generated-configs"
+ register: openshift_generated_configs_dir_stat
+ - name: Backup generated certificate and config directories
+ command: >
+ tar -czvf /etc/origin/master-node-cert-config-backup-{{ ansible_date_time.epoch }}.tgz
+ {{ openshift.common.config_base }}/generated-configs
+ {{ openshift.common.config_base }}/master
+ when: openshift_generated_configs_dir_stat.stat.exists
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+ - name: Remove generated certificate directories
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - "{{ openshift.common.config_base }}/generated-configs"
+ - name: Remove generated certificates
+ file:
+ path: "{{ openshift.common.config_base }}/master/{{ item }}"
+ state: absent
+ with_items:
+ - "{{ hostvars[inventory_hostname] | certificates_to_synchronize(include_keys=false, include_ca=false) }}"
+ - "etcd.server.crt"
+ - "etcd.server.key"
+ - "master.server.crt"
+ - "master.server.key"
+ - "openshift-master.crt"
+ - "openshift-master.key"
+ - "openshift-master.kubeconfig"
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
deleted file mode 100644
index 51b196299..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
+++ /dev/null
@@ -1,63 +0,0 @@
----
-- name: Redeploy master certificates
- hosts: oo_masters_to_config
- any_errors_fatal: true
- vars:
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}"
- pre_tasks:
- - stat:
- path: "{{ openshift_generated_configs_dir }}"
- register: openshift_generated_configs_dir_stat
- - name: Backup generated certificate and config directories
- command: >
- tar -czvf /etc/origin/master-node-cert-config-backup-{{ ansible_date_time.epoch }}.tgz
- {{ openshift_generated_configs_dir }}
- {{ openshift.common.config_base }}/master
- when: openshift_generated_configs_dir_stat.stat.exists
- delegate_to: "{{ openshift_ca_host }}"
- run_once: true
- - name: Remove generated certificate directories
- file:
- path: "{{ item }}"
- state: absent
- with_items:
- - "{{ openshift_generated_configs_dir }}"
- - name: Remove generated certificates
- file:
- path: "{{ openshift.common.config_base }}/master/{{ item }}"
- state: absent
- with_items:
- - "{{ hostvars[inventory_hostname] | certificates_to_synchronize(include_keys=false, include_ca=false) }}"
- - "etcd.server.crt"
- - "etcd.server.key"
- - "master.server.crt"
- - "master.server.key"
- - "openshift-master.crt"
- - "openshift-master.key"
- - "openshift-master.kubeconfig"
- - name: Remove generated etcd client certificates
- file:
- path: "{{ openshift.common.config_base }}/master/{{ item }}"
- state: absent
- with_items:
- - "master.etcd-client.crt"
- - "master.etcd-client.key"
- when: groups.oo_etcd_to_config | default([]) | length == 0
- roles:
- - role: openshift_master_certificates
- openshift_master_etcd_hosts: "{{ hostvars
- | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
- | oo_collect('openshift.common.hostname')
- | default(none, true) }}"
- openshift_certificates_redeploy: true
- - role: lib_utils
- post_tasks:
- - yedit:
- src: "{{ openshift.common.config_base }}/master/master-config.yaml"
- key: servingInfo.namedCertificates
- value: "{{ openshift.master.named_certificates | default([]) | oo_named_certificates_list }}"
- when:
- - ('named_certificates' in openshift.master)
- - openshift.master.named_certificates | default([]) | length > 0
- - openshift_master_overwrite_named_certificates | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/nodes.yml b/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml
index 4990a03f2..2ad84b3b9 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/nodes.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml
@@ -22,8 +22,3 @@
state: absent
with_items:
- "{{ openshift.common.config_base }}/node/ca.crt"
- roles:
- - role: openshift_node_certificates
- openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- openshift_certificates_redeploy: true
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
index 089ae6bbc..2068ed199 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
@@ -7,7 +7,7 @@
when: not openshift.common.version_gte_3_2_or_1_2 | bool
- name: Check cert expirys
- hosts: oo_nodes_to_config:oo_masters_to_config
+ hosts: oo_nodes_to_config:oo_masters_to_config:oo_etcd_to_config
vars:
openshift_certificate_expiry_show_all: yes
roles:
@@ -44,8 +44,8 @@
- modify_yaml:
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: servingInfo.clientCA
- yaml_value: ca-bundle.crt
- when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca-bundle.crt'
+ yaml_value: ca.crt
+ when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt'
- modify_yaml:
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: etcdClientInfo.ca
@@ -105,25 +105,27 @@
- "ca.serial.txt"
- "ca-bundle.crt"
-- name: Generate new OpenShift CA certificate
+- name: Create temporary directory for creating new CA certificate
hosts: oo_first_master
- pre_tasks:
+ tasks:
- name: Create temporary directory for creating new CA certificate
command: >
mktemp -d /tmp/openshift-ansible-XXXXXXX
register: g_new_openshift_ca_mktemp
changed_when: false
- roles:
- - role: openshift_ca
+
+- name: Create OpenShift CA
+ hosts: oo_first_master
+ vars:
# Set openshift_ca_config_dir to a temporary directory where CA
# will be created. We'll replace the existing CA with the CA
# created in the temporary directory.
- openshift_ca_config_dir: "{{ g_new_openshift_ca_mktemp.stdout }}"
+ openshift_ca_config_dir: "{{ hostvars[groups.oo_first_master.0].g_new_openshift_ca_mktemp.stdout }}"
+ roles:
+ - role: openshift_master_facts
+ - role: openshift_named_certificates
+ - role: openshift_ca
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- openshift_master_hostnames: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_config'] | default([]))
- | oo_collect('openshift.common.all_hostnames')
- | oo_flatten | unique }}"
- name: Create temp directory for syncing certs
hosts: localhost
@@ -209,16 +211,22 @@
with_items: "{{ client_users }}"
- include: ../../openshift-master/restart.yml
- # Do not restart masters when master certificates were previously expired.
- when: ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
- and
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ # Do not restart masters when master or etcd certificates were previously expired.
+ when:
+ # masters
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ # etcd
+ - ('expired' not in (hostvars
+ | oo_select_keys(groups['etcd'])
+ | oo_collect('check_results.check_results.etcd')
+ | oo_collect('health')))
- name: Distribute OpenShift CA certificate to nodes
hosts: oo_nodes_to_config
@@ -268,13 +276,28 @@
changed_when: false
- include: ../../openshift-node/restart.yml
- # Do not restart nodes when node certificates were previously expired.
- when: ('expired' not in hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"}))
- and
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"}))
+ # Do not restart nodes when node, master or etcd certificates were previously expired.
+ when:
+ # nodes
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"}))
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"}))
+ # masters
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ # etcd
+ - ('expired' not in (hostvars
+ | oo_select_keys(groups['etcd'])
+ | oo_collect('check_results.check_results.etcd')
+ | oo_collect('health')))
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
index 748bbbf91..2116c745c 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
@@ -7,23 +7,34 @@
tasks:
- name: Create temp directory for kubeconfig
command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
+ register: router_cert_redeploy_tempdir
changed_when: false
+
- name: Copy admin client config(s)
command: >
- cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
changed_when: false
- name: Determine if router exists
command: >
{{ openshift.common.client_binary }} get dc/router -o json
- --config={{ mktemp.stdout }}/admin.kubeconfig
+ --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
-n default
register: l_router_dc
failed_when: false
changed_when: false
- - set_fact:
+ - name: Determine if router service exists
+ command: >
+ {{ openshift.common.client_binary }} get svc/router -o json
+ --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
+ -n default
+ register: l_router_svc
+ failed_when: false
+ changed_when: false
+
+ - name: Collect router environment variables and secrets
+ set_fact:
router_env_vars: "{{ ((l_router_dc.stdout | from_json)['spec']['template']['spec']['containers'][0]['env']
| oo_collect('name'))
| default([]) }}"
@@ -34,20 +45,32 @@
changed_when: false
when: l_router_dc.rc == 0
+ - name: Collect router service annotations
+ set_fact:
+ router_service_annotations: "{{ (l_router_svc.stdout | from_json)['metadata']['annotations'] if 'annotations' in (l_router_svc.stdout | from_json)['metadata'] else [] }}"
+ when: l_router_svc.rc == 0
+
- name: Update router environment variables
shell: >
{{ openshift.common.client_binary }} env dc/router
OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)"
OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-router.crt)"
OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-router.key)"
- --config={{ mktemp.stdout }}/admin.kubeconfig
+ --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
-n default
- when: l_router_dc.rc == 0 and 'OPENSHIFT_CA_DATA' in router_env_vars and 'OPENSHIFT_CERT_DATA' in router_env_vars and 'OPENSHIFT_KEY_DATA' in router_env_vars
+ when:
+ - l_router_dc.rc == 0
+ - ('OPENSHIFT_CA_DATA' in router_env_vars)
+ - ('OPENSHIFT_CERT_DATA' in router_env_vars)
+ - ('OPENSHIFT_KEY_DATA' in router_env_vars)
+ # When the router service contains service signer annotations we
+ # will delete the existing certificate secret and allow OpenShift to
+ # replace the secret.
- block:
- name: Delete existing router certificate secret
oc_secret:
- kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+ kubeconfig: "{{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig"
name: router-certs
namespace: default
state: absent
@@ -58,86 +81,61 @@
{{ openshift.common.client_binary }} annotate service/router
service.alpha.openshift.io/serving-cert-secret-name-
service.alpha.openshift.io/serving-cert-signed-by-
- --config={{ mktemp.stdout }}/admin.kubeconfig
+ --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
-n default
- name: Add serving-cert-secret annotation to router service
command: >
{{ openshift.common.client_binary }} annotate service/router
service.alpha.openshift.io/serving-cert-secret-name=router-certs
- --config={{ mktemp.stdout }}/admin.kubeconfig
+ --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
-n default
- when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is undefined
+ when:
+ - l_router_dc.rc == 0
+ - l_router_svc.rc == 0
+ - ('router-certs' in router_secrets)
+ - openshift_hosted_router_certificate is undefined
+ - ('service.alpha.openshift.io/serving-cert-secret-name') in router_service_annotations
+ - ('service.alpha.openshift.io/serving-cert-signed-by') in router_service_annotations
- - block:
- - assert:
- that:
- - "'certfile' in openshift_hosted_router_certificate"
- - "'keyfile' in openshift_hosted_router_certificate"
- - "'cafile' in openshift_hosted_router_certificate"
- msg: |-
- openshift_hosted_router_certificate has been set in the inventory but is
- missing one or more required keys. Ensure that 'certfile', 'keyfile',
- and 'cafile' keys have been specified for the openshift_hosted_router_certificate
- inventory variable.
-
- - name: Read router certificate and key
- become: no
- local_action:
- module: slurp
- src: "{{ item }}"
- register: openshift_router_certificate_output
- # Defaulting dictionary keys to none to avoid deprecation warnings
- # (future fatal errors) during template evaluation. Dictionary keys
- # won't be accessed unless openshift_hosted_router_certificate is
- # defined and has all keys (certfile, keyfile, cafile) which we
- # check above.
- with_items:
- - "{{ (openshift_hosted_router_certificate | default({'certfile':none})).certfile }}"
- - "{{ (openshift_hosted_router_certificate | default({'keyfile':none})).keyfile }}"
- - "{{ (openshift_hosted_router_certificate | default({'cafile':none})).cafile }}"
-
- - name: Write temporary router certificate file
- copy:
- content: "{% for certificate in openshift_router_certificate_output.results -%}{{ certificate.content | b64decode }}{% endfor -%}"
- dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
- mode: 0600
-
- - name: Write temporary router key file
- copy:
- content: "{{ (openshift_router_certificate_output.results
- | oo_collect('content', {'source':(openshift_hosted_router_certificate | default({'keyfile':none})).keyfile}))[0] | b64decode }}"
- dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
- mode: 0600
-
- - name: Replace router-certs secret
- shell: >
- {{ openshift.common.client_binary }} secrets new router-certs
- tls.crt="{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
- tls.key="{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
- --type=kubernetes.io/tls
- --config={{ mktemp.stdout }}/admin.kubeconfig
- --confirm
- -o json | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig replace -f -
+ # When there are no annotations on the router service we will allow
+ # the openshift_hosted role to either create a new wildcard
+ # certificate (since we deleted the original) or reapply a custom
+ # openshift_hosted_router_certificate.
+ - file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /etc/origin/master/openshift-router.crt
+ - /etc/origin/master/openshift-router.key
+ when:
+ - l_router_dc.rc == 0
+ - l_router_svc.rc == 0
+ - ('router-certs' in router_secrets)
+ - ('service.alpha.openshift.io/serving-cert-secret-name') not in router_service_annotations
+ - ('service.alpha.openshift.io/serving-cert-signed-by') not in router_service_annotations
- - name: Remove temporary router certificate and key files
- file:
- path: "{{ item }}"
- state: absent
- with_items:
- - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
- - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
- when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is defined
+ - include_role:
+ name: openshift_hosted
+ tasks_from: main
+ vars:
+ openshift_hosted_manage_registry: false
+ when:
+ - l_router_dc.rc == 0
+ - l_router_svc.rc == 0
+ - ('router-certs' in router_secrets)
+ - ('service.alpha.openshift.io/serving-cert-secret-name') not in router_service_annotations
+ - ('service.alpha.openshift.io/serving-cert-signed-by') not in router_service_annotations
- name: Redeploy router
command: >
{{ openshift.common.client_binary }} deploy dc/router
--latest
- --config={{ mktemp.stdout }}/admin.kubeconfig
+ --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
-n default
- name: Delete temp directory
file:
- name: "{{ mktemp.stdout }}"
+ name: "{{ router_cert_redeploy_tempdir.stdout }}"
state: absent
changed_when: False
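
The custom-certificate branch above keys off openshift_hosted_router_certificate; a minimal sketch of that variable, with the required keys taken from the assert this diff removes (certfile, keyfile, cafile) and placeholder paths:

openshift_hosted_router_certificate:
  certfile: /path/to/router.crt       # placeholder
  keyfile: /path/to/router.key        # placeholder
  cafile: /path/to/router-ca.crt      # placeholder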
diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml
index 599350258..bd964b2ce 100644
--- a/playbooks/common/openshift-cluster/service_catalog.yml
+++ b/playbooks/common/openshift-cluster/service_catalog.yml
@@ -1,20 +1,29 @@
---
-
-- name: Update Master configs
- hosts: oo_masters
- serial: 1
+- name: Service Catalog Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
tasks:
- - block:
- - include_role:
- name: openshift_service_catalog
- tasks_from: wire_aggregator
- vars:
- first_master: "{{ groups.oo_first_master[0] }}"
+ - name: Set Service Catalog install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_servicecatalog: "In Progress"
+ aggregate: false
- name: Service Catalog
hosts: oo_first_master
roles:
- openshift_service_catalog
- ansible_service_broker
+ - template_service_broker
vars:
first_master: "{{ groups.oo_first_master[0] }}"
+
+- name: Service Catalog Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Service Catalog install 'Complete'
+ set_stats:
+ data:
+ installer_phase_servicecatalog: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
index cef0072f3..45b34c8bd 100644
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ b/playbooks/common/openshift-cluster/std_include.yml
@@ -1,4 +1,16 @@
---
+- name: Initialization Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ roles:
+ - installer_checkpoint
+ tasks:
+ - name: Set install initialization 'In Progress'
+ set_stats:
+ data:
+ installer_phase_initialize: "In Progress"
+ aggregate: false
+
- include: evaluate_groups.yml
tags:
- always
@@ -22,3 +34,13 @@
- include: initialize_openshift_version.yml
tags:
- always
+
+- name: Initialization Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set install initialization 'Complete'
+ set_stats:
+ data:
+ installer_phase_initialize: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index b2a2eac9a..52345a9ba 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -18,12 +18,16 @@
- name: Get current version of Docker
command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
register: curr_docker_version
+ retries: 4
+ until: curr_docker_version | succeeded
changed_when: false
- name: Get latest available version of Docker
command: >
{{ repoquery_cmd }} --qf '%{version}' "docker"
register: avail_docker_version
+ retries: 4
+ until: avail_docker_version | succeeded
# Don't expect docker rpm to be available on hosts that don't already have it installed:
when: pkg_check.rc == 0
failed_when: false
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index 2cc6c9019..d086cad00 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -3,12 +3,15 @@
hosts: oo_etcd_hosts_to_backup
roles:
- role: openshift_etcd_facts
- - role: etcd_common
- r_etcd_common_action: backup
- r_etcd_common_backup_tag: etcd_backup_tag
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ post_tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup
+ vars:
+ r_etcd_common_backup_tag: "{{ etcd_backup_tag }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- name: Gate on etcd backup
hosts: localhost
@@ -20,7 +23,7 @@
| oo_select_keys(groups.oo_etcd_hosts_to_backup)
| oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
- set_fact:
- etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) | list }}"
- fail:
msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
when: etcd_backup_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
index 64abc54e7..5b8ba3bb2 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
@@ -15,10 +15,15 @@
hosts: oo_etcd_hosts_to_upgrade
tasks:
- include_role:
- name: etcd_common
- vars:
- r_etcd_common_action: drop_etcdctl
+ name: etcd
+ tasks_from: drop_etcdctl
- name: Perform etcd upgrade
include: ./upgrade.yml
when: openshift_etcd_upgrade | default(true) | bool
+
+- name: Backup etcd
+ include: backup.yml
+ vars:
+ etcd_backup_tag: "post-3.0-"
+ when: openshift_etcd_backup | default(true) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
index 39e82498d..d71c96cd7 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -98,13 +98,11 @@
serial: 1
tasks:
- include_role:
- name: etcd_upgrade
+ name: etcd
+ tasks_from: upgrade_image
+ vars:
+ r_etcd_common_etcd_runtime: "host"
+ etcd_peer: "{{ openshift.common.hostname }}"
when:
- ansible_distribution == 'Fedora'
- not openshift.common.is_containerized | bool
-
-- name: Backup etcd
- include: backup.yml
- vars:
- etcd_backup_tag: "post-3.0-"
- when: openshift_etcd_backup | default(true) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
index 831ca8f57..e5e895775 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
@@ -5,13 +5,14 @@
- name: Upgrade containerized hosts to {{ etcd_upgrade_version }}
hosts: oo_etcd_hosts_to_upgrade
serial: 1
- roles:
- - role: etcd_upgrade
- r_etcd_upgrade_action: upgrade
- r_etcd_upgrade_mechanism: image
- r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- etcd_peer: "{{ openshift.common.hostname }}"
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: upgrade_image
+ vars:
+ r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ etcd_peer: "{{ openshift.common.hostname }}"
when:
- etcd_container_version | default('99') | version_compare(etcd_upgrade_version,'<')
- openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
index 2e79451e0..a2a26bad4 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
@@ -5,13 +5,14 @@
- name: Upgrade to {{ etcd_upgrade_version }}
hosts: oo_etcd_hosts_to_upgrade
serial: 1
- roles:
- - role: etcd_upgrade
- r_etcd_upgrade_action: upgrade
- r_etcd_upgrade_mechanism: rpm
- r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
- r_etcd_common_etcd_runtime: "host"
- etcd_peer: "{{ openshift.common.hostname }}"
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: upgrade_rpm
+ vars:
+ r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
+ r_etcd_common_etcd_runtime: "host"
+ etcd_peer: "{{ openshift.common.hostname }}"
when:
- etcd_rpm_version.stdout | default('99') | version_compare(etcd_upgrade_version, '<')
- ansible_distribution == 'RedHat'
diff --git a/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml b/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml
new file mode 100644
index 000000000..9c9c260fb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml
@@ -0,0 +1,37 @@
+---
+apiVersion: v1
+kind: Role
+metadata:
+ name: shared-resource-viewer
+ namespace: openshift
+rules:
+- apiGroups:
+ - ""
+ - template.openshift.io
+ attributeRestrictions: null
+ resources:
+ - templates
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ - image.openshift.io
+ attributeRestrictions: null
+ resources:
+ - imagestreamimages
+ - imagestreams
+ - imagestreamtags
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ - image.openshift.io
+ attributeRestrictions: null
+ resources:
+ - imagestreams/layers
+ verbs:
+ - get
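
Whether this replacement role is applied depends on the openshift.io/reconcile-protect annotation checked by the upgrade plays below; a hedged task sketch (not part of the patch) for inspecting the live role beforehand:

- name: Inspect the current shared-resource-viewer role
  command: oc get role shared-resource-viewer -n openshift -o yaml
  register: shared_resource_viewer_out
  changed_when: false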
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index c98065cf4..2826951e6 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -5,8 +5,6 @@
g_new_master_hosts: []
g_new_node_hosts: []
-- include: ../initialize_oo_option_facts.yml
-
- include: ../initialize_facts.yml
- name: Ensure firewall is not switched during upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index 72de63070..fc1cbf32a 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -30,6 +30,7 @@
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: " {{ groups['oo_nodes_to_config'] }}"
when:
+ - hostvars[item].openshift is defined
- hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
changed_when: false
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index d9ddf3860..122066955 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -90,10 +90,12 @@
# openshift_examples from failing when trying to replace templates that do
# not already exist. We could have potentially done a replace --force to
# create and update in one step.
- - openshift_examples
+ - role: openshift_examples
+ when: openshift_install_examples | default(true,true) | bool
- openshift_hosted_templates
# Update the existing templates
- role: openshift_examples
+ when: openshift_install_examples | default(true,true) | bool
registry_url: "{{ openshift.master.registry_url }}"
openshift_examples_import_command: replace
- role: openshift_hosted_templates
@@ -101,9 +103,16 @@
openshift_hosted_templates_import_command: replace
# Check for warnings to be printed at the end of the upgrade:
-- name: Check for warnings
+- name: Clean up and display warnings
hosts: oo_masters_to_config
- tasks:
+ tags:
+ - always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ post_tasks:
# Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond:
- name: grep pluginOrderOverride
command: grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml
@@ -119,12 +128,8 @@
- not grep_plugin_order_override | skipped
- grep_plugin_order_override.rc == 0
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_masters_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ - name: Warn if shared-resource-viewer could not be updated
+ debug:
+ msg: "WARNING the shared-resource-viewer role could not be upgraded to 3.6 spec because it's marked protected, please see https://bugzilla.redhat.com/show_bug.cgi?id=1493213"
+ when:
+ - __shared_resource_viewer_protected | default(false)
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
new file mode 100644
index 000000000..f75ae3b15
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
@@ -0,0 +1,22 @@
+---
+- name: Verify all masters have the etcd3 storage backend set
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - lib_utils
+ tasks:
+ - name: Read master storage backend setting
+ yedit:
+ state: list
+ src: /etc/origin/master/master-config.yaml
+ key: kubernetesMasterConfig.apiServerArguments.storage-backend
+ register: _storage_backend
+
+ - fail:
+ msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue"
+ when:
+      # assuming master-config.yaml is properly configured, i.e. the value is a list
+ - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3"
+
+ - debug:
+ msg: "Storage backend is set to etcd3"
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
index 497709d25..2a8de50a2 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
@@ -1,13 +1,16 @@
---
-- name: Verify Host Requirements
+- name: OpenShift Health Checks
hosts: oo_all_hosts
+ any_errors_fatal: true
roles:
- openshift_health_checker
vars:
- r_openshift_health_checker_playbook_context: upgrade
post_tasks:
- - action: openshift_health_check
+ - name: Run health checks (upgrade)
+ action: openshift_health_check
args:
checks:
- disk_availability
- memory_availability
+ - docker_image_availability
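
If one of these checks must be bypassed for a particular upgrade, a hedged inventory sketch, assuming the health checker honors an openshift_disable_check variable (not shown in this diff):

openshift_disable_check: disk_availability,docker_image_availability   # comma-separated list of check names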
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
index 4c345dbe8..3c0017891 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
@@ -5,24 +5,9 @@
tasks:
- fail:
msg: >
- This upgrade is only supported for origin, openshift-enterprise, and online
+ This upgrade is only supported for origin and openshift-enterprise
deployment types
- when: deployment_type not in ['origin','openshift-enterprise', 'online']
-
- # osm_cluster_network_cidr, osm_host_subnet_length and openshift_portal_net are
- # required when upgrading to avoid changes that may occur between releases
- # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1451023
- - assert:
- that:
- - "osm_cluster_network_cidr is defined"
- - "osm_host_subnet_length is defined"
- - "openshift_portal_net is defined"
- msg: >
- osm_cluster_network_cidr, osm_host_subnet_length, and openshift_portal_net are required inventory
- variables when upgrading. These variables should match what is currently used in the cluster. If
- you don't remember what these values are you can find them in /etc/origin/master/master-config.yaml
- on a master with the names clusterNetworkCIDR (osm_cluster_network_cidr),
- hostSubnetLength (osm_host_subnet_length), and serviceNetworkCIDR (openshift_portal_net).
+ when: deployment_type not in ['origin','openshift-enterprise']
# Error out in situations where the user has older versions specified in their
# inventory in any of the openshift_release, openshift_image_tag, and
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 9b4a8e413..142ce5f3d 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -27,13 +27,17 @@
- name: Set fact avail_openshift_version
set_fact:
- avail_openshift_version: "{{ repoquery_out.results.versions.available_versions.0 }}"
+ avail_openshift_version: "{{ repoquery_out.results.versions.available_versions_full.0 }}"
+ - name: Set openshift_pkg_version when not specified
+ set_fact:
+ openshift_pkg_version: "-{{ repoquery_out.results.versions.available_versions_full.0 }}"
+ when: openshift_pkg_version | default('') == ''
- name: Verify OpenShift RPMs are available for upgrade
fail:
msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required"
when:
- - avail_openshift_version | default('0.0', True) | version_compare(openshift_release, '<')
+ - openshift_pkg_version | default('0.0', True) | version_compare(openshift_release, '<')
- name: Fail when openshift version does not meet minimum requirement for Origin upgrade
fail:
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
index 164baca81..8cc46ab68 100644
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
@@ -8,7 +8,6 @@
# TODO: If the sdn package isn't already installed this will install it, we
# should fix that
-
- name: Upgrade master packages
package: name={{ master_pkgs | join(',') }} state=present
vars:
@@ -16,7 +15,7 @@
- "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
- "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version}}"
+ - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
- "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- PyYAML
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index b75aae589..c37a5f9ab 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -31,7 +31,6 @@
role: master
local_facts:
embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level | default(2)) }}"
- name: Upgrade and backup etcd
include: ./etcd/main.yml
@@ -175,7 +174,7 @@
| oo_select_keys(groups.oo_masters_to_config)
| oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
- set_fact:
- master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+ master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) | list }}"
- fail:
msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
when: master_update_failed | length > 0
@@ -189,18 +188,18 @@
roles:
- { role: openshift_cli }
vars:
- origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}"
- ent_reconcile_bindings: true
openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
# Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
# restart.
skip_docker_role: True
+ __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
tasks:
- name: Reconcile Cluster Roles
command: >
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-roles --additive-only=true --confirm -o name
register: reconcile_cluster_role_result
+ when: not openshift.common.version_gte_3_7 | bool
changed_when:
- reconcile_cluster_role_result.stdout != ''
- reconcile_cluster_role_result.rc == 0
@@ -215,7 +214,7 @@
--exclude-groups=system:unauthenticated
--exclude-users=system:anonymous
--additive-only=true --confirm -o name
- when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+ when: not openshift.common.version_gte_3_7 | bool
register: reconcile_bindings_result
changed_when:
- reconcile_bindings_result.stdout != ''
@@ -230,7 +229,45 @@
changed_when:
- reconcile_jenkins_role_binding_result.stdout != ''
- reconcile_jenkins_role_binding_result.rc == 0
- when: openshift.common.version_gte_3_4_or_1_4 | bool
+ when: (not openshift.common.version_gte_3_7 | bool) and (openshift.common.version_gte_3_4_or_1_4 | bool)
+
+ - when: (openshift.common.version_gte_3_6 | bool) and (not openshift.common.version_gte_3_7 | bool)
+ block:
+ - name: Retrieve shared-resource-viewer
+ oc_obj:
+ state: list
+ kind: role
+ name: "shared-resource-viewer"
+ namespace: "openshift"
+ register: objout
+
+ - name: Determine if shared-resource-viewer is protected
+ set_fact:
+ __shared_resource_viewer_protected: true
+ when:
+ - "'results' in objout"
+ - "'results' in objout['results']"
+ - "'annotations' in objout['results']['results'][0]['metadata']"
+ - "'openshift.io/reconcile-protect' in objout['results']['results'][0]['metadata']['annotations']"
+ - "objout['results']['results'][0]['metadata']['annotations']['openshift.io/reconcile-protect'] == 'true'"
+
+ - copy:
+ src: "{{ item }}"
+ dest: "/tmp/{{ item }}"
+ with_items:
+ - "{{ __master_shared_resource_viewer_file }}"
+ when: __shared_resource_viewer_protected is not defined
+
+ - name: Fixup shared-resource-viewer role
+ oc_obj:
+ state: present
+ kind: role
+ name: "shared-resource-viewer"
+ namespace: "openshift"
+ files:
+ - "/tmp/{{ __master_shared_resource_viewer_file }}"
+ delete_after: true
+ when: __shared_resource_viewer_protected is not defined
- name: Reconcile Security Context Constraints
command: >
@@ -269,7 +306,7 @@
| oo_select_keys(groups.oo_masters_to_config)
| oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
- set_fact:
- reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+ reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) | list }}"
- fail:
msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
when: reconcile_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index 54c85f0fb..f64f0e003 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -68,6 +68,7 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
+ openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index d7cb38d03..43da5b629 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -68,6 +68,7 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
+ openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
index a3d0d6305..30e719d8f 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -47,6 +47,10 @@
tags:
- pre_upgrade
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
- include: ../disable_master_excluders.yml
tags:
- pre_upgrade
@@ -71,10 +75,6 @@
# docker is configured and running.
skip_docker_role: True
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
- include: ../../../openshift-master/validate_restart.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
index 5fee56615..e9cec9220 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -55,6 +55,10 @@
tags:
- pre_upgrade
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
- include: ../disable_master_excluders.yml
tags:
- pre_upgrade
@@ -68,6 +72,7 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
+ openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
@@ -75,10 +80,6 @@
# docker is configured and running.
skip_docker_role: True
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
- include: ../../../openshift-master/validate_restart.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 51acd17da..920dc2ffc 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -47,6 +47,14 @@
tags:
- pre_upgrade
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
- include: ../disable_master_excluders.yml
tags:
- pre_upgrade
@@ -71,14 +79,6 @@
# docker is configured and running.
skip_docker_role: True
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
- include: ../../../openshift-master/validate_restart.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 9fe059ac9..27d8515dc 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -55,6 +55,14 @@
tags:
- pre_upgrade
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
- include: ../disable_master_excluders.yml
tags:
- pre_upgrade
@@ -68,6 +76,7 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
+ openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
@@ -75,10 +84,6 @@
# docker is configured and running.
skip_docker_role: True
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
- include: ../../../openshift-master/validate_restart.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 1b10d4e37..ba6fcc3f8 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -48,6 +48,10 @@
tags:
- pre_upgrade
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
- include: ../disable_node_excluders.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
index ed89dbe8d..df59a8782 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
@@ -14,3 +14,8 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'kubernetesMasterConfig.admissionConfig'
yaml_value:
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.election.lockName'
+ yaml_value: 'openshift-master-controllers'
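
modify_yaml is the repository's own small module for setting a single dotted key in a YAML file on the target host; the hunk above adds a second call to pin the controller election lock name. A sketch of how the two calls in this file compose, assuming the default /etc/origin config base (leaving yaml_value empty, as the existing admissionConfig task does, writes a null value for that key):

---
# Sketch: modify_yaml edits one dotted key per call on the target host.
- hosts: oo_masters_to_config
  gather_facts: false
  tasks:
    - name: Blank out the old admission config key (empty value becomes null)
      modify_yaml:
        dest: /etc/origin/master/master-config.yaml
        yaml_key: 'kubernetesMasterConfig.admissionConfig'
        yaml_value:

    - name: Pin the controller election lock name
      modify_yaml:
        dest: /etc/origin/master/master-config.yaml
        yaml_key: 'controllerConfig.election.lockName'
        yaml_value: 'openshift-master-controllers'
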
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 9ec40723a..f1ca1edb9 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -21,6 +21,10 @@
tags:
- pre_upgrade
+- include: ../pre/verify_etcd3_backend.yml
+ tags:
+ - pre_upgrade
+
- name: Update repos and initialize facts on all hosts
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
tags:
@@ -47,6 +51,14 @@
tags:
- pre_upgrade
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
- include: ../disable_master_excluders.yml
tags:
- pre_upgrade
@@ -71,14 +83,6 @@
# docker is configured and running.
skip_docker_role: True
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
- include: ../../../openshift-master/validate_restart.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index f97f34c3b..6c4f9671b 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -29,6 +29,10 @@
tags:
- pre_upgrade
+- include: ../pre/verify_etcd3_backend.yml
+ tags:
+ - pre_upgrade
+
- name: Update repos on control plane hosts
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tags:
@@ -55,6 +59,14 @@
tags:
- pre_upgrade
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
- include: ../disable_master_excluders.yml
tags:
- pre_upgrade
@@ -68,6 +80,7 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
+ openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
@@ -75,10 +88,6 @@
# docker is configured and running.
skip_docker_role: True
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
- include: ../../../openshift-master/validate_restart.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index e95b90cd5..bc080f9a3 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -48,6 +48,10 @@
tags:
- pre_upgrade
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
- include: ../disable_node_excluders.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-etcd/ca.yml b/playbooks/common/openshift-etcd/ca.yml
new file mode 100644
index 000000000..ac5543be9
--- /dev/null
+++ b/playbooks/common/openshift-etcd/ca.yml
@@ -0,0 +1,15 @@
+---
+- name: Generate new etcd CA
+ hosts: oo_first_etcd
+ roles:
+ - role: openshift_etcd_facts
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: ca
+ vars:
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ when:
+ - etcd_ca_setup | default(True) | bool
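
ca.yml is the first of several thin playbooks in this patch that call a single entry point of the consolidated etcd role through include_role/tasks_from instead of listing the role under roles:. One role can then expose several task files (ca, server_certificates, backup, migrate, ...) and each playbook pulls in only the piece it needs, with vars: scoped to that call. The mechanism in isolation, with a hypothetical role name:

---
# Sketch: a role exposing multiple entry points, selected per include.
# roles/myrole/tasks/ca.yml and roles/myrole/tasks/backup.yml would each
# hold an independent task list.
- hosts: oo_first_etcd
  gather_facts: false
  tasks:
    - name: Run only the CA tasks from the role
      include_role:
        name: myrole        # hypothetical role
        tasks_from: ca      # resolves to roles/myrole/tasks/ca.yml
      vars:
        some_scoped_var: true   # applies only to this include
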
diff --git a/playbooks/common/openshift-etcd/certificates.yml b/playbooks/common/openshift-etcd/certificates.yml
new file mode 100644
index 000000000..eb6b94f33
--- /dev/null
+++ b/playbooks/common/openshift-etcd/certificates.yml
@@ -0,0 +1,4 @@
+---
+- include: server_certificates.yml
+
+- include: master_etcd_certificates.yml
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index f2b85eea1..48d46bbb0 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -1,4 +1,18 @@
---
+- name: etcd Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set etcd install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_etcd: "In Progress"
+ aggregate: false
+
+- include: ca.yml
+
+- include: certificates.yml
+
- name: Configure etcd
hosts: oo_etcd_to_config
any_errors_fatal: true
@@ -10,3 +24,13 @@
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- role: nickhammond.logrotate
+
+- name: etcd Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set etcd install 'Complete'
+ set_stats:
+ data:
+ installer_phase_etcd: "Complete"
+ aggregate: false
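
The "Install Checkpoint Start/End" plays added here (and to the other component playbooks below) bracket each phase with set_stats so a status callback, if one is configured, can report which installer_phase_* was in flight when a run stopped; aggregate: false overwrites the value instead of appending to it. The wrapper shape, sketched for a hypothetical phase:

---
# Sketch: bracket a component install with set_stats checkpoints.
- name: Example Install Checkpoint Start
  hosts: oo_all_hosts
  gather_facts: false
  tasks:
    - name: Set example install 'In Progress'
      set_stats:
        data:
          installer_phase_example: "In Progress"   # hypothetical phase key
        aggregate: false

# ... the component's real plays would run here ...

- name: Example Install Checkpoint End
  hosts: oo_all_hosts
  gather_facts: false
  tasks:
    - name: Set example install 'Complete'
      set_stats:
        data:
          installer_phase_example: "Complete"
        aggregate: false
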
diff --git a/playbooks/common/openshift-etcd/embedded2external.yml b/playbooks/common/openshift-etcd/embedded2external.yml
new file mode 100644
index 000000000..9264f3c32
--- /dev/null
+++ b/playbooks/common/openshift-etcd/embedded2external.yml
@@ -0,0 +1,172 @@
+---
+- name: Pre-migrate checks
+ hosts: localhost
+ tasks:
+ # Check there is only one etcd host
+ - assert:
+ that: groups.oo_etcd_to_config | default([]) | length == 1
+ msg: "[etcd] group must contain only one host"
+ # Check there is only one master
+ - assert:
+ that: groups.oo_masters_to_config | default([]) | length == 1
+ msg: "[master] group must contain only one host"
+
+# 1. stop a master
+- name: Prepare masters for etcd data migration
+ hosts: oo_first_master
+ roles:
+ - role: openshift_facts
+ tasks:
+ - name: Check the master API is ready
+ include_role:
+ name: openshift_master
+ tasks_from: check_master_api_is_ready
+ - set_fact:
+ master_service: "{{ openshift.common.service_type + '-master' }}"
+ embedded_etcd_backup_suffix: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ - debug:
+ msg: "master service name: {{ master_service }}"
+ - name: Stop master
+ service:
+ name: "{{ master_service }}"
+ state: stopped
+ # 2. backup embedded etcd
+ # Can't use with_items with include_role: https://github.com/ansible/ansible/issues/21285
+ - include_role:
+ name: etcd
+ tasks_from: backup
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_embedded_etcd: "{{ true }}"
+ r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}"
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.archive
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_embedded_etcd: "{{ true }}"
+ r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}"
+
+# 3. deploy certificates (for etcd and master)
+- include: ca.yml
+
+- include: server_certificates.yml
+
+- name: Backup etcd client certificates for master host
+ hosts: oo_first_master
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup_master_etcd_certificates
+
+- name: Redeploy master etcd certificates
+ include: master_etcd_certificates.yml
+ vars:
+ etcd_certificates_redeploy: "{{ true }}"
+
+# 4. deploy external etcd
+- include: ../openshift-etcd/config.yml
+
+# 5. stop external etcd
+- name: Cleanse etcd
+ hosts: oo_etcd_to_config[0]
+ gather_facts: no
+ pre_tasks:
+ - include_role:
+ name: etcd
+ tasks_from: disable_etcd
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ - include_role:
+ name: etcd
+ tasks_from: clean_data
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+
+# 6. copy the embedded etcd backup to the external host
+# TODO(jchaloup): if the etcd and first master are on the same host, just copy the directory
+- name: Copy embedded etcd backup to the external host
+ hosts: localhost
+ tasks:
+ - name: Create local temp directory for syncing etcd backup
+ local_action: command mktemp -d /tmp/etcd_backup-XXXXXXX
+ register: g_etcd_client_mktemp
+ changed_when: False
+ become: no
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.fetch
+ vars:
+ r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_first_master.0].openshift.common.etcd_runtime }}"
+ etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_embedded_etcd: "{{ true }}"
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+ delegate_to: "{{ groups.oo_first_master[0] }}"
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.copy
+ vars:
+ r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.etcd_runtime }}"
+ etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+ delegate_to: "{{ groups.oo_etcd_to_config[0] }}"
+
+ - debug:
+ msg: "etcd_backup_dest_directory: {{ g_etcd_client_mktemp.stdout }}"
+
+ - name: Delete temporary directory
+ local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent
+ changed_when: False
+ become: no
+
+# 7. force new cluster from the backup
+- name: Force new etcd cluster
+ hosts: oo_etcd_to_config[0]
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup.unarchive
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.force_new_cluster
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+ etcd_peer: "{{ openshift.common.ip }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
+
+# 8. re-configure master to use the external etcd
+- name: Configure master to use external etcd
+ hosts: oo_first_master
+ tasks:
+ - include_role:
+ name: openshift_master
+ tasks_from: configure_external_etcd
+ vars:
+ etcd_peer_url_scheme: "https"
+ etcd_ip: "{{ openshift.common.ip }}"
+ etcd_peer_port: 2379
+
+ # 9. start the master
+ - name: Start master
+ service:
+ name: "{{ master_service }}"
+ state: started
+ register: service_status
+ until: service_status.state is defined and service_status.state == "started"
+ retries: 5
+ delay: 10
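
The two assert tasks at the top make the migration refuse to run unless there is exactly one master and exactly one target etcd host. A minimal inventory for such a run might look like the following YAML sketch; host names are placeholders, and the oo_* groups the playbook targets are derived from these groups by the earlier group-evaluation plays:

# Sketch inventory (YAML form): one master that is also a node, one etcd host.
all:
  children:
    OSEv3:
      children:
        masters:
          hosts:
            master1.example.com: {}
        nodes:
          hosts:
            master1.example.com: {}
        etcd:
          hosts:
            etcd1.example.com: {}
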
diff --git a/playbooks/common/openshift-etcd/master_etcd_certificates.yml b/playbooks/common/openshift-etcd/master_etcd_certificates.yml
new file mode 100644
index 000000000..0a25aac57
--- /dev/null
+++ b/playbooks/common/openshift-etcd/master_etcd_certificates.yml
@@ -0,0 +1,14 @@
+---
+- name: Create etcd client certificates for master hosts
+ hosts: oo_masters_to_config
+ any_errors_fatal: true
+ roles:
+ - role: openshift_etcd_facts
+ - role: openshift_etcd_client_certificates
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
+ etcd_cert_prefix: "master.etcd-"
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml
index a2af7bb21..2456ad3a8 100644
--- a/playbooks/common/openshift-etcd/migrate.yml
+++ b/playbooks/common/openshift-etcd/migrate.yml
@@ -1,11 +1,13 @@
---
- name: Run pre-checks
hosts: oo_etcd_to_migrate
- roles:
- - role: etcd_migrate
- r_etcd_migrate_action: check
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- etcd_peer: "{{ ansible_default_ipv4.address }}"
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: migrate.pre_check
+ vars:
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ ansible_default_ipv4.address }}"
# TODO: This will be different for release-3.6 branch
- name: Prepare masters for etcd data migration
@@ -28,12 +30,15 @@
gather_facts: no
roles:
- role: openshift_facts
- - role: etcd_common
- r_etcd_common_action: backup
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- r_etcd_common_backup_tag: pre-migration
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ post_tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migration
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- name: Gate on etcd backup
hosts: localhost
@@ -45,7 +50,7 @@
| oo_select_keys(groups.oo_etcd_to_migrate)
| oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
- set_fact:
- etcd_backup_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_backup_completed) }}"
+ etcd_backup_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_backup_completed) | list }}"
- fail:
msg: "Migration cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
when:
@@ -65,25 +70,28 @@
- name: Migrate data on first etcd
hosts: oo_etcd_to_migrate[0]
gather_facts: no
- roles:
- - role: etcd_migrate
- r_etcd_migrate_action: migrate
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- etcd_peer: "{{ ansible_default_ipv4.address }}"
- etcd_url_scheme: "https"
- etcd_peer_url_scheme: "https"
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: migrate
+ vars:
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ openshift.common.ip }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
- name: Clean data stores on remaining etcd hosts
hosts: oo_etcd_to_migrate[1:]
gather_facts: no
- roles:
- - role: etcd_migrate
- r_etcd_migrate_action: clean_data
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- etcd_peer: "{{ ansible_default_ipv4.address }}"
- etcd_url_scheme: "https"
- etcd_peer_url_scheme: "https"
- post_tasks:
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: clean_data
+ vars:
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ openshift.common.ip }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
- name: Add etcd hosts
delegate_to: localhost
add_host:
@@ -108,25 +116,27 @@
| oo_select_keys(groups.oo_etcd_to_migrate)
| oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}"
- set_fact:
- etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) }}"
+ etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) | list }}"
- name: Add TTLs on the first master
hosts: oo_first_master[0]
- roles:
- - role: etcd_migrate
- r_etcd_migrate_action: add_ttls
- etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].ansible_default_ipv4.address }}"
- etcd_url_scheme: "https"
- etcd_peer_url_scheme: "https"
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: migrate.add_ttls
+ vars:
+ etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].openshift.common.ip }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
when: etcd_migration_failed | length == 0
- name: Configure masters if etcd data migration is successful
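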
hosts: oo_masters_to_config
- roles:
- - role: etcd_migrate
- r_etcd_migrate_action: configure
- when: etcd_migration_failed | length == 0
tasks:
+ - include_role:
+ name: etcd
+ tasks_from: migrate.configure_master
+ when: etcd_migration_failed | length == 0
- debug:
msg: "Skipping master re-configuration since migration failed."
when:
diff --git a/playbooks/common/openshift-etcd/restart.yml b/playbooks/common/openshift-etcd/restart.yml
index af1ef245a..5eaea5ae8 100644
--- a/playbooks/common/openshift-etcd/restart.yml
+++ b/playbooks/common/openshift-etcd/restart.yml
@@ -7,3 +7,21 @@
service:
name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
state: restarted
+ when:
+ - not g_etcd_certificates_expired | default(false) | bool
+
+- name: Restart etcd
+ hosts: oo_etcd_to_config
+ tasks:
+ - name: stop etcd
+ service:
+ name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
+ state: stopped
+ when:
+ - g_etcd_certificates_expired | default(false) | bool
+ - name: start etcd
+ service:
+ name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
+ state: started
+ when:
+ - g_etcd_certificates_expired | default(false) | bool
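
The second play deliberately splits the restart into a stop task followed by a start task: with the default linear strategy the stop runs on every etcd host before any host reaches the start, so when certificates have expired the whole cluster goes down and comes back up on the new certificates together, instead of members bouncing one by one with a mix of old and new certs. g_etcd_certificates_expired is expected to be set earlier in the certificate-redeploy flow; purely as an illustration, a flag like this could be derived as follows (the certificate path is an assumption, not taken from this patch):

---
# Sketch only: one way an "expired" flag might be derived.
- hosts: oo_etcd_to_config
  gather_facts: false
  tasks:
    - name: Check whether the etcd server certificate has already expired
      command: openssl x509 -checkend 0 -noout -in /etc/etcd/server.crt   # path assumed
      register: etcd_cert_check
      changed_when: false
      failed_when: false

    - name: Record the result for later plays
      set_fact:
        g_etcd_certificates_expired: "{{ etcd_cert_check.rc != 0 }}"
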
diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml
index 5f8bb1c7a..b5ba2bbba 100644
--- a/playbooks/common/openshift-etcd/scaleup.yml
+++ b/playbooks/common/openshift-etcd/scaleup.yml
@@ -20,13 +20,23 @@
/usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }}
--key-file {{ etcd_peer_key_file }}
--ca-file {{ etcd_peer_ca_file }}
- -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }}
+ -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_ip }}:{{ etcd_client_port }}
member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}
delegate_to: "{{ etcd_ca_host }}"
+ failed_when:
+ - etcd_add_check.rc == 1
+ - ("peerURL exists" not in etcd_add_check.stderr)
register: etcd_add_check
retries: 3
delay: 10
until: etcd_add_check.rc == 0
+ - include_role:
+ name: etcd
+ tasks_from: server_certificates
+ vars:
+ etcd_peers: "{{ groups.oo_new_etcd_to_config | default([], true) }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_new_etcd_to_config | default([], true) }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
roles:
- role: os_firewall
when: etcd_add_check.rc == 0
@@ -53,3 +63,21 @@
retries: 3
delay: 30
until: scaleup_health.rc == 0
+
+- name: Update master etcd client urls
+ hosts: oo_masters_to_config
+ serial: 1
+ vars:
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ openshift_master_etcd_hosts: "{{ hostvars
+ | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config']))
+ | oo_collect('openshift.common.hostname')
+ | default(none, true) }}"
+ openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
+ roles:
+ - role: openshift_master_facts
+ post_tasks:
+ - include_role:
+ name: openshift_master
+ tasks_from: update_etcd_client_urls
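
The member-add task in this file pairs register/retries/until with a failed_when override so that a non-zero exit is tolerated when stderr contains "peerURL exists"; re-running scaleup after a partially completed attempt therefore does not abort the play. The same shape, reduced to a generic idempotent CLI call (the command is a placeholder):

---
# Sketch: retry a CLI call, but treat an "already exists" error as success.
- hosts: oo_first_etcd
  gather_facts: false
  tasks:
    - name: Register a member, tolerating re-runs
      command: /usr/local/bin/register-member node1   # placeholder command
      register: add_check
      retries: 3
      delay: 10
      until: add_check.rc == 0 or 'already exists' in add_check.stderr
      failed_when:
        - add_check.rc != 0
        - "'already exists' not in add_check.stderr"
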
diff --git a/playbooks/common/openshift-etcd/server_certificates.yml b/playbooks/common/openshift-etcd/server_certificates.yml
new file mode 100644
index 000000000..10e06747b
--- /dev/null
+++ b/playbooks/common/openshift-etcd/server_certificates.yml
@@ -0,0 +1,15 @@
+---
+- name: Create etcd server certificates for etcd hosts
+ hosts: oo_etcd_to_config
+ any_errors_fatal: true
+ roles:
+ - role: openshift_etcd_facts
+ post_tasks:
+ - include_role:
+ name: etcd
+ tasks_from: server_certificates
+ vars:
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
index d9de578f3..80cda9e21 100644
--- a/playbooks/common/openshift-glusterfs/config.yml
+++ b/playbooks/common/openshift-glusterfs/config.yml
@@ -1,4 +1,14 @@
---
+- name: GlusterFS Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set GlusterFS install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_glusterfs: "In Progress"
+ aggregate: false
+
- name: Open firewall ports for GlusterFS nodes
hosts: glusterfs
tasks:
@@ -24,3 +34,13 @@
include_role:
name: openshift_storage_glusterfs
when: groups.oo_glusterfs_to_config | default([]) | count > 0
+
+- name: GlusterFS Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set GlusterFS install 'Complete'
+ set_stats:
+ data:
+ installer_phase_glusterfs: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml
index 09ed81a83..2a703cb61 100644
--- a/playbooks/common/openshift-loadbalancer/config.yml
+++ b/playbooks/common/openshift-loadbalancer/config.yml
@@ -1,4 +1,23 @@
---
+- name: Load Balancer Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set load balancer install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_loadbalancer: "In Progress"
+ aggregate: false
+
+- name: Configure firewall and docker for load balancers
+ hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config
+ vars:
+ openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
+ roles:
+ - role: os_firewall
+ - role: openshift_docker
+ when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool
+
- name: Configure load balancers
hosts: oo_lb_to_config
vars:
@@ -14,5 +33,15 @@
+ openshift_loadbalancer_additional_backends | default([]) }}"
openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
roles:
- - role: os_firewall
- role: openshift_loadbalancer
+ - role: tuned
+
+- name: Load Balancer Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set load balancer install 'Complete'
+ set_stats:
+ data:
+ installer_phase_loadbalancer: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-management/config.yml b/playbooks/common/openshift-management/config.yml
new file mode 100644
index 000000000..0aaafe440
--- /dev/null
+++ b/playbooks/common/openshift-management/config.yml
@@ -0,0 +1,15 @@
+---
+- name: Setup CFME
+ hosts: oo_first_master
+ pre_tasks:
+ - name: Create a temporary place to evaluate the PV templates
+ command: mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: r_openshift_management_mktemp
+ changed_when: false
+
+ tasks:
+ - name: Run the CFME Setup Role
+ include_role:
+ name: openshift_management
+ vars:
+ template_dir: "{{ hostvars[groups.masters.0].r_openshift_management_mktemp.stdout }}"
diff --git a/playbooks/common/openshift-cfme/filter_plugins b/playbooks/common/openshift-management/filter_plugins
index 99a95e4ca..99a95e4ca 120000
--- a/playbooks/common/openshift-cfme/filter_plugins
+++ b/playbooks/common/openshift-management/filter_plugins
diff --git a/playbooks/common/openshift-cfme/library b/playbooks/common/openshift-management/library
index ba40d2f56..ba40d2f56 120000
--- a/playbooks/common/openshift-cfme/library
+++ b/playbooks/common/openshift-management/library
diff --git a/playbooks/common/openshift-cfme/roles b/playbooks/common/openshift-management/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/common/openshift-cfme/roles
+++ b/playbooks/common/openshift-management/roles
diff --git a/playbooks/common/openshift-cfme/uninstall.yml b/playbooks/common/openshift-management/uninstall.yml
index 78b8e7668..698d93405 100644
--- a/playbooks/common/openshift-cfme/uninstall.yml
+++ b/playbooks/common/openshift-management/uninstall.yml
@@ -4,5 +4,5 @@
tasks:
- name: Run the CFME Uninstall Role Tasks
include_role:
- name: openshift_cfme
+ name: openshift_management
tasks_from: uninstall
diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml
index 7468c78f0..1b3eb268a 100644
--- a/playbooks/common/openshift-master/additional_config.yml
+++ b/playbooks/common/openshift-master/additional_config.yml
@@ -1,4 +1,14 @@
---
+- name: Master Additional Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Master Additional install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_master_additional: "In Progress"
+ aggregate: false
+
- name: Additional master configuration
hosts: oo_first_master
vars:
@@ -10,14 +20,27 @@
- role: openshift_master_cluster
when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
- role: openshift_examples
+ when: openshift_install_examples | default(true, true) | bool
registry_url: "{{ openshift.master.registry_url }}"
- when: openshift_install_examples | default(True)
- role: openshift_hosted_templates
registry_url: "{{ openshift.master.registry_url }}"
- role: openshift_manageiq
when: openshift_use_manageiq | default(false) | bool
- role: cockpit
- when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
- (osm_use_cockpit | bool or osm_use_cockpit is undefined ) and ( openshift.common.deployment_subtype != 'registry' )
+ when:
+ - not openshift.common.is_atomic
+ - deployment_type == 'openshift-enterprise'
+ - osm_use_cockpit is undefined or osm_use_cockpit | bool
+ - openshift.common.deployment_subtype != 'registry'
- role: flannel_register
when: openshift_use_flannel | default(false) | bool
+
+- name: Master Additional Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Master Additional install 'Complete'
+ set_stats:
+ data:
+ installer_phase_master_additional: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-master/certificates.yml b/playbooks/common/openshift-master/certificates.yml
new file mode 100644
index 000000000..f6afbc36f
--- /dev/null
+++ b/playbooks/common/openshift-master/certificates.yml
@@ -0,0 +1,14 @@
+---
+- name: Create OpenShift certificates for master hosts
+ hosts: oo_masters_to_config
+ vars:
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ roles:
+ - role: openshift_master_facts
+ - role: openshift_named_certificates
+ - role: openshift_ca
+ - role: openshift_master_certificates
+ openshift_master_etcd_hosts: "{{ hostvars
+ | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+ | oo_collect('openshift.common.hostname')
+ | default(none, true) }}"
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index c77d7bb87..6e57f282e 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -1,4 +1,16 @@
---
+- name: Master Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Master install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_master: "In Progress"
+ aggregate: false
+
+- include: certificates.yml
+
- name: Disable excluders
hosts: oo_masters_to_config
gather_facts: no
@@ -9,9 +21,6 @@
- name: Gather and set facts for master hosts
hosts: oo_masters_to_config
- vars:
- t_oo_option_master_debug_level: "{{ lookup('oo_option', 'openshift_master_debug_level') }}"
-
pre_tasks:
# Per https://bugzilla.redhat.com/show_bug.cgi?id=1469336
#
@@ -35,40 +44,21 @@
file:
path: "/etc/origin/{{ item }}"
state: absent
- when: rpmgenerated_config.stat.exists == true and deployment_type in ['openshift-enterprise', 'atomic-enterprise']
+ when:
+ - rpmgenerated_config.stat.exists == true
+ - deployment_type == 'openshift-enterprise'
with_items:
- master
- node
- .config_managed
- set_fact:
- openshift_master_pod_eviction_timeout: "{{ lookup('oo_option', 'openshift_master_pod_eviction_timeout') | default(none, true) }}"
- when: openshift_master_pod_eviction_timeout is not defined
-
- - set_fact:
openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
openshift_master_etcd_hosts: "{{ hostvars
| oo_select_keys(groups['oo_etcd_to_config']
| default([]))
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
-
- - set_fact:
- openshift_master_debug_level: "{{ t_oo_option_master_debug_level }}"
- when: openshift_master_debug_level is not defined and t_oo_option_master_debug_level != ""
-
- - set_fact:
- openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}"
- when: openshift_master_default_subdomain is not defined
- - set_fact:
- openshift_hosted_metrics_deploy: "{{ lookup('oo_option', 'openshift_hosted_metrics_deploy') | default(false, true) }}"
- when: openshift_hosted_metrics_deploy is not defined
- - set_fact:
- openshift_hosted_metrics_duration: "{{ lookup('oo_option', 'openshift_hosted_metrics_duration') | default(7) }}"
- when: openshift_hosted_metrics_duration is not defined
- - set_fact:
- openshift_hosted_metrics_resolution: "{{ lookup('oo_option', 'openshift_hosted_metrics_resolution') | default('10s', true) }}"
- when: openshift_hosted_metrics_resolution is not defined
roles:
- openshift_facts
post_tasks:
@@ -179,44 +169,54 @@
openshift_master_count: "{{ openshift.master.master_count }}"
openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ openshift_master_etcd_hosts: "{{ hostvars
+ | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+ | oo_collect('openshift.common.hostname')
+ | default(none, true) }}"
openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
| oo_collect('openshift.common.ip') | default([]) | join(',')
}}"
roles:
- role: os_firewall
+ - role: openshift_master_facts
+ - role: openshift_hosted_facts
+ - role: openshift_clock
+ - role: openshift_cloud_provider
+ - role: openshift_builddefaults
+ - role: openshift_buildoverrides
+ - role: nickhammond.logrotate
+ - role: contiv
+ contiv_role: netmaster
+ when: openshift_use_contiv | default(False) | bool
- role: openshift_master
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- openshift_master_etcd_hosts: "{{ hostvars
- | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
- | oo_collect('openshift.common.hostname')
- | default(none, true) }}"
openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
- etcd_cert_prefix: "master.etcd-"
r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
openshift_master_is_scaleup_host: "{{ g_openshift_master_is_scaleup | default(false) }}"
openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}"
openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}"
openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}"
+ - role: tuned
+ - role: nuage_ca
+ when: openshift_use_nuage | default(false) | bool
+ - role: nuage_common
+ when: openshift_use_nuage | default(false) | bool
- role: nuage_master
when: openshift_use_nuage | default(false) | bool
- role: calico_master
when: openshift_use_calico | default(false) | bool
-
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
changed_when: False
+- name: Configure API Aggregation on masters
+ hosts: oo_masters
+ serial: 1
+ tasks:
+ - include: tasks/wire_aggregator.yml
+
- name: Re-enable excluder if it was previously enabled
hosts: oo_masters_to_config
gather_facts: no
@@ -224,3 +224,13 @@
- role: openshift_excluder
r_openshift_excluder_action: enable
r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+
+- name: Master Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Master install 'Complete'
+ set_stats:
+ data:
+ installer_phase_master: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js b/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js
new file mode 100644
index 000000000..d0a9f11dc
--- /dev/null
+++ b/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js
@@ -0,0 +1,2 @@
+// empty file so that the master-config can still point to a file that exists
+// this file will be replaced by the template service broker role if enabled
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index 17f9ef4bc..f4dc9df8a 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -43,8 +43,14 @@
delay: 1
changed_when: false
+- include: ../openshift-master/set_network_facts.yml
+
+- include: ../openshift-etcd/certificates.yml
+
- include: ../openshift-master/config.yml
- include: ../openshift-loadbalancer/config.yml
+- include: ../openshift-node/certificates.yml
+
- include: ../openshift-node/config.yml
diff --git a/playbooks/common/openshift-master/set_network_facts.yml b/playbooks/common/openshift-master/set_network_facts.yml
new file mode 100644
index 000000000..9a6cf26fc
--- /dev/null
+++ b/playbooks/common/openshift-master/set_network_facts.yml
@@ -0,0 +1,34 @@
+---
+- name: Read first master's config
+ hosts: oo_first_master
+ gather_facts: no
+ tasks:
+ - stat:
+ path: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ register: g_master_config_stat
+ - slurp:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ register: g_master_config_slurp
+
+- name: Set network facts for masters
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_facts
+ post_tasks:
+ - block:
+ - set_fact:
+ osm_cluster_network_cidr: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.clusterNetworkCIDR }}"
+ when: osm_cluster_network_cidr is not defined
+ - set_fact:
+ osm_host_subnet_length: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.hostSubnetLength }}"
+ when: osm_host_subnet_length is not defined
+ - set_fact:
+ openshift_portal_net: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.serviceNetworkCIDR }}"
+ when: openshift_portal_net is not defined
+ - openshift_facts:
+ role: common
+ local_facts:
+ portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+ when:
+ - hostvars[groups.oo_first_master.0].g_master_config_stat.stat.exists | bool
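
set_network_facts.yml relies on the usual trick for reading a remote YAML file into the play: slurp returns the file base64-encoded, and content | b64decode | from_yaml turns it back into a structure whose keys can be read with normal dotted access. A self-contained sketch of the same pattern against an arbitrary file (path and key are illustrative):

---
# Sketch: read a YAML file from a remote host and lift one key into a fact.
- hosts: oo_first_master
  gather_facts: false
  tasks:
    - name: Read the remote config file
      slurp:
        src: /etc/example/config.yaml   # illustrative path
      register: g_config_slurp

    - name: Extract a single key from the decoded document
      set_fact:
        l_cluster_cidr: "{{ (g_config_slurp.content | b64decode | from_yaml).networkConfig.clusterNetworkCIDR }}"
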
diff --git a/roles/openshift_service_catalog/tasks/wire_aggregator.yml b/playbooks/common/openshift-master/tasks/wire_aggregator.yml
index 1c788470a..560eea785 100644
--- a/roles/openshift_service_catalog/tasks/wire_aggregator.yml
+++ b/playbooks/common/openshift-master/tasks/wire_aggregator.yml
@@ -9,24 +9,23 @@
path: /etc/origin/master/front-proxy-ca.crt
register: first_proxy_ca_crt
changed_when: false
- delegate_to: "{{ first_master }}"
+ delegate_to: "{{ groups.oo_first_master.0 }}"
- name: Check for First Master Aggregator Signer key
stat:
path: /etc/origin/master/front-proxy-ca.crt
register: first_proxy_ca_key
changed_when: false
- delegate_to: "{{ first_master }}"
-
+ delegate_to: "{{ groups.oo_first_master.0 }}"
# TODO: this currently has a bug where hostnames are required
- name: Creating First Master Aggregator signer certs
command: >
- oc adm ca create-signer-cert
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm ca create-signer-cert
--cert=/etc/origin/master/front-proxy-ca.crt
--key=/etc/origin/master/front-proxy-ca.key
--serial=/etc/origin/master/ca.serial.txt
- delegate_to: "{{ first_master }}"
+ delegate_to: "{{ groups.oo_first_master.0 }}"
when:
- not first_proxy_ca_crt.stat.exists
- not first_proxy_ca_key.stat.exists
@@ -51,7 +50,7 @@
with_items:
- front-proxy-ca.crt
- front-proxy-ca.key
- delegate_to: "{{ first_master }}"
+ delegate_to: "{{ groups.oo_first_master.0 }}"
when:
- not proxy_ca_key.stat.exists
- not proxy_ca_crt.stat.exists
@@ -75,18 +74,36 @@
stat:
path: /etc/origin/master/aggregator-front-proxy.kubeconfig
register: first_front_proxy_kubeconfig
- delegate_to: "{{ first_master }}"
-
-- name: Create first master api-client config for Aggregator
- command: >
- oc adm create-api-client-config
- --certificate-authority=/etc/origin/master/front-proxy-ca.crt
- --signer-cert=/etc/origin/master/front-proxy-ca.crt
- --signer-key=/etc/origin/master/front-proxy-ca.key
- --user aggregator-front-proxy
- --client-dir=/etc/origin/master
- --signer-serial=/etc/origin/master/ca.serial.txt
- delegate_to: "{{ first_master }}"
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ run_once: true
+
+# create-api-client-config generates a ca.crt file which will
+# overwrite the OpenShift CA certificate. Generate the aggregator
+# kubeconfig in a temporary directory and then copy files into the
+# master config dir to avoid overwriting ca.crt.
+- block:
+ - name: Create first master api-client config for Aggregator
+ command: >
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm create-api-client-config
+ --certificate-authority=/etc/origin/master/front-proxy-ca.crt
+ --signer-cert=/etc/origin/master/front-proxy-ca.crt
+ --signer-key=/etc/origin/master/front-proxy-ca.key
+ --user aggregator-front-proxy
+ --client-dir={{ certtemp.stdout }}
+ --signer-serial=/etc/origin/master/ca.serial.txt
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ run_once: true
+ - name: Copy first master api-client config for Aggregator
+ copy:
+ src: "{{ certtemp.stdout }}/{{ item }}"
+ dest: "/etc/origin/master/"
+ remote_src: true
+ with_items:
+ - aggregator-front-proxy.crt
+ - aggregator-front-proxy.key
+ - aggregator-front-proxy.kubeconfig
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ run_once: true
when:
- not first_front_proxy_kubeconfig.stat.exists
@@ -100,7 +117,7 @@
src: "/etc/origin/master/{{ item }}"
dest: "{{ certtemp.stdout }}/{{ item }}"
flat: yes
- delegate_to: "{{ first_master }}"
+ delegate_to: "{{ groups.oo_first_master.0 }}"
with_items:
- aggregator-front-proxy.crt
- aggregator-front-proxy.key
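
The block comment in this file explains the detour: oc adm create-api-client-config also writes a ca.crt next to the client files it generates, so generating straight into /etc/origin/master would overwrite the cluster CA. The tasks therefore generate into a temporary directory and copy back only the aggregator-front-proxy.* files, and because the generation must happen exactly once on the first master, each task pairs delegate_to with run_once. That pairing, stripped down (the generation command is a placeholder):

---
# Sketch: run a generation step once on a designated host, then copy only
# the wanted files into place on that same host.
- hosts: oo_masters_to_config
  gather_facts: false
  tasks:
    - name: Generate client files into a scratch directory (runs once)
      command: /usr/local/bin/generate-client-config --out /tmp/aggregator-scratch   # placeholder
      delegate_to: "{{ groups.oo_first_master.0 }}"
      run_once: true

    - name: Copy only the aggregator files into the master config dir
      copy:
        src: "/tmp/aggregator-scratch/{{ item }}"
        dest: /etc/origin/master/
        remote_src: true
      with_items:
        - aggregator-front-proxy.crt
        - aggregator-front-proxy.key
        - aggregator-front-proxy.kubeconfig
      delegate_to: "{{ groups.oo_first_master.0 }}"
      run_once: true
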
diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml
index 000e46e80..ce672daf5 100644
--- a/playbooks/common/openshift-nfs/config.yml
+++ b/playbooks/common/openshift-nfs/config.yml
@@ -1,6 +1,26 @@
---
+- name: NFS Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set NFS install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_nfs: "In Progress"
+ aggregate: false
+
- name: Configure nfs
hosts: oo_nfs_to_config
roles:
- - role: openshift_facts
+ - role: os_firewall
- role: openshift_storage_nfs
+
+- name: NFS Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set NFS install 'Complete'
+ set_stats:
+ data:
+ installer_phase_nfs: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-node/additional_config.yml b/playbooks/common/openshift-node/additional_config.yml
new file mode 100644
index 000000000..fe51ef833
--- /dev/null
+++ b/playbooks/common/openshift-node/additional_config.yml
@@ -0,0 +1,52 @@
+---
+- name: create additional node network plugin groups
+ hosts: "{{ openshift_node_scale_up_group | default('oo_nodes_to_config') }}"
+ tasks:
+ # Creating these node groups will prevent a ton of skipped tasks.
+ # Create group for flannel nodes
+ - group_by:
+ key: oo_nodes_use_{{ (openshift_use_flannel | default(False)) | ternary('flannel','nothing') }}
+ changed_when: False
+ # Create group for calico nodes
+ - group_by:
+ key: oo_nodes_use_{{ (openshift_use_calico | default(False)) | ternary('calico','nothing') }}
+ changed_when: False
+ # Create group for nuage nodes
+ - group_by:
+ key: oo_nodes_use_{{ (openshift_use_nuage | default(False)) | ternary('nuage','nothing') }}
+ changed_when: False
+ # Create group for contiv nodes
+ - group_by:
+ key: oo_nodes_use_{{ (openshift_use_contiv | default(False)) | ternary('contiv','nothing') }}
+ changed_when: False
+
+- include: etcd_client_config.yml
+ vars:
+ openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv"
+
+- name: Additional node config
+ hosts: oo_nodes_use_flannel
+ roles:
+ - role: flannel
+ etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+ when: openshift_use_flannel | default(false) | bool
+
+- name: Additional node config
+ hosts: oo_nodes_use_calico
+ roles:
+ - role: calico
+ when: openshift_use_calico | default(false) | bool
+
+- name: Additional node config
+ hosts: oo_nodes_use_nuage
+ roles:
+ - role: nuage_node
+ when: openshift_use_nuage | default(false) | bool
+
+- name: Additional node config
+ hosts: oo_nodes_use_contiv
+ roles:
+ - role: contiv
+ contiv_role: netplugin
+ when: openshift_use_contiv | default(false) | bool
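
The group_by tasks at the top of this file turn boolean inventory switches into throwaway groups so the follow-up plays can target, say, oo_nodes_use_flannel directly instead of attaching a when: to every task, which is what the comment about preventing "a ton of skipped tasks" means. The mechanism in isolation, with a hypothetical switch:

---
# Sketch: route hosts into a dynamic group from a boolean var, then target it.
- hosts: oo_nodes_to_config
  gather_facts: false
  tasks:
    - name: Sort hosts by whether the example feature is enabled
      group_by:
        key: oo_nodes_use_{{ (use_example_feature | default(False)) | ternary('example', 'nothing') }}
      changed_when: false

- hosts: oo_nodes_use_example
  gather_facts: false
  tasks:
    - debug:
        msg: "only hosts with use_example_feature=true end up here"
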
diff --git a/playbooks/common/openshift-node/certificates.yml b/playbooks/common/openshift-node/certificates.yml
new file mode 100644
index 000000000..908885ee6
--- /dev/null
+++ b/playbooks/common/openshift-node/certificates.yml
@@ -0,0 +1,8 @@
+---
+- name: Create OpenShift certificates for node hosts
+ hosts: oo_nodes_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_node_certificates
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ when: not openshift_node_bootstrap | default(false) | bool
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 0801c41ff..4f8f98aef 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -1,97 +1,34 @@
---
-- name: Disable excluders
- hosts: oo_nodes_to_config
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
-
-- name: Evaluate node groups
- hosts: localhost
- become: no
- connection: local
+- name: Node Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
tasks:
- - name: Evaluate oo_containerized_master_nodes
- add_host:
- name: "{{ item }}"
- groups: oo_containerized_master_nodes
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
- when:
- - hostvars[item].openshift is defined
- - hostvars[item].openshift.common is defined
- - hostvars[item].openshift.common.is_containerized | bool
- - (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
- changed_when: False
+ - name: Set Node install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_node: "In Progress"
+ aggregate: false
-- name: Configure containerized nodes
- hosts: oo_containerized_master_nodes
- serial: 1
- vars:
- openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
- openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
+- include: certificates.yml
- roles:
- - role: os_firewall
- - role: openshift_node
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+- include: setup.yml
-- name: Configure nodes
- hosts: oo_nodes_to_config:!oo_containerized_master_nodes
- vars:
- openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
- openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- roles:
- - role: os_firewall
- - role: openshift_node
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+- include: containerized_nodes.yml
-- name: Additional node config
- hosts: oo_nodes_to_config
- vars:
- openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
- roles:
- - role: flannel
- etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}"
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
- when: openshift_use_flannel | default(false) | bool
- - role: calico
- when: openshift_use_calico | default(false) | bool
- - role: nuage_node
- when: openshift_use_nuage | default(false) | bool
- - role: contiv
- contiv_role: netplugin
- when: openshift_use_contiv | default(false) | bool
- - role: nickhammond.logrotate
- - role: openshift_manage_node
- openshift_master_host: "{{ groups.oo_first_master.0 }}"
- when: not openshift_node_bootstrap | default(False)
- tasks:
- - name: Create group for deployment type
- group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
- changed_when: False
+- include: configure_nodes.yml
+
+- include: additional_config.yml
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_nodes_to_config
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+- include: manage_node.yml
+
+- include: enable_excluders.yml
+
+- name: Node Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Node install 'Complete'
+ set_stats:
+ data:
+ installer_phase_node: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-node/configure_nodes.yml b/playbooks/common/openshift-node/configure_nodes.yml
new file mode 100644
index 000000000..17259422d
--- /dev/null
+++ b/playbooks/common/openshift-node/configure_nodes.yml
@@ -0,0 +1,17 @@
+---
+- name: Configure nodes
+ hosts: oo_nodes_to_config:!oo_containerized_master_nodes
+ vars:
+ openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+ openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ roles:
+ - role: os_firewall
+ - role: openshift_node
+ - role: tuned
+ - role: nickhammond.logrotate
diff --git a/playbooks/common/openshift-node/containerized_nodes.yml b/playbooks/common/openshift-node/containerized_nodes.yml
new file mode 100644
index 000000000..6fac937e3
--- /dev/null
+++ b/playbooks/common/openshift-node/containerized_nodes.yml
@@ -0,0 +1,19 @@
+---
+- name: Configure containerized nodes
+ hosts: oo_containerized_master_nodes
+ serial: 1
+ vars:
+ openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+ openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+
+ roles:
+ - role: os_firewall
+ - role: openshift_node
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ - role: nickhammond.logrotate
diff --git a/playbooks/common/openshift-node/enable_excluders.yml b/playbooks/common/openshift-node/enable_excluders.yml
new file mode 100644
index 000000000..5288b14f9
--- /dev/null
+++ b/playbooks/common/openshift-node/enable_excluders.yml
@@ -0,0 +1,8 @@
+---
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_nodes_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-node/etcd_client_config.yml b/playbooks/common/openshift-node/etcd_client_config.yml
new file mode 100644
index 000000000..c3fa38a81
--- /dev/null
+++ b/playbooks/common/openshift-node/etcd_client_config.yml
@@ -0,0 +1,11 @@
+---
+- name: etcd_client node config
+ hosts: "{{ openshift_node_scale_up_group | default('this_group_does_not_exist') }}"
+ roles:
+ - role: openshift_facts
+ - role: openshift_etcd_facts
+ - role: openshift_etcd_client_certificates
+ etcd_cert_prefix: flannel.etcd-
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}"
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
diff --git a/playbooks/common/openshift-node/image_prep.yml b/playbooks/common/openshift-node/image_prep.yml
new file mode 100644
index 000000000..fc06621ee
--- /dev/null
+++ b/playbooks/common/openshift-node/image_prep.yml
@@ -0,0 +1,21 @@
+---
+- name: normalize groups
+ include: ../../byo/openshift-cluster/initialize_groups.yml
+
+- name: run the std_include
+ include: ../openshift-cluster/evaluate_groups.yml
+
+- name: run the std_include
+ include: ../openshift-cluster/initialize_facts.yml
+
+- name: run the std_include
+ include: ../openshift-cluster/initialize_openshift_repos.yml
+
+- name: run node config setup
+ include: setup.yml
+
+- name: run node config
+ include: configure_nodes.yml
+
+- name: Re-enable excluders
+ include: enable_excluders.yml
diff --git a/playbooks/common/openshift-node/manage_node.yml b/playbooks/common/openshift-node/manage_node.yml
new file mode 100644
index 000000000..f48a19a9c
--- /dev/null
+++ b/playbooks/common/openshift-node/manage_node.yml
@@ -0,0 +1,12 @@
+---
+- name: Additional node config
+ hosts: "{{ openshift_node_scale_up_group | default('oo_nodes_to_config') }}"
+ vars:
+ openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+ roles:
+ - role: openshift_manage_node
+ openshift_master_host: "{{ groups.oo_first_master.0 }}"
+ tasks:
+ - name: Create group for deployment type
+ group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
+ changed_when: False
diff --git a/playbooks/common/openshift-node/setup.yml b/playbooks/common/openshift-node/setup.yml
new file mode 100644
index 000000000..794c03a67
--- /dev/null
+++ b/playbooks/common/openshift-node/setup.yml
@@ -0,0 +1,27 @@
+---
+- name: Disable excluders
+ hosts: oo_nodes_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+
+- name: Evaluate node groups
+ hosts: localhost
+ become: no
+ connection: local
+ tasks:
+ - name: Evaluate oo_containerized_master_nodes
+ add_host:
+ name: "{{ item }}"
+ groups: oo_containerized_master_nodes
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
+ when:
+ - hostvars[item].openshift is defined
+ - hostvars[item].openshift.common is defined
+ - hostvars[item].openshift.common.is_containerized | bool
+ - (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
+ changed_when: False
diff --git a/playbooks/gcp/openshift-cluster/provision.yml b/playbooks/gcp/openshift-cluster/provision.yml
new file mode 100644
index 000000000..a3d1d46a6
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/provision.yml
@@ -0,0 +1,19 @@
+---
+- name: Ensure all cloud resources necessary for the cluster, including instances, have been started
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+
+ - name: provision a GCP cluster in the specified project
+ include_role:
+ name: openshift_gcp
+
+- name: normalize groups
+ include: ../../byo/openshift-cluster/initialize_groups.yml
+
+- name: run the std_include
+ include: ../../common/openshift-cluster/std_include.yml
+
+- name: run the config
+ include: ../../common/openshift-cluster/config.yml
diff --git a/roles/ansible_service_broker/defaults/main.yml b/roles/ansible_service_broker/defaults/main.yml
index 12929b354..fa982d533 100644
--- a/roles/ansible_service_broker/defaults/main.yml
+++ b/roles/ansible_service_broker/defaults/main.yml
@@ -1,10 +1,19 @@
---
ansible_service_broker_remove: false
+ansible_service_broker_install: false
ansible_service_broker_log_level: info
ansible_service_broker_output_request: false
ansible_service_broker_recovery: true
ansible_service_broker_bootstrap_on_startup: true
-# Recommended you do not enable this for now
ansible_service_broker_dev_broker: false
+ansible_service_broker_refresh_interval: 600s
+# Recommended you do not enable this for now
ansible_service_broker_launch_apb_on_bind: false
+
+ansible_service_broker_image_pull_policy: IfNotPresent
+ansible_service_broker_sandbox_role: edit
+ansible_service_broker_auto_escalate: true
+ansible_service_broker_registry_tag: latest
+ansible_service_broker_registry_whitelist:
+ - '.*-apb$'
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
index b3797ef96..0f4b71124 100644
--- a/roles/ansible_service_broker/tasks/install.yml
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -17,16 +17,24 @@
ansible_service_broker_etcd_image_etcd_path: "{{ ansible_service_broker_etcd_image_etcd_path | default(__ansible_service_broker_etcd_image_etcd_path) }}"
ansible_service_broker_registry_type: "{{ ansible_service_broker_registry_type | default(__ansible_service_broker_registry_type) }}"
+ ansible_service_broker_registry_name: "{{ ansible_service_broker_registry_name | default(__ansible_service_broker_registry_name) }}"
ansible_service_broker_registry_url: "{{ ansible_service_broker_registry_url | default(__ansible_service_broker_registry_url) }}"
ansible_service_broker_registry_user: "{{ ansible_service_broker_registry_user | default(__ansible_service_broker_registry_user) }}"
ansible_service_broker_registry_password: "{{ ansible_service_broker_registry_password | default(__ansible_service_broker_registry_password) }}"
ansible_service_broker_registry_organization: "{{ ansible_service_broker_registry_organization | default(__ansible_service_broker_registry_organization) }}"
+ ansible_service_broker_certs_dir: "{{ openshift.common.config_base }}/service-catalog"
+
- name: set ansible-service-broker image facts using set prefix and tag
set_fact:
ansible_service_broker_image: "{{ ansible_service_broker_image_prefix }}ansible-service-broker:{{ ansible_service_broker_image_tag }}"
ansible_service_broker_etcd_image: "{{ ansible_service_broker_etcd_image_prefix }}etcd:{{ ansible_service_broker_etcd_image_tag }}"
+- slurp:
+ src: "{{ ansible_service_broker_certs_dir }}/ca.crt"
+ register: catalog_ca
+
+
- include: validate_facts.yml
@@ -42,53 +50,119 @@
namespace: openshift-ansible-service-broker
state: present
-- name: Set SA cluster-role
+- name: create ansible-service-broker client serviceaccount
+ oc_serviceaccount:
+ name: asb-client
+ namespace: openshift-ansible-service-broker
+ state: present
+
+- name: Create asb-auth cluster role
+ oc_clusterrole:
+ state: present
+ name: asb-auth
+ rules:
+ - apiGroups: [""]
+ resources: ["namespaces"]
+ verbs: ["create", "delete"]
+ - apiGroups: ["authorization.openshift.io"]
+ resources: ["subjectrulesreview"]
+ verbs: ["create"]
+ - apiGroups: ["authorization.k8s.io"]
+ resources: ["subjectaccessreviews"]
+ verbs: ["create"]
+ - apiGroups: ["authentication.k8s.io"]
+ resources: ["tokenreviews"]
+ verbs: ["create"]
+
+- name: Create asb-access cluster role
+ oc_clusterrole:
+ state: present
+ name: asb-access
+ rules:
+ - nonResourceURLs: ["/ansible-service-broker", "ansible-service-broker/*"]
+ verbs: ["get", "post", "put", "patch", "delete"]
+
+- name: Bind admin cluster-role to asb serviceaccount
oc_adm_policy_user:
state: present
- namespace: "openshift-ansible-service-broker"
+ namespace: openshift-ansible-service-broker
resource_kind: cluster-role
resource_name: admin
user: "system:serviceaccount:openshift-ansible-service-broker:asb"
-- name: create ansible-service-broker service
- oc_service:
- name: asb
+- name: Bind auth cluster role to asb service account
+ oc_adm_policy_user:
+ state: present
namespace: openshift-ansible-service-broker
+ resource_kind: cluster-role
+ resource_name: asb-auth
+ user: "system:serviceaccount:openshift-ansible-service-broker:asb"
+
+- name: Bind asb-access role to asb-client service account
+ oc_adm_policy_user:
state: present
- labels:
- app: openshift-ansible-service-broker
- service: asb
- ports:
- - name: port-1338
- port: 1338
- selector:
- app: openshift-ansible-service-broker
- service: asb
+ namespace: openshift-ansible-service-broker
+ resource_kind: cluster-role
+ resource_name: asb-access
+ user: "system:serviceaccount:openshift-ansible-service-broker:asb-client"
-- name: create etcd service
- oc_service:
- name: etcd
+- name: create asb-client token secret
+ oc_obj:
+ name: asb-client
+ state: present
+ kind: Secret
+ content:
+ path: /tmp/asbclientsecretout
+ data:
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: asb-client
+ annotations:
+ kubernetes.io/service-account.name: asb-client
+ type: kubernetes.io/service-account-token
+
+# Using oc_obj because oc_service doesn't seem to allow annotations
+# TODO: Extend oc_service to allow annotations
+- name: create ansible-service-broker service
+ oc_obj:
+ name: asb
namespace: openshift-ansible-service-broker
state: present
- ports:
- - name: etcd-advertise
- port: 2379
- selector:
- app: openshift-ansible-service-broker
- service: etcd
+ kind: Service
+ content:
+ path: /tmp/asbsvcout
+ data:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: asb
+ labels:
+ app: openshift-ansible-service-broker
+ service: asb
+ annotations:
+ service.alpha.openshift.io/serving-cert-secret-name: asb-tls
+ spec:
+ ports:
+ - name: port-1338
+ port: 1338
+ targetPort: 1338
+ protocol: TCP
+ selector:
+ app: openshift-ansible-service-broker
+ service: asb
- name: create route for ansible-service-broker service
oc_route:
name: asb-1338
namespace: openshift-ansible-service-broker
state: present
+ labels:
+ app: openshift-ansible-service-broker
+ service: asb
service_name: asb
port: 1338
- register: asb_route_out
-
-- name: get ansible-service-broker route name
- set_fact:
- ansible_service_broker_route: "{{ asb_route_out.results.results[0].spec.host }}"
+ tls_termination: Reencrypt
- name: create persistent volume claim for etcd
oc_obj:
@@ -97,7 +171,7 @@
state: present
kind: PersistentVolumeClaim
content:
- path: /tmp/dcout
+ path: /tmp/pvcout
data:
apiVersion: v1
kind: PersistentVolumeClaim
@@ -111,50 +185,61 @@
requests:
storage: 1Gi
-- name: create etcd deployment
+- name: Create Ansible Service Broker deployment config
oc_obj:
- name: etcd
+ name: asb
namespace: openshift-ansible-service-broker
state: present
- kind: Deployment
+ kind: DeploymentConfig
content:
path: /tmp/dcout
data:
- apiVersion: extensions/v1beta1
- kind: Deployment
+ apiVersion: v1
+ kind: DeploymentConfig
metadata:
- name: etcd
- namespace: openshift-ansible-service-broker
+ name: asb
labels:
app: openshift-ansible-service-broker
- service: etcd
+ service: asb
spec:
+ replicas: 1
selector:
- matchLabels:
- app: openshift-ansible-service-broker
- service: etcd
+ app: openshift-ansible-service-broker
strategy:
- type: RollingUpdate
- rollingUpdate:
- maxSurge: 1
- maxUnavailable: 1
- replicas: 1
+ type: Rolling
template:
metadata:
labels:
app: openshift-ansible-service-broker
- service: etcd
+ service: asb
spec:
- restartPolicy: Always
+ serviceAccount: asb
containers:
+ - image: "{{ ansible_service_broker_image }}"
+ name: asb
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: config-volume
+ mountPath: /etc/ansible-service-broker
+ - name: asb-tls
+ mountPath: /etc/tls/private
+ ports:
+ - containerPort: 1338
+ protocol: TCP
+ env:
+ - name: BROKER_CONFIG
+ value: /etc/ansible-service-broker/config.yaml
+ resources: {}
+ terminationMessagePath: /tmp/termination-log
+
- image: "{{ ansible_service_broker_etcd_image }}"
name: etcd
imagePullPolicy: IfNotPresent
terminationMessagePath: /tmp/termination-log
workingDir: /etcd
args:
- - '{{ ansible_service_broker_etcd_image_etcd_path }}'
- - --data-dir=/data
+ - "{{ ansible_service_broker_etcd_image_etcd_path }}"
+ - "--data-dir=/data"
- "--listen-client-urls=http://0.0.0.0:2379"
- "--advertise-client-urls=http://0.0.0.0:2379"
ports:
@@ -170,57 +255,15 @@
- name: etcd
persistentVolumeClaim:
claimName: etcd
-
-- name: create ansible-service-broker deployment
- oc_obj:
- name: asb
- namespace: openshift-ansible-service-broker
- state: present
- kind: Deployment
- content:
- path: /tmp/dcout
- data:
- apiVersion: extensions/v1beta1
- kind: Deployment
- metadata:
- name: asb
- namespace: openshift-ansible-service-broker
- labels:
- app: openshift-ansible-service-broker
- service: asb
- spec:
- strategy:
- type: Recreate
- replicas: 1
- template:
- metadata:
- labels:
- app: openshift-ansible-service-broker
- service: asb
- spec:
- serviceAccount: asb
- restartPolicy: Always
- containers:
- - image: "{{ ansible_service_broker_image }}"
- name: asb
- imagePullPolicy: IfNotPresent
- volumeMounts:
- - name: config-volume
- mountPath: /etc/ansible-service-broker
- ports:
- - containerPort: 1338
- protocol: TCP
- env:
- - name: BROKER_CONFIG
- value: /etc/ansible-service-broker/config.yaml
- terminationMessagePath: /tmp/termination-log
- volumes:
- name: config-volume
configMap:
name: broker-config
items:
- key: broker-config
path: config.yaml
+ - name: asb-tls
+ secret:
+ secretName: asb-tls
# TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following:
@@ -239,42 +282,65 @@
name: broker-config
namespace: openshift-ansible-service-broker
labels:
- app: ansible-service-broker
+ app: openshift-ansible-service-broker
data:
broker-config: |
registry:
- name: "{{ ansible_service_broker_registry_type }}"
- url: "{{ ansible_service_broker_registry_url }}"
- user: "{{ ansible_service_broker_registry_user }}"
- pass: "{{ ansible_service_broker_registry_password }}"
- org: "{{ ansible_service_broker_registry_organization }}"
+ - type: {{ ansible_service_broker_registry_type }}
+ name: {{ ansible_service_broker_registry_name }}
+ url: {{ ansible_service_broker_registry_url }}
+ user: {{ ansible_service_broker_registry_user }}
+ pass: {{ ansible_service_broker_registry_password }}
+ org: {{ ansible_service_broker_registry_organization }}
+ tag: {{ ansible_service_broker_registry_tag }}
+ white_list: {{ ansible_service_broker_registry_whitelist }}
dao:
- etcd_host: etcd
+ etcd_host: 0.0.0.0
etcd_port: 2379
log:
logfile: /var/log/ansible-service-broker/asb.log
stdout: true
- level: "{{ ansible_service_broker_log_level }}"
+ level: {{ ansible_service_broker_log_level }}
color: true
- openshift: {}
+ openshift:
+ host: ""
+ ca_file: ""
+ bearer_token_file: ""
+ sandbox_role: {{ ansible_service_broker_sandbox_role }}
+ image_pull_policy: {{ ansible_service_broker_image_pull_policy }}
broker:
dev_broker: {{ ansible_service_broker_dev_broker | bool | lower }}
+ bootstrap_on_startup: {{ ansible_service_broker_bootstrap_on_startup | bool | lower }}
+ refresh_interval: {{ ansible_service_broker_refresh_interval }}
launch_apb_on_bind: {{ ansible_service_broker_launch_apb_on_bind | bool | lower }}
- recovery: {{ ansible_service_broker_recovery | bool | lower }}
output_request: {{ ansible_service_broker_output_request | bool | lower }}
- bootstrap_on_startup: {{ ansible_service_broker_bootstrap_on_startup | bool | lower }}
+ recovery: {{ ansible_service_broker_recovery | bool | lower }}
+ ssl_cert_key: /etc/tls/private/tls.key
+ ssl_cert: /etc/tls/private/tls.crt
+ auto_escalate: {{ ansible_service_broker_auto_escalate }}
+ auth:
+ - type: basic
+ enabled: false
+
- name: Create the Broker resource in the catalog
oc_obj:
name: ansible-service-broker
state: present
- kind: Broker
+ kind: ServiceBroker
content:
path: /tmp/brokerout
data:
apiVersion: servicecatalog.k8s.io/v1alpha1
- kind: Broker
+ kind: ServiceBroker
metadata:
name: ansible-service-broker
spec:
- url: http://asb.openshift-ansible-service-broker.svc:1338
+ url: http://asb.openshift-ansible-service-broker.svc:1338/ansible-service-broker
+ authInfo:
+ bearer:
+ secretRef:
+ name: asb-client
+ namespace: openshift-ansible-service-broker
+ kind: Secret
+ caBundle: "{{ catalog_ca.content }}"
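A note on the caBundle wiring above: the slurp task near the top of this file returns the certificate base64-encoded, which is the form the service catalog expects in caBundle. A minimal standalone sketch of the same pattern (assumed path, not part of this change):

  - hosts: localhost
    tasks:
      - name: Read the service catalog CA
        slurp:
          src: /etc/origin/service-catalog/ca.crt
        register: catalog_ca
      - debug:
          msg: "{{ catalog_ca.content }}"  # base64-encoded file contents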
diff --git a/roles/ansible_service_broker/tasks/main.yml b/roles/ansible_service_broker/tasks/main.yml
index b46ce8233..d8695bd3a 100644
--- a/roles/ansible_service_broker/tasks/main.yml
+++ b/roles/ansible_service_broker/tasks/main.yml
@@ -2,7 +2,7 @@
# do any asserts here
- include: install.yml
- when: not ansible_service_broker_remove|default(false) | bool
+ when: ansible_service_broker_install | default(false) | bool
- include: remove.yml
- when: ansible_service_broker_remove|default(false) | bool
+ when: ansible_service_broker_remove | default(false) | bool
diff --git a/roles/ansible_service_broker/tasks/remove.yml b/roles/ansible_service_broker/tasks/remove.yml
index 2519f9f4c..f0a6be226 100644
--- a/roles/ansible_service_broker/tasks/remove.yml
+++ b/roles/ansible_service_broker/tasks/remove.yml
@@ -1,16 +1,57 @@
---
-- name: remove openshift-ansible-service-broker project
- oc_project:
- name: openshift-ansible-service-broker
- state: absent
-
- name: remove ansible-service-broker serviceaccount
oc_serviceaccount:
name: asb
namespace: openshift-ansible-service-broker
state: absent
+- name: remove ansible-service-broker client serviceaccount
+ oc_serviceaccount:
+ name: asb-client
+ namespace: openshift-ansible-service-broker
+ state: absent
+
+- name: remove asb-auth cluster role
+ oc_clusterrole:
+ state: absent
+ name: asb-auth
+
+- name: remove asb-access cluster role
+ oc_clusterrole:
+ state: absent
+ name: asb-access
+
+- name: Unbind admin cluster-role to asb serviceaccount
+ oc_adm_policy_user:
+ state: absent
+ namespace: openshift-ansible-service-broker
+ resource_kind: cluster-role
+ resource_name: admin
+ user: "system:serviceaccount:openshift-ansible-service-broker:asb"
+
+- name: Unbind auth cluster role to asb service account
+ oc_adm_policy_user:
+ state: absent
+ namespace: openshift-ansible-service-broker
+ resource_kind: cluster-role
+ resource_name: asb-auth
+ user: "system:serviceaccount:openshift-ansible-service-broker:asb"
+
+- name: Unbind asb-access role to asb-client service account
+ oc_adm_policy_user:
+ state: absent
+ namespace: openshift-ansible-service-broker
+ resource_kind: cluster-role
+ resource_name: asb-access
+ user: "system:serviceaccount:openshift-ansible-service-broker:asb-client"
+
+- name: remove asb-client token secret
+ oc_secret:
+ state: absent
+ name: asb-client
+ namespace: openshift-ansible-service-broker
+
- name: remove ansible-service-broker service
oc_service:
name: asb
@@ -35,19 +76,19 @@
namespace: openshift-ansible-service-broker
state: absent
-- name: remove etcd deployment
+- name: remove Ansible Service Broker deployment config
oc_obj:
- name: etcd
+ name: asb
namespace: openshift-ansible-service-broker
+ kind: DeploymentConfig
state: absent
- kind: Deployment
-- name: remove ansible-service-broker deployment
+- name: remove secret for broker auth
oc_obj:
- name: asb
+ name: asb-auth-secret
namespace: openshift-ansible-service-broker
+ kind: Broker
state: absent
- kind: Deployment
# TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following:
- name: remove config map for ansible-service-broker
@@ -62,4 +103,9 @@
oc_obj:
name: ansible-service-broker
state: absent
- kind: Broker
+ kind: ServiceBroker
+
+- name: remove openshift-ansible-service-broker project
+ oc_project:
+ name: openshift-ansible-service-broker
+ state: absent
diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml
index 15e448515..3e9639adf 100644
--- a/roles/ansible_service_broker/vars/default_images.yml
+++ b/roles/ansible_service_broker/vars/default_images.yml
@@ -8,6 +8,7 @@ __ansible_service_broker_etcd_image_tag: latest
__ansible_service_broker_etcd_image_etcd_path: /usr/local/bin/etcd
__ansible_service_broker_registry_type: dockerhub
+__ansible_service_broker_registry_name: dh
__ansible_service_broker_registry_url: null
__ansible_service_broker_registry_user: null
__ansible_service_broker_registry_password: null
diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml
index ce2ae8365..9c576cb76 100644
--- a/roles/ansible_service_broker/vars/openshift-enterprise.yml
+++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml
@@ -7,7 +7,9 @@ __ansible_service_broker_etcd_image_prefix: rhel7/
__ansible_service_broker_etcd_image_tag: latest
__ansible_service_broker_etcd_image_etcd_path: /bin/etcd
+
__ansible_service_broker_registry_type: rhcc
+__ansible_service_broker_registry_name: rh
__ansible_service_broker_registry_url: "https://registry.access.redhat.com"
__ansible_service_broker_registry_user: null
__ansible_service_broker_registry_password: null
diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml
index 39f730462..0e3863304 100644
--- a/roles/calico/tasks/main.yml
+++ b/roles/calico/tasks/main.yml
@@ -2,10 +2,14 @@
- name: Calico Node | Error if invalid cert arguments
fail:
msg: "Must provide all or none for the following etcd params: calico_etcd_cert_dir, calico_etcd_ca_cert_file, calico_etcd_cert_file, calico_etcd_key_file, calico_etcd_endpoints"
- when: (calico_etcd_cert_dir is defined or calico_etcd_ca_cert_file is defined or calico_etcd_cert_file is defined or calico_etcd_key_file is defined or calico_etcd_endpoints is defined) and not (calico_etcd_cert_dir is defined and calico_etcd_ca_cert_file is defined and calico_etcd_cert_file is defined and calico_etcd_key_file is defined and calico_etcd_endpoints is defined)
+ when:
+ - calico_etcd_cert_dir is defined or calico_etcd_ca_cert_file is defined or calico_etcd_cert_file is defined or calico_etcd_key_file is defined or calico_etcd_endpoints is defined
+ - not (calico_etcd_cert_dir is defined and calico_etcd_ca_cert_file is defined and calico_etcd_cert_file is defined and calico_etcd_key_file is defined and calico_etcd_endpoints is defined)
- name: Calico Node | Generate OpenShift-etcd certs
- include: ../../../roles/etcd_client_certificates/tasks/main.yml
+ include_role:
+ name: etcd
+ tasks_from: client_certificates
when: calico_etcd_ca_cert_file is not defined or calico_etcd_cert_file is not defined or calico_etcd_key_file is not defined or calico_etcd_endpoints is not defined or calico_etcd_cert_dir is not defined
vars:
etcd_cert_prefix: calico.etcd-
@@ -28,18 +32,18 @@
msg: "Invalid etcd configuration for calico."
when: item is not defined or item == ''
with_items:
- - calico_etcd_ca_cert_file
- - calico_etcd_cert_file
- - calico_etcd_key_file
- - calico_etcd_endpoints
+ - calico_etcd_ca_cert_file
+ - calico_etcd_cert_file
+ - calico_etcd_key_file
+ - calico_etcd_endpoints
- name: Calico Node | Assure the calico certs are present
stat:
path: "{{ item }}"
with_items:
- - "{{ calico_etcd_ca_cert_file }}"
- - "{{ calico_etcd_cert_file }}"
- - "{{ calico_etcd_key_file }}"
+ - "{{ calico_etcd_ca_cert_file }}"
+ - "{{ calico_etcd_cert_file }}"
+ - "{{ calico_etcd_key_file }}"
- name: Calico Node | Configure Calico service unit file
template:
diff --git a/roles/cockpit-ui/defaults/main.yml b/roles/cockpit-ui/defaults/main.yml
new file mode 100644
index 000000000..b1696f1b8
--- /dev/null
+++ b/roles/cockpit-ui/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+openshift_config_base: "/etc/origin"
+openshift_master_config_dir: "{{ openshift.common.config_base | default(openshift_config_base) }}/master"
diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml
index 0114498f8..244e2cc41 100644
--- a/roles/cockpit-ui/tasks/main.yml
+++ b/roles/cockpit-ui/tasks/main.yml
@@ -50,7 +50,9 @@
-n default
register: deploy_registry_console
changed_when: "'already exists' not in deploy_registry_console.stderr"
- failed_when: "'already exists' not in deploy_registry_console.stderr and deploy_registry_console.rc != 0"
+ failed_when:
+ - "'already exists' not in deploy_registry_console.stderr"
+ - "deploy_registry_console.rc != 0"
- name: Delete temp directory
file:
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index 7e206ded1..e36dfa7b9 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -1,6 +1,22 @@
---
docker_cli_auth_config_path: '/root/.docker'
-oreg_url: ''
-oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
+# oreg_url is defined by user input.
+oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
oreg_auth_credentials_replace: False
+
+openshift_docker_additional_registries: []
+openshift_docker_blocked_registries: []
+openshift_docker_insecure_registries: []
+
+openshift_docker_ent_reg: 'registry.access.redhat.com'
+
+# The l2_docker_* variables convert csv strings to lists, if
+# necessary. These variables should be used in place of their respective
+# openshift_docker_* counterparts to ensure the properly formatted lists are
+# utilized.
+l2_docker_additional_registries: "{% if openshift_docker_additional_registries is string %}{% if openshift_docker_additional_registries == '' %}[]{% elif ',' in openshift_docker_additional_registries %}{{ openshift_docker_additional_registries.split(',') | list }}{% else %}{{ [ openshift_docker_additional_registries ] }}{% endif %}{% else %}{{ openshift_docker_additional_registries }}{% endif %}"
+l2_docker_blocked_registries: "{% if openshift_docker_blocked_registries is string %}{% if openshift_docker_blocked_registries == '' %}[]{% elif ',' in openshift_docker_blocked_registries %}{{ openshift_docker_blocked_registries.split(',') | list }}{% else %}{{ [ openshift_docker_blocked_registries ] }}{% endif %}{% else %}{{ openshift_docker_blocked_registries }}{% endif %}"
+l2_docker_insecure_registries: "{% if openshift_docker_insecure_registries is string %}{% if openshift_docker_insecure_registries == '' %}[]{% elif ',' in openshift_docker_insecure_registries %}{{ openshift_docker_insecure_registries.split(',') | list }}{% else %}{{ [ openshift_docker_insecure_registries ] }}{% endif %}{% else %}{{ openshift_docker_insecure_registries }}{% endif %}"
+
+containers_registries_conf_path: /etc/containers/registries.conf
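A minimal sketch (standalone play, hypothetical values) of how the l2_docker_* pattern above behaves: a csv string is split into a list, an empty string becomes an empty list, and a value that is already a list passes through unchanged.

  - hosts: localhost
    gather_facts: false
    vars:
      openshift_docker_insecure_registries: "172.30.0.0/16,registry.example.com"
      l2_docker_insecure_registries: "{% if openshift_docker_insecure_registries is string %}{% if openshift_docker_insecure_registries == '' %}[]{% elif ',' in openshift_docker_insecure_registries %}{{ openshift_docker_insecure_registries.split(',') | list }}{% else %}{{ [ openshift_docker_insecure_registries ] }}{% endif %}{% else %}{{ openshift_docker_insecure_registries }}{% endif %}"
    tasks:
      - debug:
          var: l2_docker_insecure_registries  # -> ['172.30.0.0/16', 'registry.example.com']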
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml
index 591367467..866ed0452 100644
--- a/roles/docker/handlers/main.yml
+++ b/roles/docker/handlers/main.yml
@@ -4,6 +4,7 @@
systemd:
name: "{{ openshift.docker.service_name }}"
state: restarted
+ daemon_reload: yes
register: r_docker_restart_docker_result
until: not r_docker_restart_docker_result | failed
retries: 3
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 78c6671d8..f73f90686 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -13,16 +13,17 @@
- name: Use Package Docker if Requested
include: package_docker.yml
when:
- - not l_use_system_container
- - not l_use_crio_only
+ - not l_use_system_container
+ - not l_use_crio_only
- name: Use System Container Docker if Requested
include: systemcontainer_docker.yml
when:
- - l_use_system_container
- - not l_use_crio_only
+ - l_use_system_container
+ - not l_use_crio_only
- name: Add CRI-O usage Requested
include: systemcontainer_crio.yml
when:
- - l_use_crio
+ - l_use_crio
+ - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config']
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index 145b552a6..888ae40e7 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -3,6 +3,8 @@
command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
when: not openshift.common.is_atomic | bool
register: curr_docker_version
+ retries: 4
+ until: curr_docker_version | succeeded
changed_when: false
- name: Error out if Docker pre-installed but too old
@@ -46,7 +48,17 @@
template:
dest: "{{ docker_systemd_dir }}/custom.conf"
src: custom.conf.j2
- when: not os_firewall_use_firewalld | default(False) | bool
+ notify:
+ - restart docker
+ when: not (os_firewall_use_firewalld | default(False)) | bool
+
+- name: Add enterprise registry, if necessary
+ set_fact:
+ l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}"
+ when:
+ - openshift.common.deployment_type == 'openshift-enterprise'
+ - openshift_docker_ent_reg != ''
+ - openshift_docker_ent_reg not in l2_docker_additional_registries
- stat: path=/etc/sysconfig/docker
register: docker_check
@@ -56,20 +68,30 @@
dest: /etc/sysconfig/docker
regexp: '^{{ item.reg_conf_var }}=.*$'
line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
- when: item.reg_fact_val != '' and docker_check.stat.isreg is defined and docker_check.stat.isreg
+ when:
+ - item.reg_fact_val != []
+ - docker_check.stat.isreg is defined
+ - docker_check.stat.isreg
with_items:
- reg_conf_var: ADD_REGISTRY
- reg_fact_val: "{{ docker_additional_registries | default(None, true)}}"
+ reg_fact_val: "{{ l2_docker_additional_registries }}"
reg_flag: --add-registry
- reg_conf_var: BLOCK_REGISTRY
- reg_fact_val: "{{ docker_blocked_registries| default(None, true) }}"
+ reg_fact_val: "{{ l2_docker_blocked_registries }}"
reg_flag: --block-registry
- reg_conf_var: INSECURE_REGISTRY
- reg_fact_val: "{{ docker_insecure_registries| default(None, true) }}"
+ reg_fact_val: "{{ l2_docker_insecure_registries }}"
reg_flag: --insecure-registry
notify:
- restart docker
+- name: Place additional/blocked/insecure registries in /etc/containers/registries.conf
+ template:
+ dest: "{{ containers_registries_conf_path }}"
+ src: registries.conf
+ notify:
+ - restart docker
+
- name: Set Proxy Settings
lineinfile:
dest: /etc/sysconfig/docker
@@ -117,17 +139,12 @@
notify:
- restart docker
-- name: Check for credentials file for registry auth
- stat:
- path: "{{ docker_cli_auth_config_path }}/config.json"
- when: oreg_auth_user is defined
- register: docker_cli_auth_credentials_stat
-
-- name: Create credentials for docker cli registry auth
- command: "docker --config={{ docker_cli_auth_config_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
- when:
- - oreg_auth_user is defined
- - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+# The following task is needed as the systemd module may report a change in
+# state even though docker is already running.
+- name: Detect if docker is already started
+ command: "systemctl show docker -p ActiveState"
+ changed_when: False
+ register: r_docker_already_running_result
- name: Start the Docker service
systemd:
@@ -141,6 +158,18 @@
delay: 30
- set_fact:
- docker_service_status_changed: "{{ r_docker_package_docker_start_result | changed }}"
+ docker_service_status_changed: "{{ (r_docker_package_docker_start_result | changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}"
+
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{ docker_cli_auth_config_path }}/config.json"
+ when: oreg_auth_user is defined
+ register: docker_cli_auth_credentials_stat
+
+- name: Create credentials for docker cli registry auth
+ command: "docker --config={{ docker_cli_auth_config_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ when:
+ - oreg_auth_user is defined
+ - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- meta: flush_handlers
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index 0bab0899c..fdc6cd24a 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -1,17 +1,34 @@
---
+
# TODO: Much of this file is shared with container engine tasks
- set_fact:
- l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(openshift.docker.insecure_registries)) }}"
- when: openshift.docker.insecure_registries
+ l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}"
+ when: l2_docker_insecure_registries
- set_fact:
- l_crio_registries: "{{ openshift.docker.additional_registries + ['docker.io'] }}"
- when: openshift.docker.additional_registries
+ l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}"
+ when: l2_docker_additional_registries
- set_fact:
l_crio_registries: "{{ ['docker.io'] }}"
- when: not openshift.docker.additional_registries
+ when: not l2_docker_additional_registries
- set_fact:
l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}"
- when: openshift.docker.additional_registries
+ when: l2_docker_additional_registries
+
+- set_fact:
+ l_openshift_image_tag: "{{ openshift_image_tag | string }}"
+ when: openshift_image_tag is defined
+
+- set_fact:
+ l_openshift_image_tag: "latest"
+ when:
+ - openshift_image_tag is not defined
+ - openshift_release == "latest"
+
+- set_fact:
+ l_openshift_image_tag: "{{ openshift_release | string }}"
+ when:
+ - openshift_image_tag is not defined
+ - openshift_release != "latest"
- name: Ensure container-selinux is installed
package:
@@ -92,33 +109,44 @@
- block:
- - name: Set to default prepend
+ - name: Set CRI-O image defaults
set_fact:
l_crio_image_prepend: "docker.io/gscrivano"
l_crio_image_name: "cri-o-fedora"
+ l_crio_image_tag: "latest"
- name: Use Centos based image when distribution is CentOS
set_fact:
l_crio_image_name: "cri-o-centos"
when: ansible_distribution == "CentOS"
+ - name: Set CRI-O image tag
+ set_fact:
+ l_crio_image_tag: "{{ l_openshift_image_tag }}"
+ when:
+ - openshift_deployment_type == 'openshift-enterprise'
+
- name: Use RHEL based image when distribution is Red Hat
set_fact:
- l_crio_image_prepend: "registry.access.redhat.com"
+ l_crio_image_prepend: "registry.access.redhat.com/openshift3"
l_crio_image_name: "cri-o"
when: ansible_distribution == "RedHat"
- # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504
- - name: Use a testing registry if requested
+ - name: Set the full image name
set_fact:
- l_crio_image_prepend: "{{ openshift_crio_systemcontainer_image_registry_override }}"
- when:
- - openshift_crio_systemcontainer_image_registry_override is defined
- - openshift_crio_systemcontainer_image_registry_override != ""
+ l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}"
- - name: Set the full image name
+ # For https://github.com/openshift/aos-cd-jobs/pull/624#pullrequestreview-61816548
+ - name: Use a specific image if requested
set_fact:
- l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:latest"
+ l_crio_image: "{{ openshift_crio_systemcontainer_image_override }}"
+ when:
+ - openshift_crio_systemcontainer_image_override is defined
+ - openshift_crio_systemcontainer_image_override != ""
+
+ # Be nice and let the user see the variable result
+ - debug:
+ var: l_crio_image
# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
- name: Pre-pull CRI-O System Container image
@@ -134,6 +162,14 @@
image: "{{ l_crio_image }}"
state: latest
+- name: Remove CRI-O default configuration files
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /etc/cni/net.d/200-loopback.conf
+ - /etc/cni/net.d/100-crio-bridge.conf
+
- name: Create the CRI-O configuration
template:
dest: /etc/crio/crio.conf
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index 146e5f430..15c6a55db 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -1,4 +1,21 @@
---
+
+- set_fact:
+ l_openshift_image_tag: "{{ openshift_image_tag | string }}"
+ when: openshift_image_tag is defined
+
+- set_fact:
+ l_openshift_image_tag: "latest"
+ when:
+ - openshift_image_tag is not defined
+ - openshift_release == "latest"
+
+- set_fact:
+ l_openshift_image_tag: "{{ openshift_release | string }}"
+ when:
+ - openshift_image_tag is not defined
+ - openshift_release != "latest"
+
# If docker_options are provided we should fail. We should not install docker and ignore
# the users configuration. NOTE: docker_options == inventory:openshift_docker_options
- name: Fail quickly if openshift_docker_options are set
@@ -89,6 +106,13 @@
- name: Set to default prepend
set_fact:
l_docker_image_prepend: "gscrivano"
+ l_docker_image_tag: "latest"
+
+ - name: Set container engine image tag
+ set_fact:
+ l_docker_image_tag: "{{ l_openshift_image_tag }}"
+ when:
+ - openshift_deployment_type == 'openshift-enterprise'
- name: Use Red Hat Registry for image when distribution is Red Hat
set_fact:
@@ -102,7 +126,7 @@
- name: Set the full image name
set_fact:
- l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:latest"
+ l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:{{ l_docker_image_tag }}"
# For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959
- name: Use a specific image if requested
@@ -148,10 +172,10 @@
# Set local versions of facts that must be in json format for container-daemon.json
# NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson
- set_fact:
- l_docker_insecure_registries: "{{ docker_insecure_registries | default([]) | to_json }}"
+ l_docker_insecure_registries: "{{ l2_docker_insecure_registries | default([]) | to_json }}"
l_docker_log_options: "{{ docker_log_options | default({}) | to_json }}"
- l_docker_additional_registries: "{{ docker_additional_registries | default([]) | to_json }}"
- l_docker_blocked_registries: "{{ docker_blocked_registries | default([]) | to_json }}"
+ l_docker_additional_registries: "{{ l2_docker_additional_registries | default([]) | to_json }}"
+ l_docker_blocked_registries: "{{ l2_docker_blocked_registries | default([]) | to_json }}"
l_docker_selinux_enabled: "{{ docker_selinux_enabled | default(true) | to_json }}"
# Configure container-engine using the container-daemon.json file
diff --git a/roles/docker/templates/crio.conf.j2 b/roles/docker/templates/crio.conf.j2
index b4ee84fd0..b715c2ffa 100644
--- a/roles/docker/templates/crio.conf.j2
+++ b/roles/docker/templates/crio.conf.j2
@@ -13,12 +13,12 @@ runroot = "/var/run/containers/storage"
# storage_driver select which storage driver is used to manage storage
# of images and containers.
-storage_driver = "overlay2"
+storage_driver = "overlay"
# storage_option is used to pass an option to the storage driver.
storage_option = [
{% if ansible_distribution in ['RedHat', 'CentOS'] %}
- "overlay2.override_kernel_check=1"
+ "overlay.override_kernel_check=1"
{% endif %}
]
@@ -35,6 +35,10 @@ stream_address = ""
# stream_port is the port on which the stream server will listen
stream_port = "10010"
+# file_locking is whether file-based locking will be used instead of
+# in-memory locking
+file_locking = true
+
# The "crio.runtime" table contains settings pertaining to the OCI
# runtime used and options for how to set up and manage the OCI runtime.
[crio.runtime]
@@ -67,6 +71,9 @@ runtime_untrusted_workload = ""
# container runtime for all containers.
default_workload_trust = "trusted"
+# no_pivot instructs the runtime to not use pivot_root, but instead use MS_MOVE
+no_pivot = false
+
# conmon is the path to conmon binary, used for managing the runtime.
conmon = "/usr/libexec/crio/conmon"
@@ -93,6 +100,16 @@ apparmor_profile = "crio-default"
# for the runtime.
cgroup_manager = "systemd"
+# hooks_dir_path is the oci hooks directory for automatically executed hooks
+hooks_dir_path = "/usr/share/containers/oci/hooks.d"
+
+# pids_limit is the number of processes allowed in a container
+pids_limit = 1024
+
+# log_size_max is the max limit for the container log size in bytes.
+# Negative values indicate that no limit is imposed.
+log_size_max = -1
+
# The "crio.image" table contains settings pertaining to the
# management of OCI images.
[crio.image]
@@ -115,6 +132,10 @@ pause_command = "/pause"
# unspecified so that the default system-wide policy will be used.
signature_policy = ""
+# image_volumes controls how image volumes are handled.
+# The valid values are mkdir and ignore.
+image_volumes = "mkdir"
+
# insecure_registries is used to skip TLS verification when pulling images.
insecure_registries = [
{{ l_insecure_crio_registries|default("") }}
@@ -125,6 +146,7 @@ insecure_registries = [
registries = [
{{ l_additional_crio_registries|default("") }}
]
+
# The "crio.network" table contains settings pertaining to the
# management of CNI plugins.
[crio.network]
diff --git a/roles/docker/templates/custom.conf.j2 b/roles/docker/templates/custom.conf.j2
index 9b47cb6ab..713412473 100644
--- a/roles/docker/templates/custom.conf.j2
+++ b/roles/docker/templates/custom.conf.j2
@@ -3,3 +3,9 @@
[Unit]
Wants=iptables.service
After=iptables.service
+
+# The following line is a work-around to ensure docker is restarted whenever
+# iptables is restarted. This ensures the proper iptables rules will be in
+# place for docker.
+# Note: This will also cause docker to be stopped if iptables is stopped.
+PartOf=iptables.service
diff --git a/roles/docker/templates/registries.conf b/roles/docker/templates/registries.conf
new file mode 100644
index 000000000..d379b2be0
--- /dev/null
+++ b/roles/docker/templates/registries.conf
@@ -0,0 +1,46 @@
+# {{ ansible_managed }}
+# This is a system-wide configuration file used to
+# keep track of registries for various container backends.
+# It adheres to YAML format and does not support recursive
+# lists of registries.
+
+# The default location for this configuration file is /etc/containers/registries.conf.
+
+# The only valid categories are: 'registries', 'insecure_registries',
+# and 'block_registries'.
+
+
+#registries:
+# - registry.access.redhat.com
+
+{% if l2_docker_additional_registries %}
+registries:
+{% for reg in l2_docker_additional_registries %}
+ - {{ reg }}
+{% endfor %}
+{% endif %}
+
+# If you need to access insecure registries, uncomment the section below
+# and add the registries fully-qualified name. An insecure registry is one
+# that does not have a valid SSL certificate or only does HTTP.
+#insecure_registries:
+# -
+
+{% if l2_docker_insecure_registries %}
+insecure_registries:
+{% for reg in l2_docker_insecure_registries %}
+ - {{ reg }}
+{% endfor %}
+{% endif %}
+
+# If you need to block pull access from a registry, uncomment the section below
+# and add the registries fully-qualified name.
+#block_registries:
+# -
+
+{% if l2_docker_blocked_registries %}
+block_registries:
+{% for reg in l2_docker_blocked_registries %}
+ - {{ reg }}
+{% endfor %}
+{% endif %}
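For illustration, with assumed values of l2_docker_additional_registries: ['registry.access.redhat.com'] and l2_docker_insecure_registries: ['172.30.0.0/16'] (and no blocked registries), the template above renders roughly as:

  # <ansible_managed banner and explanatory comments omitted>
  registries:
    - registry.access.redhat.com

  insecure_registries:
    - 172.30.0.0/16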
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 3cc2bbb18..78f231416 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -1,6 +1,66 @@
---
-r_etcd_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
-r_etcd_use_firewalld: "{{ os_firewall_use_firewalld | default(Falsel) }}"
+r_etcd_common_backup_tag: ''
+r_etcd_common_backup_sufix_name: ''
+
+# runc, docker, host
+r_etcd_common_etcd_runtime: "docker"
+r_etcd_common_embedded_etcd: false
+
+# etcd run on a host => use etcdctl command directly
+# etcd run as a docker container => use docker exec
+# etcd run as a runc container => use runc exec
+r_etcd_common_etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_common_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}"
+
+# etcd server vars
+etcd_conf_dir: '/etc/etcd'
+r_etcd_common_system_container_host_dir: /var/lib/etcd/etcd.etcd
+etcd_system_container_conf_dir: /var/lib/etcd/etc
+etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf"
+etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
+etcd_cert_file: "{{ etcd_conf_dir }}/server.crt"
+etcd_key_file: "{{ etcd_conf_dir }}/server.key"
+etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt"
+etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt"
+etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key"
+
+# etcd ca vars
+etcd_ca_dir: "{{ etcd_conf_dir}}/ca"
+etcd_generated_certs_dir: "{{ etcd_conf_dir }}/generated_certs"
+etcd_ca_cert: "{{ etcd_ca_dir }}/ca.crt"
+etcd_ca_key: "{{ etcd_ca_dir }}/ca.key"
+etcd_openssl_conf: "{{ etcd_ca_dir }}/openssl.cnf"
+etcd_ca_name: etcd_ca
+etcd_req_ext: etcd_v3_req
+etcd_ca_exts_peer: etcd_v3_ca_peer
+etcd_ca_exts_server: etcd_v3_ca_server
+etcd_ca_exts_self: etcd_v3_ca_self
+etcd_ca_exts_client: etcd_v3_ca_client
+etcd_ca_crl_dir: "{{ etcd_ca_dir }}/crl"
+etcd_ca_new_certs_dir: "{{ etcd_ca_dir }}/certs"
+etcd_ca_db: "{{ etcd_ca_dir }}/index.txt"
+etcd_ca_serial: "{{ etcd_ca_dir }}/serial"
+etcd_ca_crl_number: "{{ etcd_ca_dir }}/crlnumber"
+etcd_ca_default_days: 1825
+
+r_etcd_common_master_peer_cert_file: /etc/origin/master/master.etcd-client.crt
+r_etcd_common_master_peer_key_file: /etc/origin/master/master.etcd-client.key
+r_etcd_common_master_peer_ca_file: /etc/origin/master/master.etcd-ca.crt
+
+# etcd server & certificate vars
+etcd_hostname: "{{ inventory_hostname }}"
+etcd_ip: "{{ ansible_default_ipv4.address }}"
+etcd_is_atomic: False
+etcd_is_containerized: False
+etcd_is_thirdparty: False
+
+# etcd dir vars
+etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}"
+
+# etcd ports and protocols
+etcd_client_port: 2379
+etcd_peer_port: 2380
+etcd_url_scheme: http
+etcd_peer_url_scheme: http
etcd_initial_cluster_state: new
etcd_initial_cluster_token: etcd-cluster-1
@@ -10,8 +70,16 @@ etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_
etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
-etcd_client_port: 2379
-etcd_peer_port: 2380
+# required role variable
+#etcd_peer: 127.0.0.1
+etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}"
+
+etcd_service: "{{ 'etcd_container' if r_etcd_common_etcd_runtime == 'docker' else 'etcd' }}"
+# Location of the service file is fixed and not meant to be changed
+etcd_service_file: "/etc/systemd/system/{{ etcd_service }}.service"
+
+r_etcd_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_etcd_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d"
r_etcd_os_firewall_deny: []
@@ -20,3 +88,6 @@ r_etcd_os_firewall_allow:
port: "{{etcd_client_port}}/tcp"
- service: etcd peering
port: "{{ etcd_peer_port }}/tcp"
+
+# set the backend quota to 4GB by default
+etcd_quota_backend_bytes: 4294967296
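For reference, the runtime-dependent defaults above resolve as follows (derived from the expressions, shown here for convenience):

  r_etcd_common_etcd_runtime: host    -> r_etcd_common_etcdctl_command: etcdctl
  r_etcd_common_etcd_runtime: docker  -> r_etcd_common_etcdctl_command: docker exec etcd_container etcdctl
  r_etcd_common_etcd_runtime: runc    -> r_etcd_common_etcdctl_command: runc exec etcd etcdctl

(embedded etcd also uses plain etcdctl), and etcd_data_dir becomes /var/lib/origin/openshift.local.etcd for embedded etcd, /var/lib/etcd/etcd.etcd/ for the runc runtime, and /var/lib/etcd/ otherwise.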
diff --git a/roles/etcd_common/library/delegated_serial_command.py b/roles/etcd/library/delegated_serial_command.py
index 0cab1ca88..0cab1ca88 100755
--- a/roles/etcd_common/library/delegated_serial_command.py
+++ b/roles/etcd/library/delegated_serial_command.py
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index 9a955c822..879ca4f4e 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -18,5 +18,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_os_firewall
-- role: etcd_server_certificates
-- role: etcd_common
+- role: lib_utils
diff --git a/roles/etcd_migrate/tasks/clean_data.yml b/roles/etcd/tasks/auxiliary/clean_data.yml
index 95a0e7c0a..1ed2db5bc 100644
--- a/roles/etcd_migrate/tasks/clean_data.yml
+++ b/roles/etcd/tasks/auxiliary/clean_data.yml
@@ -1,5 +1,5 @@
---
- name: Remove member data
file:
- path: /var/lib/etcd/member
+ path: "{{ etcd_data_dir }}/member"
state: absent
diff --git a/roles/etcd/tasks/auxiliary/disable_etcd.yml b/roles/etcd/tasks/auxiliary/disable_etcd.yml
new file mode 100644
index 000000000..7c6d0409d
--- /dev/null
+++ b/roles/etcd/tasks/auxiliary/disable_etcd.yml
@@ -0,0 +1,5 @@
+---
+- name: Disable etcd members
+ service:
+ name: "{{ etcd_service }}"
+ state: stopped
diff --git a/roles/etcd_common/tasks/drop_etcdctl.yml b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
index 6cb456677..11bd2310e 100644
--- a/roles/etcd_common/tasks/drop_etcdctl.yml
+++ b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
@@ -3,7 +3,7 @@
package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
when: not openshift.common.is_atomic | bool
-- name: Configure etcd profile.d alises
+- name: Configure etcd profile.d aliases
template:
dest: "/etc/profile.d/etcdctl.sh"
src: etcdctl.sh.j2
diff --git a/roles/etcd/tasks/auxiliary/force_new_cluster.yml b/roles/etcd/tasks/auxiliary/force_new_cluster.yml
new file mode 100644
index 000000000..ae8a36130
--- /dev/null
+++ b/roles/etcd/tasks/auxiliary/force_new_cluster.yml
@@ -0,0 +1,31 @@
+---
+- name: Set ETCD_FORCE_NEW_CLUSTER=true on first etcd host
+ lineinfile:
+ line: "ETCD_FORCE_NEW_CLUSTER=true"
+ dest: /etc/etcd/etcd.conf
+ backup: true
+
+- name: Start etcd
+ systemd:
+ name: "{{ etcd_service }}"
+ state: started
+
+- name: Wait for cluster to become healthy after bringing up first member
+ command: >
+ etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} --endpoint https://{{ etcd_peer }}:{{ etcd_client_port }} cluster-health
+ register: l_etcd_migrate_health
+ until: l_etcd_migrate_health.rc == 0
+ retries: 3
+ delay: 30
+
+- name: Unset ETCD_FORCE_NEW_CLUSTER=true on first etcd host
+ lineinfile:
+ line: "ETCD_FORCE_NEW_CLUSTER=true"
+ dest: /etc/etcd/etcd.conf
+ state: absent
+ backup: true
+
+- name: Restart first etcd host
+ systemd:
+ name: "{{ etcd_service }}"
+ state: restarted
diff --git a/roles/etcd/tasks/backup.archive.yml b/roles/etcd/tasks/backup.archive.yml
new file mode 100644
index 000000000..6daa6dc51
--- /dev/null
+++ b/roles/etcd/tasks/backup.archive.yml
@@ -0,0 +1,3 @@
+---
+- include: backup/vars.yml
+- include: backup/archive.yml
diff --git a/roles/etcd/tasks/backup.copy.yml b/roles/etcd/tasks/backup.copy.yml
new file mode 100644
index 000000000..cc540cbca
--- /dev/null
+++ b/roles/etcd/tasks/backup.copy.yml
@@ -0,0 +1,3 @@
+---
+- include: backup/vars.yml
+- include: backup/copy.yml
diff --git a/roles/etcd/tasks/backup.fetch.yml b/roles/etcd/tasks/backup.fetch.yml
new file mode 100644
index 000000000..26ec15043
--- /dev/null
+++ b/roles/etcd/tasks/backup.fetch.yml
@@ -0,0 +1,3 @@
+---
+- include: backup/vars.yml
+- include: backup/fetch.yml
diff --git a/roles/etcd/tasks/backup.force_new_cluster.yml b/roles/etcd/tasks/backup.force_new_cluster.yml
new file mode 100644
index 000000000..24bd0540d
--- /dev/null
+++ b/roles/etcd/tasks/backup.force_new_cluster.yml
@@ -0,0 +1,12 @@
+---
+- include: backup/vars.yml
+
+- name: Move content of etcd backup under the etcd data directory
+ command: >
+ mv "{{ l_etcd_backup_dir }}/member" "{{ l_etcd_data_dir }}"
+
+- name: Set etcd group for the etcd data directory
+ command: >
+ chown -R etcd:etcd "{{ l_etcd_data_dir }}"
+
+- include: auxiliary/force_new_cluster.yml
diff --git a/roles/etcd/tasks/backup.unarchive.yml b/roles/etcd/tasks/backup.unarchive.yml
new file mode 100644
index 000000000..77a637360
--- /dev/null
+++ b/roles/etcd/tasks/backup.unarchive.yml
@@ -0,0 +1,3 @@
+---
+- include: backup/vars.yml
+- include: backup/unarchive.yml
diff --git a/roles/etcd/tasks/backup.yml b/roles/etcd/tasks/backup.yml
new file mode 100644
index 000000000..c0538e596
--- /dev/null
+++ b/roles/etcd/tasks/backup.yml
@@ -0,0 +1,2 @@
+---
+- include: backup/backup.yml
diff --git a/roles/etcd/tasks/backup/archive.yml b/roles/etcd/tasks/backup/archive.yml
new file mode 100644
index 000000000..f6aa68a6e
--- /dev/null
+++ b/roles/etcd/tasks/backup/archive.yml
@@ -0,0 +1,5 @@
+---
+- name: Archive backup
+ archive:
+ path: "{{ l_etcd_backup_dir }}"
+ dest: "{{ l_etcd_backup_dir }}.tgz"
diff --git a/roles/etcd_common/tasks/backup.yml b/roles/etcd/tasks/backup/backup.yml
index c1580640f..ec1a1989c 100644
--- a/roles/etcd_common/tasks/backup.yml
+++ b/roles/etcd/tasks/backup/backup.yml
@@ -1,21 +1,5 @@
---
-# set the etcd backup directory name here in case the tag or sufix consists of dynamic value that changes over time
-# e.g. openshift-backup-{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }} value will change every second so if the date changes
-# right after setting l_etcd_incontainer_backup_dir and before l_etcd_backup_dir facts, the backup directory name is different
-- set_fact:
- l_backup_dir_name: "openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}"
-
-- set_fact:
- l_etcd_data_dir: "{{ etcd_data_dir }}{{ '/etcd.etcd' if r_etcd_common_etcd_runtime == 'runc' else '' }}"
-
-- set_fact:
- l_etcd_incontainer_data_dir: "{{ etcd_data_dir }}"
-
-- set_fact:
- l_etcd_incontainer_backup_dir: "{{ l_etcd_incontainer_data_dir }}/{{ l_backup_dir_name }}"
-
-- set_fact:
- l_etcd_backup_dir: "{{ l_etcd_data_dir }}/{{ l_backup_dir_name }}"
+- include: vars.yml
# TODO: replace shell module with command and update later checks
- name: Check available disk space for etcd backup
@@ -36,7 +20,7 @@
- name: Abort if insufficient disk space for etcd backup
fail:
msg: >
- {{ l_etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+ {{ l_etcd_disk_usage.stdout|int*2 }} Kb disk space required for etcd backup,
{{ l_avail_disk.stdout }} Kb available.
when: l_etcd_disk_usage.stdout|int*2 > l_avail_disk.stdout|int
diff --git a/roles/etcd/tasks/backup/copy.yml b/roles/etcd/tasks/backup/copy.yml
new file mode 100644
index 000000000..16604bae8
--- /dev/null
+++ b/roles/etcd/tasks/backup/copy.yml
@@ -0,0 +1,5 @@
+---
+- name: Copy etcd backup
+ copy:
+ src: "{{ etcd_backup_sync_directory }}/{{ l_backup_dir_name }}.tgz"
+ dest: "{{ l_etcd_data_dir }}"
diff --git a/roles/etcd/tasks/backup/fetch.yml b/roles/etcd/tasks/backup/fetch.yml
new file mode 100644
index 000000000..610ce1960
--- /dev/null
+++ b/roles/etcd/tasks/backup/fetch.yml
@@ -0,0 +1,8 @@
+---
+- name: Fetch etcd backup
+ fetch:
+ src: "{{ l_etcd_backup_dir }}.tgz"
+ dest: "{{ etcd_backup_sync_directory }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
diff --git a/roles/etcd/tasks/backup/unarchive.yml b/roles/etcd/tasks/backup/unarchive.yml
new file mode 100644
index 000000000..6c75d00a7
--- /dev/null
+++ b/roles/etcd/tasks/backup/unarchive.yml
@@ -0,0 +1,14 @@
+---
+- shell: ls /var/lib/etcd
+ register: output
+
+- debug:
+ msg: "output: {{ output }}"
+
+- name: Unarchive backup
+ # can't use unarchive https://github.com/ansible/ansible/issues/30821
+ # unarchive:
+ # src: "{{ l_etcd_backup_dir }}.tgz"
+ # dest: "{{ l_etcd_backup_dir }}"
+ command: >
+ tar -xf "{{ l_etcd_backup_dir }}.tgz" -C "{{ l_etcd_data_dir }}"
diff --git a/roles/etcd/tasks/backup/vars.yml b/roles/etcd/tasks/backup/vars.yml
new file mode 100644
index 000000000..3c009f557
--- /dev/null
+++ b/roles/etcd/tasks/backup/vars.yml
@@ -0,0 +1,18 @@
+---
+# set the etcd backup directory name here in case the tag or suffix consists of a dynamic value that changes over time
+# e.g. openshift-backup-{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }} value will change every second so if the date changes
+# right after setting l_etcd_incontainer_backup_dir and before l_etcd_backup_dir facts, the backup directory name is different
+- set_fact:
+ l_backup_dir_name: "openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}"
+
+- set_fact:
+ l_etcd_data_dir: "{{ etcd_data_dir }}{{ '/etcd.etcd' if r_etcd_common_etcd_runtime == 'runc' else '' }}"
+
+- set_fact:
+ l_etcd_incontainer_data_dir: "{{ etcd_data_dir }}"
+
+- set_fact:
+ l_etcd_incontainer_backup_dir: "{{ l_etcd_incontainer_data_dir }}/{{ l_backup_dir_name }}"
+
+- set_fact:
+ l_etcd_backup_dir: "{{ l_etcd_data_dir }}/{{ l_backup_dir_name }}"
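As a worked example (assumed values, not from this change), the facts above compose the backup directory name from the tag and suffix:

  r_etcd_common_backup_tag: 'pre-upgrade-'
  r_etcd_common_backup_sufix_name: '20171002'
  # => l_backup_dir_name: openshift-backup-pre-upgrade-20171002
  # => l_etcd_backup_dir: <l_etcd_data_dir>/openshift-backup-pre-upgrade-20171002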
diff --git a/roles/etcd/tasks/backup_ca_certificates.yml b/roles/etcd/tasks/backup_ca_certificates.yml
new file mode 100644
index 000000000..a41b032f3
--- /dev/null
+++ b/roles/etcd/tasks/backup_ca_certificates.yml
@@ -0,0 +1,2 @@
+---
+- include: certificates/backup_ca_certificates.yml
diff --git a/roles/etcd/tasks/backup_generated_certificates.yml b/roles/etcd/tasks/backup_generated_certificates.yml
new file mode 100644
index 000000000..8cf2a10cc
--- /dev/null
+++ b/roles/etcd/tasks/backup_generated_certificates.yml
@@ -0,0 +1,2 @@
+---
+- include: certificates/backup_generated_certificates.yml
diff --git a/roles/etcd/tasks/backup_master_etcd_certificates.yml b/roles/etcd/tasks/backup_master_etcd_certificates.yml
new file mode 100644
index 000000000..129e1831c
--- /dev/null
+++ b/roles/etcd/tasks/backup_master_etcd_certificates.yml
@@ -0,0 +1,2 @@
+---
+- include: certificates/backup_master_etcd_certificates.yml
diff --git a/roles/etcd/tasks/backup_server_certificates.yml b/roles/etcd/tasks/backup_server_certificates.yml
new file mode 100644
index 000000000..267ffeb4d
--- /dev/null
+++ b/roles/etcd/tasks/backup_server_certificates.yml
@@ -0,0 +1,2 @@
+---
+- include: certificates/backup_server_certificates.yml
diff --git a/roles/etcd/tasks/ca.yml b/roles/etcd/tasks/ca.yml
new file mode 100644
index 000000000..cca1e9ad7
--- /dev/null
+++ b/roles/etcd/tasks/ca.yml
@@ -0,0 +1,2 @@
+---
+- include: certificates/deploy_ca.yml
diff --git a/roles/etcd/tasks/certificates/backup_ca_certificates.yml b/roles/etcd/tasks/certificates/backup_ca_certificates.yml
new file mode 100644
index 000000000..f60eb82ef
--- /dev/null
+++ b/roles/etcd/tasks/certificates/backup_ca_certificates.yml
@@ -0,0 +1,12 @@
+---
+- name: Determine if CA certificate directory exists
+ stat:
+ path: "{{ etcd_ca_dir }}"
+ register: etcd_ca_certs_dir_stat
+- name: Backup generated etcd certificates
+ command: >
+ tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz
+ {{ etcd_ca_dir }}
+ args:
+ warn: no
+ when: etcd_ca_certs_dir_stat.stat.exists | bool
diff --git a/roles/etcd/tasks/certificates/backup_generated_certificates.yml b/roles/etcd/tasks/certificates/backup_generated_certificates.yml
new file mode 100644
index 000000000..6a24cfcb3
--- /dev/null
+++ b/roles/etcd/tasks/certificates/backup_generated_certificates.yml
@@ -0,0 +1,13 @@
+---
+- name: Determine if generated etcd certificates exist
+ stat:
+ path: "{{ etcd_conf_dir }}/generated_certs"
+ register: etcd_generated_certs_dir_stat
+
+- name: Backup generated etcd certificates
+ command: >
+ tar -czf {{ etcd_conf_dir }}/etcd-generated-certificate-backup-{{ ansible_date_time.epoch }}.tgz
+ {{ etcd_conf_dir }}/generated_certs
+ args:
+ warn: no
+ when: etcd_generated_certs_dir_stat.stat.exists | bool
diff --git a/roles/etcd/tasks/certificates/backup_master_etcd_certificates.yml b/roles/etcd/tasks/certificates/backup_master_etcd_certificates.yml
new file mode 100644
index 000000000..e65b3e5a2
--- /dev/null
+++ b/roles/etcd/tasks/certificates/backup_master_etcd_certificates.yml
@@ -0,0 +1,7 @@
+---
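+# shell (rather than command) is used so the master.etcd-* glob expands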
+- name: Backup master etcd certificates
+ shell: >
+ tar -czvf /etc/origin/master/master-etcd-certificate-backup-{{ ansible_date_time.epoch }}.tgz
+ /etc/origin/master/master.etcd-*
+ args:
+ warn: no
diff --git a/roles/etcd/tasks/certificates/backup_server_certificates.yml b/roles/etcd/tasks/certificates/backup_server_certificates.yml
new file mode 100644
index 000000000..8e6cc6965
--- /dev/null
+++ b/roles/etcd/tasks/certificates/backup_server_certificates.yml
@@ -0,0 +1,11 @@
+---
+- name: Backup etcd certificates
+ command: >
+ tar -czvf /etc/etcd/etcd-server-certificate-backup-{{ ansible_date_time.epoch }}.tgz
+ {{ etcd_conf_dir }}/ca.crt
+ {{ etcd_conf_dir }}/server.crt
+ {{ etcd_conf_dir }}/server.key
+ {{ etcd_conf_dir }}/peer.crt
+ {{ etcd_conf_dir }}/peer.key
+ args:
+ warn: no
diff --git a/roles/etcd_ca/tasks/main.yml b/roles/etcd/tasks/certificates/deploy_ca.yml
index b4dea4a07..3d32290a2 100644
--- a/roles/etcd_ca/tasks/main.yml
+++ b/roles/etcd/tasks/certificates/deploy_ca.yml
@@ -1,6 +1,8 @@
---
- name: Install openssl
- package: name=openssl state=present
+ package:
+ name: openssl
+ state: present
when: not etcd_is_atomic | bool
delegate_to: "{{ etcd_ca_host }}"
run_once: true
diff --git a/roles/etcd/tasks/certificates/distribute_ca.yml b/roles/etcd/tasks/certificates/distribute_ca.yml
new file mode 100644
index 000000000..632ac15dd
--- /dev/null
+++ b/roles/etcd/tasks/certificates/distribute_ca.yml
@@ -0,0 +1,47 @@
+---
+- name: Create a tarball of the etcd ca certs
+ command: >
+ tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz
+ -C {{ etcd_ca_dir }} .
+ args:
+ creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
+ warn: no
+ delegate_to: "{{ etcd_ca_host }}"
+ run_once: true
+
+- name: Retrieve etcd ca cert tarball
+ fetch:
+ src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
+ dest: "{{ etcd_sync_cert_dir }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ delegate_to: "{{ etcd_ca_host }}"
+ run_once: true
+
+- name: Ensure ca directory exists
+ file:
+ path: "{{ etcd_ca_dir }}"
+ state: directory
+
+- name: Unarchive etcd ca cert tarballs
+ unarchive:
+ src: "{{ etcd_sync_cert_dir }}/{{ etcd_ca_name }}.tgz"
+ dest: "{{ etcd_ca_dir }}"
+
+- name: Read current etcd CA
+ slurp:
+ src: "{{ etcd_conf_dir }}/ca.crt"
+ register: g_current_etcd_ca_output
+
+- name: Read new etcd CA
+ slurp:
+ src: "{{ etcd_ca_dir }}/ca.crt"
+ register: g_new_etcd_ca_output
+
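+# Write a CA bundle (the new CA followed by the current CA) to both the conf and ca directories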
+- copy:
+ content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}"
+ dest: "{{ item }}/ca.crt"
+ with_items:
+ - "{{ etcd_conf_dir }}"
+ - "{{ etcd_ca_dir }}"
diff --git a/roles/etcd_client_certificates/tasks/main.yml b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml
index bbd29ece1..119071a72 100644
--- a/roles/etcd_client_certificates/tasks/main.yml
+++ b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml
@@ -9,7 +9,7 @@
- fail:
msg: >
CA certificate {{ etcd_ca_cert }} doesn't exist on CA host
- {{ etcd_ca_host }}. Apply 'etcd_ca' role to
+ {{ etcd_ca_host }}. Apply 'etcd_ca' action from `etcd` role to
{{ etcd_ca_host }}.
when: not g_ca_cert_stat_result.stat.exists | bool
run_once: true
diff --git a/roles/etcd_server_certificates/tasks/main.yml b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
index 4795188a6..26492fb3c 100644
--- a/roles/etcd_server_certificates/tasks/main.yml
+++ b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
@@ -1,6 +1,8 @@
---
- name: Install etcd
- package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
+ package:
+ name: "etcd{{ '-' + etcd_version if etcd_version is defined else '' }}"
+ state: present
when: not etcd_is_containerized | bool
- name: Check status of etcd certificates
diff --git a/roles/etcd/tasks/certificates/remove_ca_certificates.yml b/roles/etcd/tasks/certificates/remove_ca_certificates.yml
new file mode 100644
index 000000000..4a86eb60d
--- /dev/null
+++ b/roles/etcd/tasks/certificates/remove_ca_certificates.yml
@@ -0,0 +1,5 @@
+---
+- name: Remove CA certificate directory
+ file:
+ path: "{{ etcd_ca_dir }}"
+ state: absent
diff --git a/roles/etcd/tasks/certificates/remove_generated_certificates.yml b/roles/etcd/tasks/certificates/remove_generated_certificates.yml
new file mode 100644
index 000000000..993b18de2
--- /dev/null
+++ b/roles/etcd/tasks/certificates/remove_generated_certificates.yml
@@ -0,0 +1,5 @@
+---
+- name: Remove generated etcd certificates
+ file:
+ path: "{{ etcd_conf_dir }}/generated_certs"
+ state: absent
diff --git a/roles/etcd/tasks/certificates/retrieve_ca_certificates.yml b/roles/etcd/tasks/certificates/retrieve_ca_certificates.yml
new file mode 100644
index 000000000..70b5c6523
--- /dev/null
+++ b/roles/etcd/tasks/certificates/retrieve_ca_certificates.yml
@@ -0,0 +1,8 @@
+---
+- name: Retrieve etcd CA certificate
+ fetch:
+ src: "{{ etcd_conf_dir }}/ca.crt"
+ dest: "{{ etcd_sync_cert_dir }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
diff --git a/roles/etcd/tasks/check_cluster_health.yml b/roles/etcd/tasks/check_cluster_health.yml
new file mode 100644
index 000000000..75c110972
--- /dev/null
+++ b/roles/etcd/tasks/check_cluster_health.yml
@@ -0,0 +1,2 @@
+---
+- include: migration/check_cluster_health.yml
diff --git a/roles/etcd/tasks/clean_data.yml b/roles/etcd/tasks/clean_data.yml
new file mode 100644
index 000000000..d131ffd21
--- /dev/null
+++ b/roles/etcd/tasks/clean_data.yml
@@ -0,0 +1,2 @@
+---
+- include: auxiliary/clean_data.yml
diff --git a/roles/etcd/tasks/client_certificates.yml b/roles/etcd/tasks/client_certificates.yml
new file mode 100644
index 000000000..2f4108a0d
--- /dev/null
+++ b/roles/etcd/tasks/client_certificates.yml
@@ -0,0 +1,2 @@
+---
+- include: certificates/fetch_client_certificates_from_ca.yml
diff --git a/roles/etcd/tasks/disable_etcd.yml b/roles/etcd/tasks/disable_etcd.yml
new file mode 100644
index 000000000..9202e6e48
--- /dev/null
+++ b/roles/etcd/tasks/disable_etcd.yml
@@ -0,0 +1,2 @@
+---
+- include: auxiliary/disable_etcd.yml
diff --git a/roles/etcd/tasks/distribute_ca b/roles/etcd/tasks/distribute_ca
new file mode 100644
index 000000000..040c5f7af
--- /dev/null
+++ b/roles/etcd/tasks/distribute_ca
@@ -0,0 +1,2 @@
+---
+- include: certificates/distribute_ca.yml
diff --git a/roles/etcd/tasks/drop_etcdctl.yml b/roles/etcd/tasks/drop_etcdctl.yml
new file mode 100644
index 000000000..4c1f609f7
--- /dev/null
+++ b/roles/etcd/tasks/drop_etcdctl.yml
@@ -0,0 +1,2 @@
+---
+- include: auxiliary/drop_etcdctl.yml
diff --git a/roles/etcd/tasks/fetch_backup.yml b/roles/etcd/tasks/fetch_backup.yml
new file mode 100644
index 000000000..513eed17a
--- /dev/null
+++ b/roles/etcd/tasks/fetch_backup.yml
@@ -0,0 +1,8 @@
+---
+- include: backup/vars.yml
+
+- include: backup/archive.yml
+
+- include: backup/sync_backup.yml
+
+- include: backup/
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 78e543ef1..3e69af314 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -14,10 +14,7 @@
package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
when: not etcd_is_containerized | bool
-- include_role:
- name: etcd_common
- vars:
- r_etcd_common_action: drop_etcdctl
+- include: drop_etcdctl.yml
when:
- openshift_etcd_etcdctl_profile | default(true) | bool
diff --git a/roles/etcd/tasks/migrate.add_ttls.yml b/roles/etcd/tasks/migrate.add_ttls.yml
new file mode 100644
index 000000000..bc27e4ea1
--- /dev/null
+++ b/roles/etcd/tasks/migrate.add_ttls.yml
@@ -0,0 +1,2 @@
+---
+- include: migration/add_ttls.yml
diff --git a/roles/etcd/tasks/migrate.configure_master.yml b/roles/etcd/tasks/migrate.configure_master.yml
new file mode 100644
index 000000000..3ada6e362
--- /dev/null
+++ b/roles/etcd/tasks/migrate.configure_master.yml
@@ -0,0 +1,2 @@
+---
+- include: migration/configure_master.yml
diff --git a/roles/etcd/tasks/migrate.pre_check.yml b/roles/etcd/tasks/migrate.pre_check.yml
new file mode 100644
index 000000000..124d21561
--- /dev/null
+++ b/roles/etcd/tasks/migrate.pre_check.yml
@@ -0,0 +1,2 @@
+---
+- include: migration/check.yml
diff --git a/roles/etcd/tasks/migrate.yml b/roles/etcd/tasks/migrate.yml
new file mode 100644
index 000000000..5d5385873
--- /dev/null
+++ b/roles/etcd/tasks/migrate.yml
@@ -0,0 +1,2 @@
+---
+- include: migration/migrate.yml
diff --git a/roles/etcd_migrate/tasks/add_ttls.yml b/roles/etcd/tasks/migration/add_ttls.yml
index c10465af9..14625e49e 100644
--- a/roles/etcd_migrate/tasks/add_ttls.yml
+++ b/roles/etcd/tasks/migration/add_ttls.yml
@@ -8,6 +8,7 @@
accessTokenMaxAgeSeconds: "{{ (g_master_config_output.content|b64decode|from_yaml).oauthConfig.tokenConfig.accessTokenMaxAgeSeconds | default(86400) }}"
authroizeTokenMaxAgeSeconds: "{{ (g_master_config_output.content|b64decode|from_yaml).oauthConfig.tokenConfig.authroizeTokenMaxAgeSeconds | default(500) }}"
controllerLeaseTTL: "{{ (g_master_config_output.content|b64decode|from_yaml).controllerLeaseTTL | default(30) }}"
+
- name: Re-introduce leases (as a replacement for key TTLs)
command: >
oadm migrate etcd-ttl \
diff --git a/roles/etcd_migrate/tasks/check.yml b/roles/etcd/tasks/migration/check.yml
index 0804d9e1c..0804d9e1c 100644
--- a/roles/etcd_migrate/tasks/check.yml
+++ b/roles/etcd/tasks/migration/check.yml
diff --git a/roles/etcd_migrate/tasks/check_cluster_health.yml b/roles/etcd/tasks/migration/check_cluster_health.yml
index 201d83f99..201d83f99 100644
--- a/roles/etcd_migrate/tasks/check_cluster_health.yml
+++ b/roles/etcd/tasks/migration/check_cluster_health.yml
diff --git a/roles/etcd_migrate/tasks/check_cluster_status.yml b/roles/etcd/tasks/migration/check_cluster_status.yml
index b69fb5a52..b69fb5a52 100644
--- a/roles/etcd_migrate/tasks/check_cluster_status.yml
+++ b/roles/etcd/tasks/migration/check_cluster_status.yml
diff --git a/roles/etcd_migrate/tasks/configure.yml b/roles/etcd/tasks/migration/configure_master.yml
index a305d5bf3..a305d5bf3 100644
--- a/roles/etcd_migrate/tasks/configure.yml
+++ b/roles/etcd/tasks/migration/configure_master.yml
diff --git a/roles/etcd_migrate/tasks/migrate.yml b/roles/etcd/tasks/migration/migrate.yml
index 54a9c74ff..54a9c74ff 100644
--- a/roles/etcd_migrate/tasks/migrate.yml
+++ b/roles/etcd/tasks/migration/migrate.yml
diff --git a/roles/etcd/tasks/remove_ca_certificates.yml b/roles/etcd/tasks/remove_ca_certificates.yml
new file mode 100644
index 000000000..36df1a1cc
--- /dev/null
+++ b/roles/etcd/tasks/remove_ca_certificates.yml
@@ -0,0 +1,2 @@
+---
+- include: certificates/remove_ca_certificates.yml
diff --git a/roles/etcd/tasks/remove_generated_certificates.yml b/roles/etcd/tasks/remove_generated_certificates.yml
new file mode 100644
index 000000000..b10a4b32d
--- /dev/null
+++ b/roles/etcd/tasks/remove_generated_certificates.yml
@@ -0,0 +1,2 @@
+---
+- include: certificates/remove_generated_certificates.yml
diff --git a/roles/etcd/tasks/retrieve_ca_certificates.yml b/roles/etcd/tasks/retrieve_ca_certificates.yml
new file mode 100644
index 000000000..bd6c4ec85
--- /dev/null
+++ b/roles/etcd/tasks/retrieve_ca_certificates.yml
@@ -0,0 +1,2 @@
+---
+- include: certificates/retrieve_ca_certificates.yml
diff --git a/roles/etcd/tasks/server_certificates.yml b/roles/etcd/tasks/server_certificates.yml
new file mode 100644
index 000000000..ae26079f9
--- /dev/null
+++ b/roles/etcd/tasks/server_certificates.yml
@@ -0,0 +1,6 @@
+---
+- include: ca.yml
+ when:
+ - etcd_ca_setup | default(True) | bool
+
+- include: certificates/fetch_server_certificates_from_ca.yml
diff --git a/roles/etcd_upgrade/tasks/upgrade_image.yml b/roles/etcd/tasks/upgrade/upgrade_image.yml
index 136ec1142..24071f9ad 100644
--- a/roles/etcd_upgrade/tasks/upgrade_image.yml
+++ b/roles/etcd/tasks/upgrade/upgrade_image.yml
@@ -20,6 +20,11 @@
regexp: "{{ current_image.stdout }}$"
replace: "{{ new_etcd_image }}"
+- lineinfile:
+ destfile: "{{ etcd_conf_file }}"
+ regexp: '^ETCD_QUOTA_BACKEND_BYTES='
+ line: "ETCD_QUOTA_BACKEND_BYTES={{ etcd_quota_backend_bytes }}"
+
- name: Restart etcd_container
systemd:
name: "{{ etcd_service }}"
@@ -29,8 +34,15 @@
## TODO: probably should just move this into the backup playbooks, also this
## will fail on atomic host. We need to revisit how to do etcd backups there as
## the container may be newer than etcdctl on the host. Assumes etcd3 obsoletes etcd (7.3.1)
+- name: Detecting Atomic Host Operating System
+ stat:
+ path: /run/ostree-booted
+ register: l_ostree_booted
+
- name: Upgrade etcd for etcdctl when not atomic
- package: name=etcd state=latest
+ package:
+ name: etcd
+ state: latest
when: not l_ostree_booted.stat.exists | bool
- name: Verify cluster is healthy
diff --git a/roles/etcd_upgrade/tasks/upgrade_rpm.yml b/roles/etcd/tasks/upgrade/upgrade_rpm.yml
index 324b69605..505e28afb 100644
--- a/roles/etcd_upgrade/tasks/upgrade_rpm.yml
+++ b/roles/etcd/tasks/upgrade/upgrade_rpm.yml
@@ -19,6 +19,11 @@
name: "{{ l_etcd_target_package }}"
state: latest
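+# Also set ETCD_QUOTA_BACKEND_BYTES in the etcd config as part of the RPM upgrade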
+- lineinfile:
+ destfile: "{{ etcd_conf_file }}"
+ regexp: '^ETCD_QUOTA_BACKEND_BYTES='
+ line: "ETCD_QUOTA_BACKEND_BYTES={{ etcd_quota_backend_bytes }}"
+
- name: Restart etcd
service:
name: "{{ etcd_service }}"
diff --git a/roles/etcd/tasks/upgrade_image.yml b/roles/etcd/tasks/upgrade_image.yml
new file mode 100644
index 000000000..9e69027eb
--- /dev/null
+++ b/roles/etcd/tasks/upgrade_image.yml
@@ -0,0 +1,2 @@
+---
+- include: upgrade/upgrade_image.yml
diff --git a/roles/etcd/tasks/upgrade_rpm.yml b/roles/etcd/tasks/upgrade_rpm.yml
new file mode 100644
index 000000000..29603d2b6
--- /dev/null
+++ b/roles/etcd/tasks/upgrade_rpm.yml
@@ -0,0 +1,2 @@
+---
+- include: upgrade/upgrade_rpm.yml
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
index 2c2803aee..8462bb4c8 100644
--- a/roles/etcd/templates/etcd.conf.j2
+++ b/roles/etcd/templates/etcd.conf.j2
@@ -45,6 +45,7 @@ ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }}
#ETCD_STRICT_RECONFIG_CHECK="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
#ETCD_ENABLE_V2="true"
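+# Backend size quota in bytes; etcd raises an alarm and stops accepting writes once it is exceeded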
+ETCD_QUOTA_BACKEND_BYTES={{ etcd_quota_backend_bytes }}
#[proxy]
#ETCD_PROXY=off
diff --git a/roles/etcd_common/templates/etcdctl.sh.j2 b/roles/etcd/templates/etcdctl.sh.j2
index ac7d9c72f..ac7d9c72f 100644
--- a/roles/etcd_common/templates/etcdctl.sh.j2
+++ b/roles/etcd/templates/etcdctl.sh.j2
diff --git a/roles/etcd_ca/templates/openssl_append.j2 b/roles/etcd/templates/openssl_append.j2
index f28316fc2..f28316fc2 100644
--- a/roles/etcd_ca/templates/openssl_append.j2
+++ b/roles/etcd/templates/openssl_append.j2
diff --git a/roles/etcd_ca/README.md b/roles/etcd_ca/README.md
deleted file mode 100644
index 60a880e30..000000000
--- a/roles/etcd_ca/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-etcd_ca
-========================
-
-TODO
-
-Requirements
-------------
-
-TODO
-
-Role Variables
---------------
-
-TODO
-
-Dependencies
-------------
-
-TODO
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License Version 2.0
-
-Author Information
-------------------
-
-Scott Dodson (sdodson@redhat.com)
diff --git a/roles/etcd_client_certificates/README.md b/roles/etcd_client_certificates/README.md
deleted file mode 100644
index 269d5296d..000000000
--- a/roles/etcd_client_certificates/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-OpenShift Etcd Certificates
-===========================
-
-TODO
-
-Requirements
-------------
-
-TODO
-
-Role Variables
---------------
-
-TODO
-
-Dependencies
-------------
-
-TODO
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License Version 2.0
-
-Author Information
-------------------
-
-Scott Dodson (sdodson@redhat.com)
diff --git a/roles/etcd_client_certificates/meta/main.yml b/roles/etcd_client_certificates/meta/main.yml
deleted file mode 100644
index efebdb599..000000000
--- a/roles/etcd_client_certificates/meta/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-galaxy_info:
- author: Jason DeTiberus
- description: Etcd Client Certificates
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 2.1
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
- - system
-dependencies:
-- role: etcd_common
diff --git a/roles/etcd_common/README.md b/roles/etcd_common/README.md
deleted file mode 100644
index d1c3a6602..000000000
--- a/roles/etcd_common/README.md
+++ /dev/null
@@ -1,53 +0,0 @@
-etcd_common
-========================
-
-Common resources for dependent etcd roles. E.g. default variables for:
-* config directories
-* certificates
-* ports
-* other settings
-
-Or `delegated_serial_command` ansible module for executing a command on a remote node. E.g.
-
-```yaml
-- delegated_serial_command:
- command: /usr/bin/make_database.sh arg1 arg2
- creates: /path/to/database
-```
-
-Or etcdctl.yml playbook for installation of `etcdctl` aliases on a node (see example).
-
-Dependencies
-------------
-
-openshift-repos
-
-Example Playbook
-----------------
-
-**Drop etcdctl aliases**
-
-```yaml
-- include_role:
- name: etcd_common
- tasks_from: etcdctl
-```
-
-**Get access to common variables**
-
-```yaml
-# meta.yml of etcd
-...
-dependencies:
-- { role: etcd_common }
-```
-
-License
--------
-
-Apache License Version 2.0
-
-Author Information
-------------------
-
-Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
deleted file mode 100644
index b67411f40..000000000
--- a/roles/etcd_common/defaults/main.yml
+++ /dev/null
@@ -1,75 +0,0 @@
----
-# Default action when calling this role
-r_etcd_common_action: noop
-r_etcd_common_backup_tag: ''
-r_etcd_common_backup_sufix_name: ''
-
-# runc, docker, host
-r_etcd_common_etcd_runtime: "docker"
-r_etcd_common_embedded_etcd: false
-
-# etcd run on a host => use etcdctl command directly
-# etcd run as a docker container => use docker exec
-# etcd run as a runc container => use runc exec
-r_etcd_common_etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_common_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}"
-
-# etcd server vars
-etcd_conf_dir: '/etc/etcd'
-r_etcd_common_system_container_host_dir: /var/lib/etcd/etcd.etcd
-etcd_system_container_conf_dir: /var/lib/etcd/etc
-etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf"
-etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
-etcd_cert_file: "{{ etcd_conf_dir }}/server.crt"
-etcd_key_file: "{{ etcd_conf_dir }}/server.key"
-etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt"
-etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt"
-etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key"
-
-# etcd ca vars
-etcd_ca_dir: "{{ etcd_conf_dir}}/ca"
-etcd_generated_certs_dir: "{{ etcd_conf_dir }}/generated_certs"
-etcd_ca_cert: "{{ etcd_ca_dir }}/ca.crt"
-etcd_ca_key: "{{ etcd_ca_dir }}/ca.key"
-etcd_openssl_conf: "{{ etcd_ca_dir }}/openssl.cnf"
-etcd_ca_name: etcd_ca
-etcd_req_ext: etcd_v3_req
-etcd_ca_exts_peer: etcd_v3_ca_peer
-etcd_ca_exts_server: etcd_v3_ca_server
-etcd_ca_exts_self: etcd_v3_ca_self
-etcd_ca_exts_client: etcd_v3_ca_client
-etcd_ca_crl_dir: "{{ etcd_ca_dir }}/crl"
-etcd_ca_new_certs_dir: "{{ etcd_ca_dir }}/certs"
-etcd_ca_db: "{{ etcd_ca_dir }}/index.txt"
-etcd_ca_serial: "{{ etcd_ca_dir }}/serial"
-etcd_ca_crl_number: "{{ etcd_ca_dir }}/crlnumber"
-etcd_ca_default_days: 1825
-
-r_etcd_common_master_peer_cert_file: /etc/origin/master/master.etcd-client.crt
-r_etcd_common_master_peer_key_file: /etc/origin/master/master.etcd-client.key
-r_etcd_common_master_peer_ca_file: /etc/origin/master/master.etcd-ca.crt
-
-# etcd server & certificate vars
-etcd_hostname: "{{ inventory_hostname }}"
-etcd_ip: "{{ ansible_default_ipv4.address }}"
-etcd_is_atomic: False
-etcd_is_containerized: False
-etcd_is_thirdparty: False
-
-# etcd dir vars
-etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}"
-
-# etcd ports and protocols
-etcd_client_port: 2379
-etcd_peer_port: 2380
-etcd_url_scheme: http
-etcd_peer_url_scheme: http
-
-etcd_initial_cluster_state: new
-etcd_initial_cluster_token: etcd-cluster-1
-
-etcd_initial_advertise_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
-etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
-etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
-etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
-
-etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d"
diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml
deleted file mode 100644
index 6ed87e6c7..000000000
--- a/roles/etcd_common/tasks/main.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- name: Fail if invalid r_etcd_common_action provided
- fail:
- msg: "etcd_common role can only be called with 'noop' or 'backup' or 'drop_etcdctl'"
- when: r_etcd_common_action not in ['noop', 'backup', 'drop_etcdctl']
-
-- name: Include main action task file
- include: "{{ r_etcd_common_action }}.yml"
- when: r_etcd_common_action != "noop"
diff --git a/roles/etcd_common/tasks/noop.yml b/roles/etcd_common/tasks/noop.yml
deleted file mode 100644
index a88d78235..000000000
--- a/roles/etcd_common/tasks/noop.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-# This is file is here because the usage of tags, specifically `pre_upgrade`
-# breaks the functionality of this role.
-# See https://bugzilla.redhat.com/show_bug.cgi?id=1464025
diff --git a/roles/etcd_common/vars/main.yml b/roles/etcd_common/vars/main.yml
deleted file mode 100644
index 00d697776..000000000
--- a/roles/etcd_common/vars/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-etcd_service: "{{ 'etcd_container' if r_etcd_common_etcd_runtime == 'docker' else 'etcd' }}"
-# Location of the service file is fixed and not meant to be changed
-etcd_service_file: "/etc/systemd/system/{{ etcd_service }}.service"
diff --git a/roles/etcd_migrate/README.md b/roles/etcd_migrate/README.md
deleted file mode 100644
index 369e78ff2..000000000
--- a/roles/etcd_migrate/README.md
+++ /dev/null
@@ -1,53 +0,0 @@
-Role Name
-=========
-
-Offline etcd migration of data from v2 to v3
-
-Requirements
-------------
-
-It is expected all consumers of the etcd data are not accessing the data.
-Otherwise the migrated data can be out-of-sync with the v2 and can result in unhealthy etcd cluster.
-
-The role itself is responsible for:
-- checking etcd cluster health and raft status before the migration
-- checking of presence of any v3 data (in that case the migration is stopped)
-- migration of v2 data to v3 data (including attaching leases of keys prefixed with "/kubernetes.io/events" and "/kubernetes.io/masterleases" string)
-- validation of migrated data (all v2 keys and in v3 keys and are set to the identical value)
-
-The migration itself requires an etcd member to be down in the process. Once the migration is done, the etcd member is started.
-
-Role Variables
---------------
-
-TBD
-
-Dependencies
-------------
-
-- etcd_common
-- lib_utils
-
-Example Playbook
-----------------
-
-```yaml
-- name: Migrate etcd data from v2 to v3
- hosts: oo_etcd_to_config
- gather_facts: no
- tasks:
- - include_role:
- name: openshift_etcd_migrate
- vars:
- etcd_peer: "{{ ansible_default_ipv4.address }}"
-```
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-Jan Chaloupka (jchaloup@redhat.com)
diff --git a/roles/etcd_migrate/defaults/main.yml b/roles/etcd_migrate/defaults/main.yml
deleted file mode 100644
index 05cf41fbb..000000000
--- a/roles/etcd_migrate/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# Default action when calling this role, choices: check, migrate, configure
-r_etcd_migrate_action: migrate
diff --git a/roles/etcd_migrate/meta/main.yml b/roles/etcd_migrate/meta/main.yml
deleted file mode 100644
index f3cabbef6..000000000
--- a/roles/etcd_migrate/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-galaxy_info:
- author: Jan Chaloupka
- description: Etcd migration
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 2.1
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
- - system
-dependencies:
-- { role: etcd_common }
-- { role: lib_utils }
diff --git a/roles/etcd_migrate/tasks/main.yml b/roles/etcd_migrate/tasks/main.yml
deleted file mode 100644
index e82f6a6b4..000000000
--- a/roles/etcd_migrate/tasks/main.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: Fail if invalid r_etcd_migrate_action provided
- fail:
- msg: "etcd_migrate role can only be called with 'check', 'migrate', 'configure', 'add_ttls', or 'clean_data'"
- when: r_etcd_migrate_action not in ['check', 'migrate', 'configure', 'add_ttls', 'clean_data']
-
-- name: Include main action task file
- include: "{{ r_etcd_migrate_action }}.yml"
-
-# 2. migrate v2 datadir into v3:
-# ETCDCTL_API=3 ./etcdctl migrate --data-dir=${data_dir} --no-ttl
-# backup the etcd datadir first
-# Provide a way for an operator to specify transformer
-
-# 3. re-configure OpenShift master at /etc/origin/master/master-config.yml
-# set storage-backend to “etcd3”
-# 4. we could leave the master restart to current logic (there is already the code ready (single vs. HA master))
-
-# Run
-# etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt --endpoint https://172.16.186.45:2379 cluster-health
-# to check the cluster health (from the etcdctl.sh aliases file)
-
-# Another assumption:
-# - in order to migrate all etcd v2 data into v3, we need to shut down the cluster (let's verify that on Wednesday meeting)
-# -
diff --git a/roles/etcd_server_certificates/README.md b/roles/etcd_server_certificates/README.md
deleted file mode 100644
index 269d5296d..000000000
--- a/roles/etcd_server_certificates/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-OpenShift Etcd Certificates
-===========================
-
-TODO
-
-Requirements
-------------
-
-TODO
-
-Role Variables
---------------
-
-TODO
-
-Dependencies
-------------
-
-TODO
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License Version 2.0
-
-Author Information
-------------------
-
-Scott Dodson (sdodson@redhat.com)
diff --git a/roles/etcd_server_certificates/meta/main.yml b/roles/etcd_server_certificates/meta/main.yml
deleted file mode 100644
index 4b6013a49..000000000
--- a/roles/etcd_server_certificates/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-galaxy_info:
- author: Jason DeTiberus
- description: Etcd Server Certificates
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 2.1
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
- - system
-dependencies:
-- role: etcd_ca
- when: (etcd_ca_setup | default(True) | bool)
diff --git a/roles/etcd_upgrade/defaults/main.yml b/roles/etcd_upgrade/defaults/main.yml
deleted file mode 100644
index 61bbba225..000000000
--- a/roles/etcd_upgrade/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-r_etcd_upgrade_action: upgrade
-r_etcd_upgrade_mechanism: rpm
diff --git a/roles/etcd_upgrade/meta/main.yml b/roles/etcd_upgrade/meta/main.yml
deleted file mode 100644
index afdb0267f..000000000
--- a/roles/etcd_upgrade/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-galaxy_info:
- author: Jan Chaloupka
- description:
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 1.9
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
- - system
-dependencies:
-- role: etcd_common
- r_etcd_common_embedded_etcd: "{{ r_etcd_upgrade_embedded_etcd }}"
diff --git a/roles/etcd_upgrade/tasks/main.yml b/roles/etcd_upgrade/tasks/main.yml
deleted file mode 100644
index 129c69d6b..000000000
--- a/roles/etcd_upgrade/tasks/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# INPUT r_etcd_upgrade_action
-- name: Fail if invalid etcd_upgrade_action provided
- fail:
- msg: "etcd_upgrade role can only be called with 'upgrade'"
- when:
- - r_etcd_upgrade_action not in ['upgrade']
-
-- name: Detecting Atomic Host Operating System
- stat:
- path: /run/ostree-booted
- register: l_ostree_booted
-
-- include: "{{ r_etcd_upgrade_action }}.yml"
diff --git a/roles/etcd_upgrade/tasks/upgrade.yml b/roles/etcd_upgrade/tasks/upgrade.yml
deleted file mode 100644
index 420c9638e..000000000
--- a/roles/etcd_upgrade/tasks/upgrade.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-# INPUT r_etcd_upgrade_version
-# INPUT r_etcd_upgrade_mechanism
-- name: Failt if r_etcd_upgrade_mechanism is not set during upgrade
- fail:
- msg: "r_etcd_upgrade_mechanism can be only set to 'rpm' or 'image'"
- when:
- - r_etcd_upgrade_mechanism not in ['rpm', 'image']
-
-- name: "Upgrade {{ r_etcd_upgrade_mechanism }} based etcd"
- include: upgrade_{{ r_etcd_upgrade_mechanism }}.yml
diff --git a/roles/etcd_upgrade/vars/main.yml b/roles/etcd_upgrade/vars/main.yml
deleted file mode 100644
index 5ed919d42..000000000
--- a/roles/etcd_upgrade/vars/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# EXPECTS etcd_peer
-etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}"
diff --git a/roles/flannel/README.md b/roles/flannel/README.md
index 0c7347603..b9e15e6e0 100644
--- a/roles/flannel/README.md
+++ b/roles/flannel/README.md
@@ -27,8 +27,6 @@ Role Variables
Dependencies
------------
-openshift_facts
-
Example Playbook
----------------
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
index 02f5a5f64..889069485 100644
--- a/roles/flannel/handlers/main.yml
+++ b/roles/flannel/handlers/main.yml
@@ -12,3 +12,12 @@
until: not l_docker_restart_docker_in_flannel_result | failed
retries: 3
delay: 30
+
+- name: restart node
+ systemd:
+ name: "{{ openshift.common.service_type }}-node"
+ state: restarted
+ register: l_restart_node_result
+ until: not l_restart_node_result | failed
+ retries: 3
+ delay: 30
diff --git a/roles/flannel/meta/main.yml b/roles/flannel/meta/main.yml
index 35f825586..51128dba6 100644
--- a/roles/flannel/meta/main.yml
+++ b/roles/flannel/meta/main.yml
@@ -12,7 +12,4 @@ galaxy_info:
categories:
- cloud
- system
-dependencies:
-- role: openshift_facts
-- role: openshift_etcd_client_certificates
- etcd_cert_prefix: flannel.etcd-
+dependencies: []
diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml
index 71c8f38c3..1d0f5df6a 100644
--- a/roles/flannel_register/defaults/main.yaml
+++ b/roles/flannel_register/defaults/main.yaml
@@ -1,6 +1,6 @@
---
flannel_network: "{{ openshift.master.sdn_cluster_network_cidr }}"
-flannel_subnet_len: "{{ 32 - openshift.master.sdn_host_subnet_length }}"
+flannel_subnet_len: "{{ 32 - (openshift.master.sdn_host_subnet_length | int) }}"
flannel_etcd_key: /openshift.com/network
etcd_hosts: "{{ etcd_urls }}"
etcd_conf_dir: "{{ openshift.common.config_base }}/master"
diff --git a/roles/installer_checkpoint/README.md b/roles/installer_checkpoint/README.md
new file mode 100644
index 000000000..83e00e504
--- /dev/null
+++ b/roles/installer_checkpoint/README.md
@@ -0,0 +1,176 @@
+OpenShift-Ansible Installer Checkpoint
+======================================
+
+A complete OpenShift cluster installation is comprised of many different
+components which can take 30 minutes to several hours to complete. If the
+installation fails, it can be unclear which component the failure occurred in.
+Additionally, it may be desirable to re-run only the component which failed
+instead of starting over from the beginning. Components which came after the
+failed component would also need to be run individually.
+
+Design
+------
+
+The Installer Checkpoint implements an Ansible callback plugin to allow
+displaying and logging of the installer status at the end of a playbook run.
+
+To ensure the callback plugin is loaded, regardless of ansible.cfg file
+configuration, the plugin has been placed inside the installer_checkpoint role
+which must be called early in playbook execution. The `std_include.yml` playbook
+is run first for all entry point playbooks, so the initialization of the
+checkpoint plugin has been placed at the beginning of that file.
+
+Playbooks use the [set_stats][set_stats] Ansible module to set a custom stats
+variable indicating the status of the phase being executed.
+
+The installer_checkpoint.py callback plugin extends the Ansible
+`v2_playbook_on_stats` method, which is called at the end of a playbook run, to
+display the status of each phase which was run. The INSTALLER STATUS report is
+displayed immediately following the PLAY RECAP.
+
+Phases of cluster installation are mapped to the steps in the
+[common/openshift-cluster/config.yml][openshift_cluster_config] playbook.
+
+To correctly display the order of the installer phases, the `installer_phases`
+variable defines the phase or component order.
+
+```python
+ # Set the order of the installer phases
+ installer_phases = [
+ 'installer_phase_initialize',
+ 'installer_phase_etcd',
+ 'installer_phase_nfs',
+ 'installer_phase_loadbalancer',
+ 'installer_phase_master',
+ 'installer_phase_master_additional',
+ 'installer_phase_node',
+ 'installer_phase_glusterfs',
+ 'installer_phase_hosted',
+ 'installer_phase_metrics',
+ 'installer_phase_logging',
+ 'installer_phase_servicecatalog',
+ ]
+```
+
+Additional attributes, such as display title and component playbook, of each
+phase are stored in the `phase_attributes` variable.
+
+```python
+ # Define the attributes of the installer phases
+ phase_attributes = {
+ 'installer_phase_initialize': {
+ 'title': 'Initialization',
+ 'playbook': ''
+ },
+ 'installer_phase_etcd': {
+ 'title': 'etcd Install',
+ 'playbook': 'playbooks/byo/openshift-etcd/config.yml'
+ },
+ 'installer_phase_nfs': {
+ 'title': 'NFS Install',
+ 'playbook': 'playbooks/byo/openshift-nfs/config.yml'
+ },
+ #...
+ }
+```
+
+Usage
+-----
+
+In order to indicate the beginning of a component installation, a play must be
+added to the beginning of the main playbook for the component to set the phase
+status to "In Progress". Additionally, a play must be added after the last play
+for that component to set the phase status to "Complete".
+
+The following example shows the first play of the Initialization phase loading
+the `installer_checkpoint` role, as well as the `set_stats` task setting
+`installer_phase_initialize` to "In Progress". Various plays are run for the
+phase/component, and a final play then sets `installer_phase_initialize` to
+"Complete".
+
+```yaml
+# common/openshift-cluster/std_include.yml
+---
+- name: Initialization Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ roles:
+ - installer_checkpoint
+ tasks:
+ - name: Set install initialization 'In Progress'
+ set_stats:
+ data:
+ installer_phase_initialize: "In Progress"
+ aggregate: false
+
+#...
+# Various plays here
+#...
+
+- name: Initialization Checkpoint End
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set install initialization 'Complete'
+ set_stats:
+ data:
+ installer_phase_initialize: "Complete"
+ aggregate: false
+```
+
+Each phase or component of the installer will follow a similar pattern, with the
+exception that the `installer_checkpoint` role does not need to be called since
+it was already loaded by the play in `std_include.yml`. It is important to
+place the 'In Progress' and 'Complete' plays as the first and last plays of the
+phase or component.
+
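+As a rough sketch (host targeting and comments here are illustrative, not taken
+from the actual playbooks), a component playbook such as the etcd install might
+wrap its plays like this:
+
+```yaml
+# playbooks/byo/openshift-etcd/config.yml (sketch)
+---
+- name: etcd Install Checkpoint Start
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  tasks:
+  - name: Set etcd install 'In Progress'
+    set_stats:
+      data:
+        installer_phase_etcd: "In Progress"
+      aggregate: false
+
+# ... plays that actually configure etcd ...
+
+- name: etcd Install Checkpoint End
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  tasks:
+  - name: Set etcd install 'Complete'
+    set_stats:
+      data:
+        installer_phase_etcd: "Complete"
+      aggregate: false
+```
+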
+Examples
+--------
+
+Example display of a successful playbook run:
+
+```
+PLAY RECAP *********************************************************************
+master01.example.com : ok=158 changed=16 unreachable=0 failed=0
+node01.example.com : ok=469 changed=74 unreachable=0 failed=0
+node02.example.com : ok=157 changed=17 unreachable=0 failed=0
+localhost : ok=24 changed=0 unreachable=0 failed=0
+
+
+INSTALLER STATUS ***************************************************************
+Initialization : Complete
+etcd Install : Complete
+NFS Install : Not Started
+Load balancer Install : Not Started
+Master Install : Complete
+Master Additional Install : Complete
+Node Install : Complete
+GlusterFS Install : Not Started
+Hosted Install : Complete
+Metrics Install : Not Started
+Logging Install : Not Started
+Service Catalog Install : Not Started
+```
+
+Example display if a failure occurs during execution:
+
+```
+INSTALLER STATUS ***************************************************************
+Initialization : Complete
+etcd Install : Complete
+NFS Install : Not Started
+Load balancer Install : Not Started
+Master Install : In Progress
+ This phase can be restarted by running: playbooks/byo/openshift-master/config.yml
+Master Additional Install : Not Started
+Node Install : Not Started
+GlusterFS Install : Not Started
+Hosted Install : Not Started
+Metrics Install : Not Started
+Logging Install : Not Started
+Service Catalog Install : Not Started
+```
+
+[set_stats]: http://docs.ansible.com/ansible/latest/set_stats_module.html
+[openshift_cluster_config]: https://github.com/openshift/openshift-ansible/blob/master/playbooks/common/openshift-cluster/config.yml
diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
new file mode 100644
index 000000000..ac369b882
--- /dev/null
+++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
@@ -0,0 +1,187 @@
+"""Ansible callback plugin to print a summary completion status of installation
+phases.
+"""
+from ansible.plugins.callback import CallbackBase
+from ansible import constants as C
+
+DOCUMENTATION = '''
+
+'''
+
+EXAMPLES = '''
+---------------------------------------------
+Example display of a successful playbook run:
+
+PLAY RECAP *********************************************************************
+master01.example.com : ok=158 changed=16 unreachable=0 failed=0
+node01.example.com : ok=469 changed=74 unreachable=0 failed=0
+node02.example.com : ok=157 changed=17 unreachable=0 failed=0
+localhost : ok=24 changed=0 unreachable=0 failed=0
+
+
+INSTALLER STATUS ***************************************************************
+Initialization : Complete
+etcd Install : Complete
+NFS Install : Not Started
+Load balancer Install : Not Started
+Master Install : Complete
+Master Additional Install : Complete
+Node Install : Complete
+GlusterFS Install : Not Started
+Hosted Install : Complete
+Metrics Install : Not Started
+Logging Install : Not Started
+Service Catalog Install : Not Started
+
+-----------------------------------------------------
+Example display if a failure occurs during execution:
+
+INSTALLER STATUS ***************************************************************
+Initialization : Complete
+etcd Install : Complete
+NFS Install : Not Started
+Load balancer Install : Not Started
+Master Install : In Progress
+ This phase can be restarted by running: playbooks/byo/openshift-master/config.yml
+Master Additional Install : Not Started
+Node Install : Not Started
+GlusterFS Install : Not Started
+Hosted Install : Not Started
+Metrics Install : Not Started
+Logging Install : Not Started
+Service Catalog Install : Not Started
+
+'''
+
+
+class CallbackModule(CallbackBase):
+ """This callback summarizes installation phase status."""
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'installer_checkpoint'
+ CALLBACK_NEEDS_WHITELIST = False
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+
+ def v2_playbook_on_stats(self, stats):
+
+ # Set the order of the installer phases
+ installer_phases = [
+ 'installer_phase_initialize',
+ 'installer_phase_etcd',
+ 'installer_phase_nfs',
+ 'installer_phase_loadbalancer',
+ 'installer_phase_master',
+ 'installer_phase_master_additional',
+ 'installer_phase_node',
+ 'installer_phase_glusterfs',
+ 'installer_phase_hosted',
+ 'installer_phase_metrics',
+ 'installer_phase_logging',
+ 'installer_phase_servicecatalog',
+ 'installer_phase_management',
+ ]
+
+ # Define the attributes of the installer phases
+ phase_attributes = {
+ 'installer_phase_initialize': {
+ 'title': 'Initialization',
+ 'playbook': ''
+ },
+ 'installer_phase_etcd': {
+ 'title': 'etcd Install',
+ 'playbook': 'playbooks/byo/openshift-etcd/config.yml'
+ },
+ 'installer_phase_nfs': {
+ 'title': 'NFS Install',
+ 'playbook': 'playbooks/byo/openshift-nfs/config.yml'
+ },
+ 'installer_phase_loadbalancer': {
+ 'title': 'Load balancer Install',
+ 'playbook': 'playbooks/byo/openshift-loadbalancer/config.yml'
+ },
+ 'installer_phase_master': {
+ 'title': 'Master Install',
+ 'playbook': 'playbooks/byo/openshift-master/config.yml'
+ },
+ 'installer_phase_master_additional': {
+ 'title': 'Master Additional Install',
+ 'playbook': 'playbooks/byo/openshift-master/additional_config.yml'
+ },
+ 'installer_phase_node': {
+ 'title': 'Node Install',
+ 'playbook': 'playbooks/byo/openshift-node/config.yml'
+ },
+ 'installer_phase_glusterfs': {
+ 'title': 'GlusterFS Install',
+ 'playbook': 'playbooks/byo/openshift-glusterfs/config.yml'
+ },
+ 'installer_phase_hosted': {
+ 'title': 'Hosted Install',
+ 'playbook': 'playbooks/byo/openshift-cluster/openshift-hosted.yml'
+ },
+ 'installer_phase_metrics': {
+ 'title': 'Metrics Install',
+ 'playbook': 'playbooks/byo/openshift-cluster/openshift-metrics.yml'
+ },
+ 'installer_phase_logging': {
+ 'title': 'Logging Install',
+ 'playbook': 'playbooks/byo/openshift-cluster/openshift-logging.yml'
+ },
+ 'installer_phase_servicecatalog': {
+ 'title': 'Service Catalog Install',
+ 'playbook': 'playbooks/byo/openshift-cluster/service-catalog.yml'
+ },
+ 'installer_phase_management': {
+ 'title': 'Management Install',
+ 'playbook': 'playbooks/common/openshift-cluster/openshift_management.yml'
+ },
+ }
+
+ # Find the longest phase title
+ max_column = 0
+ for phase in phase_attributes:
+ max_column = max(max_column, len(phase_attributes[phase]['title']))
+
+ if '_run' in stats.custom:
+ self._display.banner('INSTALLER STATUS')
+ for phase in installer_phases:
+ phase_title = phase_attributes[phase]['title']
+ padding = max_column - len(phase_title) + 2
+ if phase in stats.custom['_run']:
+ phase_status = stats.custom['_run'][phase]
+ self._display.display(
+ '{}{}: {}'.format(phase_title, ' ' * padding, phase_status),
+ color=self.phase_color(phase_status))
+ if phase_status == 'In Progress' and phase != 'installer_phase_initialize':
+ self._display.display(
+ '\tThis phase can be restarted by running: {}'.format(
+ phase_attributes[phase]['playbook']))
+ else:
+ # Phase was not found in custom stats
+ self._display.display(
+ '{}{}: {}'.format(phase_title, ' ' * padding, 'Not Started'),
+ color=C.COLOR_SKIP)
+
+ self._display.display("", screen_only=True)
+
+ def phase_color(self, status):
+ """ Return color code for installer phase"""
+ valid_status = [
+ 'In Progress',
+ 'Complete',
+ ]
+
+ if status not in valid_status:
+ self._display.warning('Invalid phase status defined: {}'.format(status))
+
+ if status == 'Complete':
+ phase_color = C.COLOR_OK
+ elif status == 'In Progress':
+ phase_color = C.COLOR_ERROR
+ else:
+ phase_color = C.COLOR_WARN
+
+ return phase_color
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index 45d7444a4..05b2763d5 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -745,7 +745,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1421,7 +1421,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py
index 231857cca..d1dc4caf8 100644
--- a/roles/lib_openshift/library/oc_adm_csr.py
+++ b/roles/lib_openshift/library/oc_adm_csr.py
@@ -723,7 +723,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1399,7 +1399,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index 44f3f57d8..152f270ab 100644
--- a/roles/lib_openshift/library/oc_adm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -731,7 +731,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1407,7 +1407,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index 687cff579..3082f5890 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -717,7 +717,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1393,7 +1393,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index ddf5d90b7..1ceaf5d0d 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -717,7 +717,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1393,7 +1393,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index c00eee381..0771aa5a5 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -835,7 +835,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1511,7 +1511,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
@@ -1886,13 +1886,15 @@ class SecretConfig(object):
namespace,
kubeconfig,
secrets=None,
- stype=None):
+ stype=None,
+ annotations=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
self.type = stype
self.namespace = namespace
self.secrets = secrets
+ self.annotations = annotations
self.data = {}
self.create_dict()
@@ -1909,6 +1911,8 @@ class SecretConfig(object):
if self.secrets:
for key, value in self.secrets.items():
self.data['data'][key] = value
+ if self.annotations:
+ self.data['metadata']['annotations'] = self.annotations
# pylint: disable=too-many-instance-attributes
class Secret(Yedit):
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index 0c925ab0b..146f71f68 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -860,7 +860,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1536,7 +1536,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
@@ -2230,13 +2230,15 @@ class SecretConfig(object):
namespace,
kubeconfig,
secrets=None,
- stype=None):
+ stype=None,
+ annotations=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
self.type = stype
self.namespace = namespace
self.secrets = secrets
+ self.annotations = annotations
self.data = {}
self.create_dict()
@@ -2253,6 +2255,8 @@ class SecretConfig(object):
if self.secrets:
for key, value in self.secrets.items():
self.data['data'][key] = value
+ if self.annotations:
+ self.data['metadata']['annotations'] = self.annotations
# pylint: disable=too-many-instance-attributes
class Secret(Yedit):
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index 567ecfd4e..9761b4b4e 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -709,7 +709,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1385,7 +1385,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
index 9515de569..047edffbb 100644
--- a/roles/lib_openshift/library/oc_configmap.py
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -715,7 +715,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1391,7 +1391,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index d461e5ae9..0b6a8436b 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -759,7 +759,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1435,7 +1435,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index 22ad58725..1f52fba40 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -726,7 +726,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1402,7 +1402,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
index b6c6e47d9..1b63a6c13 100644
--- a/roles/lib_openshift/library/oc_group.py
+++ b/roles/lib_openshift/library/oc_group.py
@@ -699,7 +699,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1375,7 +1375,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
index f7fc286e0..94b08d9ce 100644
--- a/roles/lib_openshift/library/oc_image.py
+++ b/roles/lib_openshift/library/oc_image.py
@@ -718,7 +718,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1394,7 +1394,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index 2206878a4..ad837fdb5 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -735,7 +735,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1411,7 +1411,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 126d7a617..892546e56 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -738,7 +738,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1414,7 +1414,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index d20904d0d..38df585f0 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -670,7 +670,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1346,7 +1346,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 91199d093..70632f86d 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -727,7 +727,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1403,7 +1403,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index f9b2d81fa..4eee748d7 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -724,7 +724,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1400,7 +1400,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index 895322ba5..2e73a7645 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -731,7 +731,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1407,7 +1407,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index 8f8e46e1e..e003770d8 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -90,6 +90,12 @@ options:
required: false
default: str
aliases: []
+ labels:
+ description:
+ - The labels to apply on the route
+ required: false
+ default: None
+ aliases: []
tls_termination:
description:
- The options for termination. e.g. reencrypt
@@ -769,7 +775,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1445,7 +1451,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
@@ -1469,6 +1475,7 @@ class RouteConfig(object):
sname,
namespace,
kubeconfig,
+ labels=None,
destcacert=None,
cacert=None,
cert=None,
@@ -1483,6 +1490,7 @@ class RouteConfig(object):
self.kubeconfig = kubeconfig
self.name = sname
self.namespace = namespace
+ self.labels = labels
self.host = host
self.tls_termination = tls_termination
self.destcacert = destcacert
@@ -1508,6 +1516,8 @@ class RouteConfig(object):
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
+ if self.labels:
+ self.data['metadata']['labels'] = self.labels
self.data['spec'] = {}
self.data['spec']['host'] = self.host
@@ -1715,6 +1725,7 @@ class OCRoute(OpenShiftCLI):
rconfig = RouteConfig(params['name'],
params['namespace'],
params['kubeconfig'],
+ params['labels'],
files['destcacert']['value'],
files['cacert']['value'],
files['cert']['value'],
@@ -1819,6 +1830,7 @@ def main():
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
+ labels=dict(default=None, type='dict'),
name=dict(default=None, required=True, type='str'),
namespace=dict(default=None, required=True, type='str'),
tls_termination=dict(default=None, type='str'),
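
`oc_route` gains an optional `labels` dict. It is documented in the module options, accepted by `main()` with a default of `None`, passed positionally into `RouteConfig` right after `kubeconfig`, and folded into the route's `metadata` only when non-empty; the `test_oc_route.py` changes later in this diff assert that the label round-trips. A rough, abridged sketch (field names mirror the diff, the class body is not the real one) of how the metadata is assembled:

```python
# Abridged illustration of RouteConfig's new labels handling.
class RouteConfigSketch(object):
    def __init__(self, name, namespace, labels=None, host=None):
        self.name = name
        self.namespace = namespace
        self.labels = labels
        self.host = host
        self.data = {}
        self.create_dict()

    def create_dict(self):
        self.data['apiVersion'] = 'v1'
        self.data['kind'] = 'Route'
        self.data['metadata'] = {'name': self.name, 'namespace': self.namespace}
        if self.labels:
            # only emitted when a non-empty dict was supplied
            self.data['metadata']['labels'] = self.labels
        self.data['spec'] = {'host': self.host}

cfg = RouteConfigSketch('test', 'default', labels={'route': 'route'}, host='test.example')
assert cfg.data['metadata']['labels'] == {'route': 'route'}
```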
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index 7130cc5fc..c142f1f43 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -713,7 +713,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1389,7 +1389,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index 0c4b99e30..0614f359d 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -90,6 +90,12 @@ options:
required: false
default: default
aliases: []
+ annotations:
+ description:
+ - Annotations to apply to the object
+ required: false
+ default: None
+ aliases: []
files:
description:
- A list of files provided for secrets
@@ -765,7 +771,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1441,7 +1447,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
@@ -1464,13 +1470,15 @@ class SecretConfig(object):
namespace,
kubeconfig,
secrets=None,
- stype=None):
+ stype=None,
+ annotations=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
self.type = stype
self.namespace = namespace
self.secrets = secrets
+ self.annotations = annotations
self.data = {}
self.create_dict()
@@ -1487,6 +1495,8 @@ class SecretConfig(object):
if self.secrets:
for key, value in self.secrets.items():
self.data['data'][key] = value
+ if self.annotations:
+ self.data['metadata']['annotations'] = self.annotations
# pylint: disable=too-many-instance-attributes
class Secret(Yedit):
@@ -1698,8 +1708,7 @@ class OCSecret(OpenShiftCLI):
elif params['contents']:
files = Utils.create_tmp_files_from_contents(params['contents'])
else:
- return {'failed': True,
- 'msg': 'Either specify files or contents.'}
+ files = [{'name': 'null', 'path': os.devnull}]
########
# Create
@@ -1783,6 +1792,7 @@ def main():
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, type='str'),
+ annotations=dict(default=None, type='dict'),
type=dict(default=None, type='str'),
files=dict(default=None, type='list'),
delete_after=dict(default=False, type='bool'),
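
`oc_secret` picks up two related changes: an optional `annotations` dict that `SecretConfig` writes under `metadata.annotations`, and a relaxed create path — when neither `files` nor `contents` is supplied, the module no longer fails but points the CLI at `os.devnull`, which effectively allows creating an empty secret to be populated later. A short sketch, abridged to the fields this diff touches:

```python
import os

# Abridged illustration of the new SecretConfig behaviour.
class SecretConfigSketch(object):
    def __init__(self, name, namespace, secrets=None, stype=None, annotations=None):
        self.name = name
        self.namespace = namespace
        self.secrets = secrets
        self.type = stype
        self.annotations = annotations
        self.data = {'apiVersion': 'v1', 'kind': 'Secret',
                     'metadata': {'name': self.name, 'namespace': self.namespace},
                     'data': {}}
        if self.secrets:
            self.data['data'].update(self.secrets)
        if self.annotations:
            self.data['metadata']['annotations'] = self.annotations

# With neither files nor contents supplied, the module now hands the CLI a
# placeholder file instead of returning {'failed': True, ...}:
empty_secret_files = [{'name': 'null', 'path': os.devnull}]
```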
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index 7ab139e85..3e8aea4f1 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -772,7 +772,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1448,7 +1448,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index 5d539ced4..646a39224 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -711,7 +711,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1387,7 +1387,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index 97e213f46..99a8e8f3d 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -711,7 +711,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1387,7 +1387,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py
index 9339a85e5..e88f3ae8d 100644
--- a/roles/lib_openshift/library/oc_storageclass.py
+++ b/roles/lib_openshift/library/oc_storageclass.py
@@ -729,7 +729,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1405,7 +1405,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
index 2fa349547..7bbe38819 100644
--- a/roles/lib_openshift/library/oc_user.py
+++ b/roles/lib_openshift/library/oc_user.py
@@ -771,7 +771,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1447,7 +1447,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index 55e1054e7..63adbd6ac 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -683,7 +683,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1359,7 +1359,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index 63bad57b4..3c07f8d4b 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -760,7 +760,7 @@ class Yedit(object): # pragma: no cover
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
@@ -1436,7 +1436,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/src/ansible/oc_route.py b/roles/lib_openshift/src/ansible/oc_route.py
index f2f5c5095..969cf8bcd 100644
--- a/roles/lib_openshift/src/ansible/oc_route.py
+++ b/roles/lib_openshift/src/ansible/oc_route.py
@@ -13,6 +13,7 @@ def main():
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
+ labels=dict(default=None, type='dict'),
name=dict(default=None, required=True, type='str'),
namespace=dict(default=None, required=True, type='str'),
tls_termination=dict(default=None, type='str'),
diff --git a/roles/lib_openshift/src/ansible/oc_secret.py b/roles/lib_openshift/src/ansible/oc_secret.py
index faa7c1772..ee2827e69 100644
--- a/roles/lib_openshift/src/ansible/oc_secret.py
+++ b/roles/lib_openshift/src/ansible/oc_secret.py
@@ -15,6 +15,7 @@ def main():
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, type='str'),
+ annotations=dict(default=None, type='dict'),
type=dict(default=None, type='str'),
files=dict(default=None, type='list'),
delete_after=dict(default=False, type='bool'),
diff --git a/roles/lib_openshift/src/class/oc_route.py b/roles/lib_openshift/src/class/oc_route.py
index 3a1bd732f..dc2f7977b 100644
--- a/roles/lib_openshift/src/class/oc_route.py
+++ b/roles/lib_openshift/src/class/oc_route.py
@@ -118,6 +118,7 @@ class OCRoute(OpenShiftCLI):
rconfig = RouteConfig(params['name'],
params['namespace'],
params['kubeconfig'],
+ params['labels'],
files['destcacert']['value'],
files['cacert']['value'],
files['cert']['value'],
diff --git a/roles/lib_openshift/src/class/oc_secret.py b/roles/lib_openshift/src/class/oc_secret.py
index 4ee6443e9..5322d6241 100644
--- a/roles/lib_openshift/src/class/oc_secret.py
+++ b/roles/lib_openshift/src/class/oc_secret.py
@@ -142,8 +142,7 @@ class OCSecret(OpenShiftCLI):
elif params['contents']:
files = Utils.create_tmp_files_from_contents(params['contents'])
else:
- return {'failed': True,
- 'msg': 'Either specify files or contents.'}
+ files = [{'name': 'null', 'path': os.devnull}]
########
# Create
diff --git a/roles/lib_openshift/src/doc/route b/roles/lib_openshift/src/doc/route
index a12999c9e..f0d38ab5f 100644
--- a/roles/lib_openshift/src/doc/route
+++ b/roles/lib_openshift/src/doc/route
@@ -39,6 +39,12 @@ options:
required: false
default: str
aliases: []
+ labels:
+ description:
+ - The labels to apply on the route
+ required: false
+ default: None
+ aliases: []
tls_termination:
description:
- The options for termination. e.g. reencrypt
diff --git a/roles/lib_openshift/src/doc/secret b/roles/lib_openshift/src/doc/secret
index 76b147f6f..a27f90f38 100644
--- a/roles/lib_openshift/src/doc/secret
+++ b/roles/lib_openshift/src/doc/secret
@@ -39,6 +39,12 @@ options:
required: false
default: default
aliases: []
+ annotations:
+ description:
+ - Annotations to apply to the object
+ required: false
+ default: None
+ aliases: []
files:
description:
- A list of files provided for secrets
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index 5a307cdb3..1fb32164e 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -597,7 +597,7 @@ class OpenShiftCLIConfig(object):
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
- and (data['value'] or isinstance(data['value'], int)):
+ and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
diff --git a/roles/lib_openshift/src/lib/route.py b/roles/lib_openshift/src/lib/route.py
index 3b54a24fb..b106866cb 100644
--- a/roles/lib_openshift/src/lib/route.py
+++ b/roles/lib_openshift/src/lib/route.py
@@ -11,6 +11,7 @@ class RouteConfig(object):
sname,
namespace,
kubeconfig,
+ labels=None,
destcacert=None,
cacert=None,
cert=None,
@@ -25,6 +26,7 @@ class RouteConfig(object):
self.kubeconfig = kubeconfig
self.name = sname
self.namespace = namespace
+ self.labels = labels
self.host = host
self.tls_termination = tls_termination
self.destcacert = destcacert
@@ -50,6 +52,8 @@ class RouteConfig(object):
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
+ if self.labels:
+ self.data['metadata']['labels'] = self.labels
self.data['spec'] = {}
self.data['spec']['host'] = self.host
diff --git a/roles/lib_openshift/src/lib/secret.py b/roles/lib_openshift/src/lib/secret.py
index a1c202442..ad4b6aa36 100644
--- a/roles/lib_openshift/src/lib/secret.py
+++ b/roles/lib_openshift/src/lib/secret.py
@@ -10,13 +10,15 @@ class SecretConfig(object):
namespace,
kubeconfig,
secrets=None,
- stype=None):
+ stype=None,
+ annotations=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
self.type = stype
self.namespace = namespace
self.secrets = secrets
+ self.annotations = annotations
self.data = {}
self.create_dict()
@@ -33,6 +35,8 @@ class SecretConfig(object):
if self.secrets:
for key, value in self.secrets.items():
self.data['data'][key] = value
+ if self.annotations:
+ self.data['metadata']['annotations'] = self.annotations
# pylint: disable=too-many-instance-attributes
class Secret(Yedit):
diff --git a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py b/roles/lib_openshift/src/test/integration/filter_plugins/test_filters.py
index f350bd25d..f350bd25d 100644
--- a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py
+++ b/roles/lib_openshift/src/test/integration/filter_plugins/test_filters.py
diff --git a/roles/lib_openshift/src/test/integration/oc_configmap.yml b/roles/lib_openshift/src/test/integration/oc_configmap.yml
index c0d200e73..6a452ccec 100755
--- a/roles/lib_openshift/src/test/integration/oc_configmap.yml
+++ b/roles/lib_openshift/src/test/integration/oc_configmap.yml
@@ -55,7 +55,7 @@
config: "{{ filename }}"
from_literal:
foo: notbar
- deployment_type: online
+ deployment_type: openshift-enterprise
- name: fetch the updated configmap
oc_configmap:
@@ -70,7 +70,7 @@
assert:
that:
- cmout.results.results[0].metadata.name == 'configmaptest'
- - cmout.results.results[0].data.deployment_type == 'online'
+ - cmout.results.results[0].data.deployment_type == 'openshift-enterprise'
- cmout.results.results[0].data.foo == 'notbar'
###### end update test ###########
diff --git a/roles/lib_openshift/src/test/unit/test_oc_configmap.py b/roles/lib_openshift/src/test/unit/test_oc_configmap.py
index 318fd6167..27042c64b 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_configmap.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_configmap.py
@@ -79,7 +79,7 @@ class OCConfigMapTest(unittest.TestCase):
''' Testing a configmap create '''
params = copy.deepcopy(OCConfigMapTest.params)
params['from_file'] = {'test': '/root/file'}
- params['from_literal'] = {'foo': 'bar', 'deployment_type': 'online'}
+ params['from_literal'] = {'foo': 'bar', 'deployment_type': 'openshift-enterprise'}
configmap = '''{
"apiVersion": "v1",
@@ -100,7 +100,7 @@ class OCConfigMapTest(unittest.TestCase):
"apiVersion": "v1",
"data": {
"foo": "bar",
- "deployment_type": "online",
+ "deployment_type": "openshift-enterprise",
"test": "this is a file\\n"
},
"kind": "ConfigMap",
@@ -128,7 +128,7 @@ class OCConfigMapTest(unittest.TestCase):
self.assertTrue(results['changed'])
self.assertEqual(results['results']['results'][0]['metadata']['name'], 'configmap')
- self.assertEqual(results['results']['results'][0]['data']['deployment_type'], 'online')
+ self.assertEqual(results['results']['results'][0]['data']['deployment_type'], 'openshift-enterprise')
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
diff --git a/roles/lib_openshift/src/test/unit/test_oc_route.py b/roles/lib_openshift/src/test/unit/test_oc_route.py
index afdb5e4dc..5699f123b 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_route.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_route.py
@@ -39,6 +39,7 @@ class OCRouteTest(unittest.TestCase):
'debug': False,
'name': 'test',
'namespace': 'default',
+ 'labels': {'route': 'route'},
'tls_termination': 'passthrough',
'dest_cacert_path': None,
'cacert_path': None,
@@ -64,7 +65,10 @@ class OCRouteTest(unittest.TestCase):
"selfLink": "/oapi/v1/namespaces/default/routes/test",
"uid": "1b127c67-ecd9-11e6-96eb-0e0d9bdacd26",
"resourceVersion": "439182",
- "creationTimestamp": "2017-02-07T01:59:48Z"
+ "creationTimestamp": "2017-02-07T01:59:48Z",
+ "labels": {
+ "route": "route"
+ }
},
"spec": {
"host": "test.example",
@@ -141,6 +145,7 @@ class OCRouteTest(unittest.TestCase):
'debug': False,
'name': 'test',
'namespace': 'default',
+ 'labels': {'route': 'route'},
'tls_termination': 'edge',
'dest_cacert_path': None,
'cacert_path': None,
@@ -166,7 +171,8 @@ class OCRouteTest(unittest.TestCase):
"namespace": "default",
"resourceVersion": "517745",
"selfLink": "/oapi/v1/namespaces/default/routes/test",
- "uid": "b6f25898-ed77-11e6-9755-0e737db1e63a"
+ "uid": "b6f25898-ed77-11e6-9755-0e737db1e63a",
+ "labels": {"route": "route"}
},
"spec": {
"host": "test.openshift.com",
@@ -250,6 +256,7 @@ metadata:
self.assertTrue(results['changed'])
self.assertEqual(results['state'], 'present')
self.assertEqual(results['results']['results'][0]['metadata']['name'], 'test')
+ self.assertEqual(results['results']['results'][0]['metadata']['labels']['route'], 'route')
# Making sure our mock was called as we expected
mock_cmd.assert_has_calls([
diff --git a/roles/lib_utils/library/repoquery.py b/roles/lib_utils/library/repoquery.py
index 95a305b58..e5ac1f74f 100644
--- a/roles/lib_utils/library/repoquery.py
+++ b/roles/lib_utils/library/repoquery.py
@@ -35,6 +35,7 @@ import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
import tempfile # noqa: F401
+import time # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
@@ -618,17 +619,22 @@ def main():
show_duplicates=dict(default=False, required=False, type='bool'),
match_version=dict(default=None, required=False, type='str'),
ignore_excluders=dict(default=False, required=False, type='bool'),
+ retries=dict(default=4, required=False, type='int'),
+ retry_interval=dict(default=5, required=False, type='int'),
),
supports_check_mode=False,
required_if=[('show_duplicates', True, ['name'])],
)
- rval = Repoquery.run_ansible(module.params, module.check_mode)
-
- if 'failed' in rval:
- module.fail_json(**rval)
-
- module.exit_json(**rval)
+ tries = 1
+ while True:
+ rval = Repoquery.run_ansible(module.params, module.check_mode)
+ if 'failed' not in rval:
+ module.exit_json(**rval)
+ elif tries > module.params['retries']:
+ module.fail_json(**rval)
+ tries += 1
+ time.sleep(module.params['retry_interval'])
if __name__ == "__main__":
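
`repoquery` (and its source copy in `src/ansible/repoquery.py` below) now retries transient failures: two new parameters, `retries` (default 4) and `retry_interval` (default 5 seconds), drive a loop around `Repoquery.run_ansible`, and the `time` import added at the top of the generated modules supports the sleep. With the defaults this gives one initial attempt plus up to four retries. A stand-alone sketch of the same control flow, with `run_once` standing in for `Repoquery.run_ansible` and the module exit calls:

```python
import time

def run_with_retries(run_once, retries=4, retry_interval=5):
    tries = 1
    while True:
        rval = run_once()
        if 'failed' not in rval:
            return rval                  # success: the exit_json path
        if tries > retries:
            raise RuntimeError(rval)     # give up: the fail_json path
        tries += 1
        time.sleep(retry_interval)
```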
diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py
index baf72fe47..cf5c2e423 100644
--- a/roles/lib_utils/library/yedit.py
+++ b/roles/lib_utils/library/yedit.py
@@ -35,6 +35,7 @@ import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
import tempfile # noqa: F401
+import time # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
@@ -792,7 +793,7 @@ class Yedit(object):
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_utils/src/ansible/repoquery.py b/roles/lib_utils/src/ansible/repoquery.py
index 40773b1c1..5f5b93639 100644
--- a/roles/lib_utils/src/ansible/repoquery.py
+++ b/roles/lib_utils/src/ansible/repoquery.py
@@ -19,17 +19,22 @@ def main():
show_duplicates=dict(default=False, required=False, type='bool'),
match_version=dict(default=None, required=False, type='str'),
ignore_excluders=dict(default=False, required=False, type='bool'),
+ retries=dict(default=4, required=False, type='int'),
+ retry_interval=dict(default=5, required=False, type='int'),
),
supports_check_mode=False,
required_if=[('show_duplicates', True, ['name'])],
)
- rval = Repoquery.run_ansible(module.params, module.check_mode)
-
- if 'failed' in rval:
- module.fail_json(**rval)
-
- module.exit_json(**rval)
+ tries = 1
+ while True:
+ rval = Repoquery.run_ansible(module.params, module.check_mode)
+ if 'failed' not in rval:
+ module.exit_json(**rval)
+ elif tries > module.params['retries']:
+ module.fail_json(**rval)
+ tries += 1
+ time.sleep(module.params['retry_interval'])
if __name__ == "__main__":
diff --git a/roles/lib_utils/src/class/yedit.py b/roles/lib_utils/src/class/yedit.py
index 957c35a06..0a4fbe07a 100644
--- a/roles/lib_utils/src/class/yedit.py
+++ b/roles/lib_utils/src/class/yedit.py
@@ -590,7 +590,7 @@ class Yedit(object):
yamlfile.yaml_dict = content
if params['key']:
- rval = yamlfile.get(params['key']) or {}
+ rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
diff --git a/roles/lib_utils/src/lib/import.py b/roles/lib_utils/src/lib/import.py
index 567f8c9e0..07a04b7ae 100644
--- a/roles/lib_utils/src/lib/import.py
+++ b/roles/lib_utils/src/lib/import.py
@@ -10,6 +10,7 @@ import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
import tempfile # noqa: F401
+import time # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
diff --git a/roles/nuage_master/meta/main.yml b/roles/nuage_master/meta/main.yml
index 3da340c85..e2f7af5ad 100644
--- a/roles/nuage_master/meta/main.yml
+++ b/roles/nuage_master/meta/main.yml
@@ -13,8 +13,5 @@ galaxy_info:
- cloud
- system
dependencies:
-- role: nuage_ca
-- role: nuage_common
-- role: openshift_etcd_client_certificates
- role: lib_openshift
- role: lib_os_firewall
diff --git a/roles/nuage_master/templates/nuage-master-config-daemonset.j2 b/roles/nuage_master/templates/nuage-master-config-daemonset.j2
index 612d689c2..7be5d6743 100755
--- a/roles/nuage_master/templates/nuage-master-config-daemonset.j2
+++ b/roles/nuage_master/templates/nuage-master-config-daemonset.j2
@@ -62,16 +62,14 @@ spec:
selector:
matchLabels:
k8s-app: nuage-master-config
+ updateStrategy:
+ type: RollingUpdate
template:
metadata:
labels:
k8s-app: nuage-master-config
spec:
hostNetwork: true
- tolerations:
- - key: node-role.kubernetes.io/master
- effect: NoSchedule
- operator: Exists
nodeSelector:
install-monitor: "true"
containers:
diff --git a/roles/nuage_master/templates/nuage-node-config-daemonset.j2 b/roles/nuage_master/templates/nuage-node-config-daemonset.j2
index 02e9a1563..6a1267d94 100755
--- a/roles/nuage_master/templates/nuage-node-config-daemonset.j2
+++ b/roles/nuage_master/templates/nuage-node-config-daemonset.j2
@@ -23,7 +23,7 @@ data:
# IP address and port number of master API server
masterApiServer: {{ api_server_url }}
# REST server URL
- nuageMonRestServer: {{ nuage_mon_rest_server_url }}
+ nuageMonRestServer: https://{{ openshift_master_cluster_hostname }}:{{ nuage_mon_rest_server_port }}
# Bridge name for the docker bridge
dockerBridgeName: docker0
# Certificate for connecting to the openshift monitor REST api
@@ -32,11 +32,6 @@ data:
nuageMonClientKey: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/nuageMonClient.key
# CA certificate for verifying the master's rest server
nuageMonServerCA: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/nuageMonCA.crt
- # Nuage vport mtu size
- interfaceMTU: {{ nuage_vport_mtu }}
- # Logging level for the plugin
- # allowed options are: "dbg", "info", "warn", "err", "emer", "off"
- logLevel: 3
# This will generate the required Nuage CNI yaml configuration
cni_yaml_config: |
@@ -72,10 +67,6 @@ spec:
k8s-app: nuage-cni-ds
spec:
hostNetwork: true
- tolerations:
- - key: node-role.kubernetes.io/master
- effect: NoSchedule
- operator: Exists
containers:
# This container installs Nuage CNI binaries
# and CNI network config file on each node.
@@ -157,10 +148,6 @@ spec:
k8s-app: nuage-vrs-ds
spec:
hostNetwork: true
- tolerations:
- - key: node-role.kubernetes.io/master
- effect: NoSchedule
- operator: Exists
containers:
# This container installs Nuage VRS running as a
# container on each worker node
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
index d8bfca62a..fdf01b7c2 100644
--- a/roles/nuage_node/vars/main.yaml
+++ b/roles/nuage_node/vars/main.yaml
@@ -24,4 +24,4 @@ cni_bin_dir: "/opt/cni/bin/"
nuage_plugin_crt_dir: /usr/share/vsp-openshift
openshift_atomic_node_config_file: /etc/sysconfig/{{ openshift.common.service_type }}-node
-nuage_atomic_docker_additional_mounts: "DOCKER_ADDTL_BIND_MOUNTS=-v /var/usr/share/vsp-openshift:/var/usr/share/vsp-openshift -v /etc/default:/etc/default -v /var/run:/var/run -v /opt/cni/bin:/opt/cni/bin -v /etc/cni/net.d:/etc/cni/net.d"
+nuage_atomic_docker_additional_mounts: "NUAGE_ADDTL_BIND_MOUNTS=-v /var/usr/share/vsp-openshift:/var/usr/share/vsp-openshift -v /etc/default:/etc/default -v /var/run:/var/run -v /opt/cni/bin:/opt/cni/bin -v /etc/cni/net.d:/etc/cni/net.d"
diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md
index 696efbea5..4aca5c7a8 100644
--- a/roles/openshift_aws/README.md
+++ b/roles/openshift_aws/README.md
@@ -1,7 +1,29 @@
openshift_aws
==================================
-Provision AWS infrastructure helpers.
+Provision AWS infrastructure and instances.
+
+This role contains many task-areas to provision resources and perform actions
+against an AWS account for the purposes of dynamically building an OpenShift
+cluster.
+
+This role is primarily intended to be used with "include_role" and "tasks_from".
+
+include_role can be called from the tasks section in a play. See example
+playbook below for reference.
+
+These task-areas are:
+
+* provision a vpc: vpc.yml
+* provision elastic load balancers: elb.yml
+* upload IAM ssl certificates to use with load balancers: iam_cert.yml
+* provision an S3 bucket: s3.yml
+* provision an instance to build an AMI: provision_instance.yml
+* provision a security group in AWS: security_group.yml
+* provision ssh keys and users in AWS: ssh_keys.yml
+* provision an AMI in AWS: seal_ami.yml
+* provision scale groups: scale_group.yml
+* provision launch configs: launch_config.yml
Requirements
------------
@@ -9,57 +31,9 @@ Requirements
* Ansible 2.3
* Boto
-Role Variables
---------------
-
-From this role:
-
-| Name | Default value
-|---------------------------------------------------|-----------------------
-| openshift_aws_clusterid | default
-| openshift_aws_elb_scheme | internet-facing
-| openshift_aws_launch_config_bootstrap_token | ''
-| openshift_aws_node_group_config | {'master': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_master_volumes }}', 'tags': {'host-type': 'master', 'sub-host-type': 'default'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'wait_for_instances': True, 'max_size': 3}, 'tags': '{{ openshift_aws_node_group_config_tags }}', 'compute': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'compute'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'max_size': 100}, 'infra': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'infra'}, 'min_size': 2, 'instance_type': 'm4.xlarge', 'desired_size': 2, 'max_size': 20}}
-| openshift_aws_ami_copy_wait | False
-| openshift_aws_users | []
-| openshift_aws_launch_config_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}
-| openshift_aws_create_vpc | False
-| openshift_aws_node_group_type | master
-| openshift_aws_elb_cert_arn | ''
-| openshift_aws_kubernetes_cluster_status | owned
-| openshift_aws_s3_mode | create
-| openshift_aws_vpc | {'subnets': {'us-east-1': [{'cidr': '172.31.48.0/20', 'az': 'us-east-1c'}, {'cidr': '172.31.32.0/20', 'az': 'us-east-1e'}, {'cidr': '172.31.16.0/20', 'az': 'us-east-1a'}]}, 'cidr': '172.31.0.0/16', 'name': '{{ openshift_aws_vpc_name }}'}
-| openshift_aws_create_ssh_keys | False
-| openshift_aws_iam_kms_alias | alias/{{ openshift_aws_clusterid }}_kms
-| openshift_aws_use_custom_ami | False
-| openshift_aws_ami_copy_src_region | {{ openshift_aws_region }}
-| openshift_aws_s3_bucket_name | {{ openshift_aws_clusterid }}
-| openshift_aws_elb_health_check | {'response_timeout': 5, 'ping_port': 443, 'ping_protocol': 'tcp', 'interval': 30, 'healthy_threshold': 2, 'unhealthy_threshold': 2}
-| openshift_aws_node_security_groups | {'default': {'rules': [{'to_port': 22, 'from_port': 22, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 'all', 'from_port': 'all', 'proto': 'all', 'group_name': '{{ openshift_aws_clusterid }}'}], 'name': '{{ openshift_aws_clusterid }}', 'desc': '{{ openshift_aws_clusterid }} default'}, 'master': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_master', 'desc': '{{ openshift_aws_clusterid }} master instances'}, 'compute': {'name': '{{ openshift_aws_clusterid }}_compute', 'desc': '{{ openshift_aws_clusterid }} compute node instances'}, 'etcd': {'name': '{{ openshift_aws_clusterid }}_etcd', 'desc': '{{ openshift_aws_clusterid }} etcd instances'}, 'infra': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 32000, 'from_port': 30000, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_infra', 'desc': '{{ openshift_aws_clusterid }} infra node instances'}}
-| openshift_aws_elb_security_groups | ['{{ openshift_aws_clusterid }}', '{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}']
-| openshift_aws_vpc_tags | {'Name': '{{ openshift_aws_vpc_name }}'}
-| openshift_aws_create_security_groups | False
-| openshift_aws_create_iam_cert | False
-| openshift_aws_create_scale_group | True
-| openshift_aws_ami_encrypt | False
-| openshift_aws_node_group_config_node_volumes | [{'volume_size': 100, 'delete_on_termination': True, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
-| openshift_aws_elb_instance_filter | {'tag:host-type': '{{ openshift_aws_node_group_type }}', 'tag:clusterid': '{{ openshift_aws_clusterid }}', 'instance-state-name': 'running'}
-| openshift_aws_region | us-east-1
-| openshift_aws_elb_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}
-| openshift_aws_elb_idle_timout | 400
-| openshift_aws_subnet_name | us-east-1c
-| openshift_aws_node_group_config_tags | {{ openshift_aws_clusterid | openshift_aws_build_instance_tags(openshift_aws_kubernetes_cluster_status) }}
-| openshift_aws_create_launch_config | True
-| openshift_aws_ami_tags | {'bootstrap': 'true', 'clusterid': '{{ openshift_aws_clusterid }}', 'openshift-created': 'true'}
-| openshift_aws_ami_name | openshift-gi
-| openshift_aws_node_group_config_master_volumes | [{'volume_size': 100, 'delete_on_termination': False, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
-| openshift_aws_vpc_name | {{ openshift_aws_clusterid }}
-| openshift_aws_elb_listeners | {'master': {'internal': [{'instance_port': 80, 'instance_protocol': 'tcp', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'tcp', 'load_balancer_port': 443, 'protocol': 'tcp'}], 'external': [{'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 443, 'ssl_certificate_id': '{{ openshift_aws_elb_cert_arn }}', 'protocol': 'ssl'}]}}
-|
-
-
-Dependencies
-------------
+Appropriate AWS credentials and permissions are required.
+
+
Example Playbook
@@ -72,7 +46,6 @@ Example Playbook
vars:
openshift_aws_clusterid: test
openshift_aws_region: us-east-1
- openshift_aws_create_vpc: true
```
License
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 4e7f54f79..ea09857b0 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -1,5 +1,4 @@
---
-openshift_aws_create_vpc: True
openshift_aws_create_s3: True
openshift_aws_create_iam_cert: True
openshift_aws_create_security_groups: True
@@ -13,10 +12,10 @@ openshift_aws_wait_for_ssh: True
openshift_aws_clusterid: default
openshift_aws_region: us-east-1
openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
+openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"
openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
openshift_aws_iam_cert_path: ''
-openshift_aws_iam_cert_chain_path: ''
openshift_aws_iam_cert_key_path: ''
openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift {{ openshift_aws_node_group_type }}"
@@ -143,6 +142,11 @@ openshift_aws_elb_instance_filter:
"tag:host-type": "{{ openshift_aws_node_group_type }}"
instance-state-name: running
+openshift_aws_launch_config_security_groups:
+- "{{ openshift_aws_clusterid }}" # default sg
+- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" # node type sg
+- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s" # node type sg k8s
+
openshift_aws_node_security_groups:
default:
name: "{{ openshift_aws_clusterid }}"
diff --git a/roles/openshift_aws/filter_plugins/filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
index 06e1f9602..06e1f9602 100644
--- a/roles/openshift_aws/filter_plugins/filters.py
+++ b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml
index a1fdd66fc..7bc3184df 100644
--- a/roles/openshift_aws/tasks/elb.yml
+++ b/roles/openshift_aws/tasks/elb.yml
@@ -29,9 +29,9 @@
if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
else openshift_aws_elb_listeners }}"
-- name: "Create ELB {{ openshift_aws_elb_name }}"
+- name: "Create ELB {{ l_openshift_aws_elb_name }}"
ec2_elb_lb:
- name: "{{ openshift_aws_elb_name }}"
+ name: "{{ l_openshift_aws_elb_name }}"
state: present
security_group_names: "{{ openshift_aws_elb_security_groups }}"
idle_timeout: "{{ openshift_aws_elb_idle_timout }}"
@@ -49,10 +49,10 @@
# It is necessary to ignore_errors here because the instances are not in 'ready'
# state when first added to ELB
-- name: "Add instances to ELB {{ openshift_aws_elb_name }}"
+- name: "Add instances to ELB {{ l_openshift_aws_elb_name }}"
ec2_elb:
instance_id: "{{ item.id }}"
- ec2_elbs: "{{ openshift_aws_elb_name }}"
+ ec2_elbs: "{{ l_openshift_aws_elb_name }}"
state: present
region: "{{ openshift_aws_region }}"
wait: False
diff --git a/roles/openshift_aws/tasks/iam_cert.yml b/roles/openshift_aws/tasks/iam_cert.yml
index cd9772a25..f74a62b8b 100644
--- a/roles/openshift_aws/tasks/iam_cert.yml
+++ b/roles/openshift_aws/tasks/iam_cert.yml
@@ -11,17 +11,23 @@
- "'failed' in elb_cert_chain"
- elb_cert_chain.failed
- "'msg' in elb_cert_chain"
- - "'already exists and has a different certificate body' in elb_cert_chain.msg"
- - "'BotoServerError' in elb_cert_chain.msg"
+ - "'already exists and has a different certificate body' in elb_cert_chain.msg or 'BotoServerError' in elb_cert_chain.msg or 'Traceback' in elb_cert_chain.msg.module_stderr"
when:
- openshift_aws_create_iam_cert | bool
- openshift_aws_iam_cert_path != ''
- openshift_aws_iam_cert_key_path != ''
- openshift_aws_elb_cert_arn == ''
+- debug: msg="{{ elb_cert_chain }}"
+
- name: set_fact openshift_aws_elb_cert_arn
set_fact:
openshift_aws_elb_cert_arn: "{{ elb_cert_chain.arn }}"
+ when:
+ - openshift_aws_create_iam_cert | bool
+ - openshift_aws_iam_cert_path != ''
+ - openshift_aws_iam_cert_key_path != ''
+ - openshift_aws_elb_cert_arn == ''
- name: wait for cert to propagate
pause:
diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml
index 65c5a6cc0..e6be9969c 100644
--- a/roles/openshift_aws/tasks/launch_config.yml
+++ b/roles/openshift_aws/tasks/launch_config.yml
@@ -4,13 +4,18 @@
when:
- openshift_aws_ami is undefined
+- name: query vpc
+ ec2_vpc_net_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ 'tag:Name': "{{ openshift_aws_vpc_name }}"
+ register: vpcout
+
- name: fetch the security groups for launch config
ec2_group_facts:
filters:
- group-name:
- - "{{ openshift_aws_clusterid }}" # default sg
- - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" # node type sg
- - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s" # node type sg k8s
+ group-name: "{{ openshift_aws_launch_config_security_groups }}"
+ vpc-id: "{{ vpcout.vpcs[0].id }}"
region: "{{ openshift_aws_region }}"
register: ec2sgs
@@ -21,7 +26,7 @@
region: "{{ openshift_aws_region }}"
image_id: "{{ openshift_aws_ami }}"
instance_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].instance_type }}"
- security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id')| list }}"
+ security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}"
user_data: |-
#cloud-config
{% if openshift_aws_node_group_type != 'master' %}
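
The launch-config task now resolves the VPC by its `Name` tag first and filters security groups by both `group-name` and `vpc-id`, so identically named groups in other VPCs are no longer picked up. The group-name list itself moves into the new `openshift_aws_launch_config_security_groups` default, and the resolved IDs can be bypassed entirely via `openshift_aws_launch_config_security_group_id`. A rough boto3 equivalent of the new two-step lookup, with region, VPC name, and group names as placeholders (the role itself uses the `ec2_vpc_net_facts` and `ec2_group_facts` modules, not boto3 directly):

```python
import boto3

ec2 = boto3.client('ec2', region_name='us-east-1')

# Step 1: find the VPC by its Name tag.
vpcs = ec2.describe_vpcs(
    Filters=[{'Name': 'tag:Name', 'Values': ['default']}])['Vpcs']
vpc_id = vpcs[0]['VpcId']

# Step 2: restrict the group-name match to that VPC.
groups = ec2.describe_security_groups(
    Filters=[{'Name': 'group-name', 'Values': ['default', 'default_master']},
             {'Name': 'vpc-id', 'Values': [vpc_id]}])['SecurityGroups']
group_ids = [g['GroupId'] for g in groups]
```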
diff --git a/roles/openshift_aws/tasks/master_facts.yml b/roles/openshift_aws/tasks/master_facts.yml
new file mode 100644
index 000000000..737cfc7a6
--- /dev/null
+++ b/roles/openshift_aws/tasks/master_facts.yml
@@ -0,0 +1,22 @@
+---
+- name: fetch elbs
+ ec2_elb_facts:
+ region: "{{ openshift_aws_region }}"
+ names:
+ - "{{ item }}"
+ with_items:
+ - "{{ openshift_aws_elb_name }}-external"
+ - "{{ openshift_aws_elb_name }}-internal"
+ delegate_to: localhost
+ register: elbs
+
+- debug: var=elbs
+
+- name: set fact
+ set_fact:
+ openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}"
+ osm_custom_cors_origins:
+ - "{{ elbs.results[1].elbs[0].dns_name }}"
+ - "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
+ - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
+ with_items: "{{ groups['masters'] }}"
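
The new `master_facts.yml` looks up the cluster's external and internal ELBs by name and sets `openshift_master_cluster_hostname` to the internal ELB's DNS name (plus CORS origins for the console and API hostnames). A rough boto3 equivalent of that lookup, shown only for illustration — the ELB names below follow the `<clusterid>-<node group>-internal/-external` convention and are placeholders:

```python
import boto3

elb = boto3.client('elb', region_name='us-east-1')
resp = elb.describe_load_balancers(
    LoadBalancerNames=['default-master-external', 'default-master-internal'])
dns_by_name = {lb['LoadBalancerName']: lb['DNSName']
               for lb in resp['LoadBalancerDescriptions']}
master_cluster_hostname = dns_by_name['default-master-internal']
```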
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
index 189caeaee..a8518d43a 100644
--- a/roles/openshift_aws/tasks/provision.yml
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -1,16 +1,8 @@
---
-- when: openshift_aws_create_vpc | bool
- name: create default vpc
- include: vpc.yml
-
- when: openshift_aws_create_iam_cert | bool
name: create the iam_cert for elb certificate
include: iam_cert.yml
-- when: openshift_aws_users | length > 0
- name: create aws ssh keypair
- include: ssh_keys.yml
-
- when: openshift_aws_create_s3 | bool
name: create s3 bucket for registry
include: s3.yml
@@ -34,14 +26,14 @@
include: elb.yml
vars:
openshift_aws_elb_direction: internal
- openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-internal"
+ l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-internal"
openshift_aws_elb_scheme: internal
- name: create our master external load balancers
include: elb.yml
vars:
openshift_aws_elb_direction: external
- openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-external"
+ l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-external"
openshift_aws_elb_scheme: internet-facing
- name: wait for ssh to become available
diff --git a/roles/openshift_aws/tasks/build_ami.yml b/roles/openshift_aws/tasks/provision_instance.yml
index 8d4e5ac43..1384bae59 100644
--- a/roles/openshift_aws/tasks/build_ami.yml
+++ b/roles/openshift_aws/tasks/provision_instance.yml
@@ -1,16 +1,4 @@
---
-- when: openshift_aws_create_vpc | bool
- name: create a vpc
- include: vpc.yml
-
-- when: openshift_aws_users | length > 0
- name: create aws ssh keypair
- include: ssh_keys.yml
-
-- when: openshift_aws_create_security_groups | bool
- name: Create compute security_groups
- include: security_group.yml
-
- name: query vpc
ec2_vpc_net_facts:
region: "{{ openshift_aws_region }}"
@@ -31,9 +19,9 @@
assign_public_ip: yes
region: "{{ openshift_aws_region }}"
key_name: "{{ openshift_aws_ssh_key_name }}"
- group: "{{ openshift_aws_clusterid }}"
+ group: "{{ openshift_aws_build_ami_group }}"
instance_type: m4.xlarge
- vpc_subnet_id: "{{ subnetout.subnets[0].id }}"
+ vpc_subnet_id: "{{ openshift_aws_subnet_id | default(subnetout.subnets[0].id) }}"
image: "{{ openshift_aws_base_ami }}"
volumes:
- device_name: /dev/sdb
@@ -46,3 +34,30 @@
Name: "{{ openshift_aws_base_ami_name }}"
instance_tags:
Name: "{{ openshift_aws_base_ami_name }}"
+
+- name: fetch newly created instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_base_ami_name }}"
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
+
+- name: wait for ssh to become available
+ wait_for:
+ port: 22
+ host: "{{ instancesout.instances[0].public_ip_address }}"
+ timeout: 300
+ search_regex: OpenSSH
+
+- name: Pause 20 seconds to ensure ssh actually accepts logins
+ pause:
+ seconds: 20
+
+- name: add host to nodes
+ add_host:
+ groups: nodes
+ name: "{{ instancesout.instances[0].public_dns_name }}"
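
With the VPC, keypair, and security-group steps moved out of this file, `provision_instance.yml` now finishes by polling for the new instance, waiting for SSH to answer on port 22 with an OpenSSH banner, pausing briefly, and adding the host to the `nodes` group. A minimal Python stand-in for that wait-for-SSH check (the role uses Ansible's `wait_for` module; host and timings here are placeholders):

```python
import socket
import time

def wait_for_ssh(host, timeout=300, interval=5):
    """Poll port 22 until an OpenSSH banner appears or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with socket.create_connection((host, 22), timeout=interval) as sock:
                if b'OpenSSH' in sock.recv(64):
                    return True
        except OSError:
            pass
        time.sleep(interval)
    return False
```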
diff --git a/roles/openshift_aws/tasks/setup_master_group.yml b/roles/openshift_aws/tasks/setup_master_group.yml
new file mode 100644
index 000000000..166f3b938
--- /dev/null
+++ b/roles/openshift_aws/tasks/setup_master_group.yml
@@ -0,0 +1,35 @@
+---
+- name: Alert user to variables needed - clusterid
+ debug:
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid }}"
+
+- name: Alert user to variables needed - region
+ debug:
+ msg: "openshift_aws_region={{ openshift_aws_region }}"
+
+- name: fetch newly created instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:clusterid": "{{ openshift_aws_clusterid }}"
+ "tag:host-type": master
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
+
+- name: add new master to masters group
+ add_host:
+ groups: "masters,etcd,nodes"
+ name: "{{ item.public_dns_name }}"
+ hostname: "{{ openshift_aws_clusterid }}-master-{{ item.id[:-5] }}"
+ with_items: "{{ instancesout.instances }}"
+
+- name: wait for ssh to become available
+ wait_for:
+ port: 22
+ host: "{{ item.public_dns_name }}"
+ timeout: 300
+ search_regex: OpenSSH
+ with_items: "{{ instancesout.instances }}"
diff --git a/roles/openshift_ca/defaults/main.yml b/roles/openshift_ca/defaults/main.yml
index ecfcc88b3..742b15df4 100644
--- a/roles/openshift_ca/defaults/main.yml
+++ b/roles/openshift_ca/defaults/main.yml
@@ -1,3 +1,11 @@
---
openshift_ca_cert_expire_days: 1825
openshift_master_cert_expire_days: 730
+
+openshift_ca_config_dir: "{{ openshift.common.config_base }}/master"
+openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt"
+openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key"
+openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt"
+openshift_master_loopback_config: "{{ openshift_ca_config_dir }}/openshift-master.kubeconfig"
+
+openshift_version: "{{ openshift_pkg_version | default('') }}"
diff --git a/roles/openshift_ca/meta/main.yml b/roles/openshift_ca/meta/main.yml
index dfbdf0cc7..f8b784a63 100644
--- a/roles/openshift_ca/meta/main.yml
+++ b/roles/openshift_ca/meta/main.yml
@@ -14,4 +14,3 @@ galaxy_info:
- system
dependencies:
- role: openshift_cli
-- role: openshift_named_certificates
diff --git a/roles/openshift_ca/vars/main.yml b/roles/openshift_ca/vars/main.yml
index d04c1766d..4d80bf921 100644
--- a/roles/openshift_ca/vars/main.yml
+++ b/roles/openshift_ca/vars/main.yml
@@ -1,9 +1,2 @@
---
-openshift_ca_config_dir: "{{ openshift.common.config_base }}/master"
-openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt"
-openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key"
-openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt"
-openshift_version: "{{ openshift_pkg_version | default('') }}"
-
-openshift_master_loopback_config: "{{ openshift_ca_config_dir }}/openshift-master.kubeconfig"
loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
diff --git a/roles/openshift_cfme/README.md b/roles/openshift_cfme/README.md
deleted file mode 100644
index 8283afed6..000000000
--- a/roles/openshift_cfme/README.md
+++ /dev/null
@@ -1,404 +0,0 @@
-# OpenShift-Ansible - CFME Role
-
-# PROOF OF CONCEPT - Alpha Version
-
-This role is based on the work in the upstream
-[manageiq/manageiq-pods](https://github.com/ManageIQ/manageiq-pods)
-project. For additional literature on configuration specific to
-ManageIQ (optional post-installation tasks), visit the project's
-[upstream documentation page](http://manageiq.org/docs/get-started/basic-configuration).
-
-Please submit a
-[new issue](https://github.com/openshift/openshift-ansible/issues/new)
-if you run into bugs with this role or wish to request enhancements.
-
-# Important Notes
-
-This is an early *proof of concept* role to install the Cloud Forms
-Management Engine (ManageIQ) on OpenShift Container Platform (OCP).
-
-* This role is still in **ALPHA STATUS**
-* Many options are hard-coded still (ex: NFS setup)
-* Not many configurable options yet
-* **Should** be ran on a dedicated cluster
-* **Will not run** on undersized infra
-* The terms *CFME* and *MIQ* / *ManageIQ* are interchangeable
-
-## Requirements
-
-**NOTE:** These requirements are copied from the upstream
-[manageiq/manageiq-pods](https://github.com/ManageIQ/manageiq-pods)
-project.
-
-### Prerequisites:
-
-*
- [OpenShift Origin 1.5](https://docs.openshift.com/container-platform/3.5/welcome/index.html)
- or
- [higher](https://docs.openshift.com/container-platform/latest/welcome/index.html)
- provisioned
-* NFS or other compatible volume provider
-* A cluster-admin user (created by role if required)
-
-### Cluster Sizing
-
-In order to avoid random deployment failures due to resource
-starvation, we recommend a minimum cluster size for a **test**
-environment.
-
-| Type | Size | CPUs | Memory |
-|----------------|---------|----------|----------|
-| Masters | `1+` | `8` | `12GB` |
-| Nodes | `2+` | `4` | `8GB` |
-| PV Storage | `25GB` | `N/A` | `N/A` |
-
-
-![Basic CFME Deployment](img/CFMEBasicDeployment.png)
-
-**CFME has hard-requirements for memory. CFME will NOT install if your
- infrastructure does not meet or exceed the requirements given
- above. Do not run this playbook if you do not have the required
- memory, you will just waste your time.**
-
-
-### Other sizing considerations
-
-* Recommendations assume MIQ will be the **only application running**
- on this cluster.
-* Alternatively, you can provision an infrastructure node to run
- registry/metrics/router/logging pods.
-* Each MIQ application pod will consume at least `3GB` of RAM on initial
- deployment (blank deployment without providers).
-* RAM consumption will ramp up higher depending on appliance use, once
- providers are added expect higher resource consumption.
-
-
-### Assumptions
-
-1) You meet/exceed the [cluster sizing](#cluster-sizing) requirements
-1) Your NFS server is on your master host
-1) Your PV backing NFS storage volume is mounted on `/exports/`
-
-Required directories that NFS will export to back the PVs:
-
-* `/exports/miq-pv0[123]`
-
-If the required directories are not present at install-time, they will
-be created using the recommended permissions per the
-[upstream documentation](https://github.com/ManageIQ/manageiq-pods#make-persistent-volumes-to-host-the-miq-database-and-application-data):
-
-* UID/GID: `root`/`root`
-* Mode: `0775`
-
-**IMPORTANT:** If you are using a separate volume (`/dev/vdX`) for NFS
- storage, **ensure** it is mounted on `/exports/` **before** running
- this role.
-
-
-
-## Role Variables
-
-Core variables in this role:
-
-| Name | Default value | Description |
-|-------------------------------|---------------|---------------|
-| `openshift_cfme_install_app` | `False` | `True`: Install everything and create a new CFME app, `False`: Just install all of the templates and scaffolding |
-
-
-Variables you may override have defaults defined in
-[defaults/main.yml](defaults/main.yml).
-
-
-# Important Notes
-
-This is a **tech preview** status role presently. Use it with the same
-caution you would give any other pre-release software.
-
-**Most importantly** follow this one rule: don't re-run the entrypoint
-playbook multiple times in a row without cleaning up after previous
-runs if some of the CFME steps have ran. This is a known
-flake. Cleanup instructions are provided at the bottom of this README.
-
-
-# Usage
-
-This section describes the basic usage of this role. All parameters
-will use their [default values](defaults/main.yml).
-
-## Pre-flight Checks
-
-**IMPORTANT:** As documented above in [the prerequisites](#prerequisites),
- you **must already** have your OCP cluster up and running.
-
-**Optional:** The ManageIQ pod is fairly large (about 1.7 GB) so to
-save some spin-up time post-deployment, you can begin pre-pulling the
-docker image to each of your nodes now:
-
-```
-root@node0x # docker pull docker.io/manageiq/manageiq-pods:app-latest-fine
-```
-
-## Getting Started
-
-1) The *entry point playbook* to install CFME is located in
-[the BYO playbooks](../../playbooks/byo/openshift-cfme/config.yml)
-directory
-
-2) Update your existing `hosts` inventory file and ensure the
-parameter `openshift_cfme_install_app` is set to `True` under the
-`[OSEv3:vars]` block.
-
-2) Using your existing `hosts` inventory file, run `ansible-playbook`
-with the entry point playbook:
-
-```
-$ ansible-playbook -v -i <INVENTORY_FILE> playbooks/byo/openshift-cfme/config.yml
-```
-
-## Next Steps
-
-Once complete, the playbook will let you know:
-
-
-```
-TASK [openshift_cfme : Status update] *********************************************************
-ok: [ho.st.na.me] => {
- "msg": "CFME has been deployed. Note that there will be a delay before it is fully initialized.\n"
-}
-```
-
-This will take several minutes (*possibly 10 or more*, depending on
-your network connection). However, you can get some insight into the
-deployment process during initialization.
-
-### oc describe pod manageiq-0
-
-*Some useful information about the output you will see if you run the
-`oc describe pod manageiq-0` command*
-
-**Readiness probe**s - These will take a while to become
-`Healthy`. The initial health probes won't even happen for at least 8
-minutes depending on how long it takes you to pull down the large
-images. ManageIQ is a large application so it may take a considerable
-amount of time for it to deploy and be marked as `Healthy`.
-
-If you go to the node you know the application is running on (check
-for `Successfully assigned manageiq-0 to <HOST|IP>` in the `describe`
-output) you can run a `docker pull` command to monitor the progress of
-the image pull:
-
-```
-[root@cfme-node ~]# docker pull docker.io/manageiq/manageiq-pods:app-latest-fine
-Trying to pull repository docker.io/manageiq/manageiq-pods ...
-sha256:6c055ca9d3c65cd694d6c0e28986b5239ba56bbdf0488cccdaa283d545258f8a: Pulling from docker.io/manageiq/manageiq-pods
-Digest: sha256:6c055ca9d3c65cd694d6c0e28986b5239ba56bbdf0488cccdaa283d545258f8a
-Status: Image is up to date for docker.io/manageiq/manageiq-pods:app-latest-fine
-```
-
-The example above demonstrates the case where the image has been
-successfully pulled already.
-
-If the image isn't completely pulled already then you will see
-multiple progress bars detailing each image layer download status.
-
-
-### rsh
-
-*Useful inspection/progress monitoring techniques with the `oc rsh`
-command.*
-
-
-On your master node, switch to the `cfme` project (or whatever you
-named it if you overrode the `openshift_cfme_project` variable) and
-check on the pod states:
-
-```
-[root@cfme-master01 ~]# oc project cfme
-Now using project "cfme" on server "https://10.10.0.100:8443".
-
-[root@cfme-master01 ~]# oc get pod
-NAME READY STATUS RESTARTS AGE
-manageiq-0 0/1 Running 0 14m
-memcached-1-3lk7g 1/1 Running 0 14m
-postgresql-1-12slb 1/1 Running 0 14m
-```
-
-Note how the `manageiq-0` pod says `0/1` under the **READY**
-column. After some time (depending on your network connection) you'll
-be able to `rsh` into the pod to find out more of what's happening in
-real time. First, the easy-mode command, run this once `rsh` is
-available and then watch until it says `Started Initialize Appliance
-Database`:
-
-```
-[root@cfme-master01 ~]# oc rsh manageiq-0 journalctl -f -u appliance-initialize.service
-```
-
-For the full explanation of what this means, and more interactive
-inspection techniques, keep reading on.
-
-To obtain a shell on our `manageiq` pod we use this command:
-
-```
-[root@cfme-master01 ~]# oc rsh manageiq-0 bash -l
-```
-
-The `rsh` command opens a shell in your pod for you. In this case it's
-the pod called `manageiq-0`. `systemd` is managing the services in
-this pod so we can use the `list-units` command to see what is running
-currently: `# systemctl list-units | grep appliance`.
-
-If you see the `appliance-initialize` service running, this indicates
-that basic setup is still in progress. We can monitor the process with
-the `journalctl` command like so:
-
-
-```
-[root@manageiq-0 vmdb]# journalctl -f -u appliance-initialize.service
-Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Checking deployment status ==
-Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: No pre-existing EVM configuration found on region PV
-Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Checking for existing data on server PV ==
-Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Starting New Deployment ==
-Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Applying memcached config ==
-Jun 14 14:55:53 manageiq-0 appliance-initialize.sh[58]: == Initializing Appliance ==
-Jun 14 14:55:57 manageiq-0 appliance-initialize.sh[58]: create encryption key
-Jun 14 14:55:57 manageiq-0 appliance-initialize.sh[58]: configuring external database
-Jun 14 14:55:57 manageiq-0 appliance-initialize.sh[58]: Checking for connections to the database...
-Jun 14 14:56:09 manageiq-0 appliance-initialize.sh[58]: Create region starting
-Jun 14 14:58:15 manageiq-0 appliance-initialize.sh[58]: Create region complete
-Jun 14 14:58:15 manageiq-0 appliance-initialize.sh[58]: == Initializing PV data ==
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: == Initializing PV data backup ==
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: sending incremental file list
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: created directory /persistent/server-deploy/backup/backup_2017_06_14_145816
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/REGION
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/certs/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/certs/v2_key
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/config/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/config/database.yml
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/miq/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/miq/vmdb/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/miq/vmdb/GUID
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: sent 1330 bytes received 136 bytes 2932.00 bytes/sec
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: total size is 770 speedup is 0.53
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: == Restoring PV data symlinks ==
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/REGION symlink is already in place, skipping
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/config/database.yml symlink is already in place, skipping
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/certs/v2_key symlink is already in place, skipping
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/log symlink is already in place, skipping
-Jun 14 14:58:28 manageiq-0 systemctl[304]: Removed symlink /etc/systemd/system/multi-user.target.wants/appliance-initialize.service.
-Jun 14 14:58:29 manageiq-0 systemd[1]: Started Initialize Appliance Database.
-```
-
-Most of what we see here (above) is the initial database seeding
-process. This process isn't very quick, so be patient.
-
-At the bottom of the log there is a special line from the `systemctl`
-service, `Removed symlink
-/etc/systemd/system/multi-user.target.wants/appliance-initialize.service`. The
-`appliance-initialize` service is no longer marked as enabled. This
-indicates that the base application initialization is complete now.
-
-We're not done yet though, there are other ancillary services which
-run in this pod to support the application. *Still in the rsh shell*,
-Use the `ps` command to monitor for the `httpd` processes
-starting. You will see output similar to the following when that stage
-has completed:
-
-```
-[root@manageiq-0 vmdb]# ps aux | grep http
-root 1941 0.0 0.1 249820 7640 ? Ss 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
-apache 1942 0.0 0.0 250752 6012 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
-apache 1943 0.0 0.0 250472 5952 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
-apache 1944 0.0 0.0 250472 5916 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
-apache 1945 0.0 0.0 250360 5764 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
-```
-
-Furthermore, you can find other related processes by just looking for
-ones with `MIQ` in their name:
-
-```
-[root@manageiq-0 vmdb]# ps aux | grep miq
-root 333 27.7 4.2 555884 315916 ? Sl 14:58 3:59 MIQ Server
-root 1976 0.6 4.0 507224 303740 ? SNl 15:02 0:03 MIQ: MiqGenericWorker id: 1, queue: generic
-root 1984 0.6 4.0 507224 304312 ? SNl 15:02 0:03 MIQ: MiqGenericWorker id: 2, queue: generic
-root 1992 0.9 4.0 508252 304888 ? SNl 15:02 0:05 MIQ: MiqPriorityWorker id: 3, queue: generic
-root 2000 0.7 4.0 510308 304696 ? SNl 15:02 0:04 MIQ: MiqPriorityWorker id: 4, queue: generic
-root 2008 1.2 4.0 514000 303612 ? SNl 15:02 0:07 MIQ: MiqScheduleWorker id: 5
-root 2026 0.2 4.0 517504 303644 ? SNl 15:02 0:01 MIQ: MiqEventHandler id: 6, queue: ems
-root 2036 0.2 4.0 518532 303768 ? SNl 15:02 0:01 MIQ: MiqReportingWorker id: 7, queue: reporting
-root 2044 0.2 4.0 519560 303812 ? SNl 15:02 0:01 MIQ: MiqReportingWorker id: 8, queue: reporting
-root 2059 0.2 4.0 528372 303956 ? SNl 15:02 0:01 puma 3.3.0 (tcp://127.0.0.1:5000) [MIQ: Web Server Worker]
-root 2067 0.9 4.0 529664 305716 ? SNl 15:02 0:05 puma 3.3.0 (tcp://127.0.0.1:3000) [MIQ: Web Server Worker]
-root 2075 0.2 4.0 529408 304056 ? SNl 15:02 0:01 puma 3.3.0 (tcp://127.0.0.1:4000) [MIQ: Web Server Worker]
-root 2329 0.0 0.0 10640 972 ? S+ 15:13 0:00 grep --color=auto -i miq
-```
-
-Finally, *still in the rsh shell*, to test if the application is
-running correctly, we can request the application homepage. If the
-page is available the page title will be `ManageIQ: Login`:
-
-```
-[root@manageiq-0 vmdb]# curl -s -k https://localhost | grep -A2 '<title>'
-<title>
-ManageIQ: Login
-</title>
-```
-
-**Note:** The `-s` flag makes `curl` operations silent and the `-k`
-flag to ignore errors about untrusted certificates.
-
-
-
-# Additional Upstream Resources
-
-Below are some useful resources from the upstream project
-documentation. You may find these of value.
-
-* [Verify Setup Was Successful](https://github.com/ManageIQ/manageiq-pods#verifying-the-setup-was-successful)
-* [POD Access And Routes](https://github.com/ManageIQ/manageiq-pods#pod-access-and-routes)
-* [Troubleshooting](https://github.com/ManageIQ/manageiq-pods#troubleshooting)
-
-
-# Manual Cleanup
-
-At this time uninstallation/cleanup is still a manual process. You
-will have to follow a few steps to fully remove CFME from your
-cluster.
-
-Delete the project:
-
-* `oc delete project cfme`
-
-Delete the PVs:
-
-* `oc delete pv miq-pv01`
-* `oc delete pv miq-pv02`
-* `oc delete pv miq-pv03`
-
-Clean out the old PV data:
-
-* `cd /exports/`
-* `find miq* -type f -delete`
-* `find miq* -type d -delete`
-
-Remove the NFS exports:
-
-* `rm /etc/exports.d/openshift_cfme.exports`
-* `exportfs -ar`
-
-Delete the user:
-
-* `oc delete user cfme`
-
-**NOTE:** The `oc delete project cfme` command will return quickly
-however it will continue to operate in the background. Continue
-running `oc get project` after you've completed the other steps to
-monitor the pods and final project termination progress.
diff --git a/roles/openshift_cfme/defaults/main.yml b/roles/openshift_cfme/defaults/main.yml
deleted file mode 100644
index b82c2e602..000000000
--- a/roles/openshift_cfme/defaults/main.yml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-# Namespace for the CFME project (Note: changed post-3.6 to use
-# reserved 'openshift-' namespace prefix)
-openshift_cfme_project: openshift-cfme
-# Namespace/project description
-openshift_cfme_project_description: ManageIQ - CloudForms Management Engine
-# Basic user assigned the `admin` role for the project
-openshift_cfme_user: cfme
-# Project system account for enabling privileged pods
-openshift_cfme_service_account: "system:serviceaccount:{{ openshift_cfme_project }}:default"
-# All the required exports
-openshift_cfme_pv_exports:
- - miq-pv01
- - miq-pv02
- - miq-pv03
-# PV template files and their created object names
-openshift_cfme_pv_data:
- - pv_name: miq-pv01
- pv_template: miq-pv-db.yaml
- pv_label: CFME DB PV
- - pv_name: miq-pv02
- pv_template: miq-pv-region.yaml
- pv_label: CFME Region PV
- - pv_name: miq-pv03
- pv_template: miq-pv-server.yaml
- pv_label: CFME Server PV
-
-# Tuning parameter to use more than 5 images at once from an ImageStream
-openshift_cfme_maxImagesBulkImportedPerRepository: 100
-# TODO: Refactor '_install_app' variable. This is just for testing but
-# maybe in the future it should control the entire yes/no for CFME.
-#
-# Whether or not the manageiq app should be initialized ('oc new-app
-# --template=manageiq). If False everything UP TO 'new-app' is ran.
-openshift_cfme_install_app: False
-# Docker image to pull
-openshift_cfme_application_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-app' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods' }}"
-openshift_cfme_postgresql_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-postgresql' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods' }}"
-openshift_cfme_memcached_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-memcached' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods' }}"
-openshift_cfme_application_img_tag: "{{ 'latest' if openshift_deployment_type == 'openshift-enterprise' else 'app-latest-fine' }}"
-openshift_cfme_memcached_img_tag: "{{ 'latest' if openshift_deployment_type == 'openshift-enterprise' else 'memcached-latest-fine' }}"
-openshift_cfme_postgresql_img_tag: "{{ 'latest' if openshift_deployment_type == 'openshift-enterprise' else 'postgresql-latest-fine' }}"
diff --git a/roles/openshift_cfme/files/miq-template.yaml b/roles/openshift_cfme/files/miq-template.yaml
deleted file mode 100644
index 8f0d2af38..000000000
--- a/roles/openshift_cfme/files/miq-template.yaml
+++ /dev/null
@@ -1,566 +0,0 @@
----
-path: /tmp/miq-template-out
-data:
- apiVersion: v1
- kind: Template
- labels:
- template: manageiq
- metadata:
- name: manageiq
- annotations:
- description: "ManageIQ appliance with persistent storage"
- tags: "instant-app,manageiq,miq"
- iconClass: "icon-rails"
- objects:
- - apiVersion: v1
- kind: Secret
- metadata:
- name: "${NAME}-secrets"
- stringData:
- pg-password: "${DATABASE_PASSWORD}"
- - apiVersion: v1
- kind: Service
- metadata:
- annotations:
- description: "Exposes and load balances ManageIQ pods"
- service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
- name: ${NAME}
- spec:
- clusterIP: None
- ports:
- - name: http
- port: 80
- protocol: TCP
- targetPort: 80
- - name: https
- port: 443
- protocol: TCP
- targetPort: 443
- selector:
- name: ${NAME}
- - apiVersion: v1
- kind: Route
- metadata:
- name: ${NAME}
- spec:
- host: ${APPLICATION_DOMAIN}
- port:
- targetPort: https
- tls:
- termination: passthrough
- to:
- kind: Service
- name: ${NAME}
- - apiVersion: v1
- kind: ImageStream
- metadata:
- name: miq-app
- annotations:
- description: "Keeps track of the ManageIQ image changes"
- spec:
- dockerImageRepository: "${APPLICATION_IMG_NAME}"
- - apiVersion: v1
- kind: ImageStream
- metadata:
- name: miq-postgresql
- annotations:
- description: "Keeps track of the PostgreSQL image changes"
- spec:
- dockerImageRepository: "${POSTGRESQL_IMG_NAME}"
- - apiVersion: v1
- kind: ImageStream
- metadata:
- name: miq-memcached
- annotations:
- description: "Keeps track of the Memcached image changes"
- spec:
- dockerImageRepository: "${MEMCACHED_IMG_NAME}"
- - apiVersion: v1
- kind: PersistentVolumeClaim
- metadata:
- name: "${NAME}-${DATABASE_SERVICE_NAME}"
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: ${DATABASE_VOLUME_CAPACITY}
- - apiVersion: v1
- kind: PersistentVolumeClaim
- metadata:
- name: "${NAME}-region"
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: ${APPLICATION_REGION_VOLUME_CAPACITY}
- - apiVersion: apps/v1beta1
- kind: "StatefulSet"
- metadata:
- name: ${NAME}
- annotations:
- description: "Defines how to deploy the ManageIQ appliance"
- spec:
- serviceName: "${NAME}"
- replicas: "${APPLICATION_REPLICA_COUNT}"
- template:
- metadata:
- labels:
- name: ${NAME}
- name: ${NAME}
- spec:
- containers:
- - name: manageiq
- image: "${APPLICATION_IMG_NAME}:${APPLICATION_IMG_TAG}"
- livenessProbe:
- tcpSocket:
- port: 443
- initialDelaySeconds: 480
- timeoutSeconds: 3
- readinessProbe:
- httpGet:
- path: /
- port: 443
- scheme: HTTPS
- initialDelaySeconds: 200
- timeoutSeconds: 3
- ports:
- - containerPort: 80
- protocol: TCP
- - containerPort: 443
- protocol: TCP
- securityContext:
- privileged: true
- volumeMounts:
- -
- name: "${NAME}-server"
- mountPath: "/persistent"
- -
- name: "${NAME}-region"
- mountPath: "/persistent-region"
- env:
- -
- name: "APPLICATION_INIT_DELAY"
- value: "${APPLICATION_INIT_DELAY}"
- -
- name: "DATABASE_SERVICE_NAME"
- value: "${DATABASE_SERVICE_NAME}"
- -
- name: "DATABASE_REGION"
- value: "${DATABASE_REGION}"
- -
- name: "MEMCACHED_SERVICE_NAME"
- value: "${MEMCACHED_SERVICE_NAME}"
- -
- name: "POSTGRESQL_USER"
- value: "${DATABASE_USER}"
- -
- name: "POSTGRESQL_PASSWORD"
- valueFrom:
- secretKeyRef:
- name: "${NAME}-secrets"
- key: "pg-password"
- -
- name: "POSTGRESQL_DATABASE"
- value: "${DATABASE_NAME}"
- -
- name: "POSTGRESQL_MAX_CONNECTIONS"
- value: "${POSTGRESQL_MAX_CONNECTIONS}"
- -
- name: "POSTGRESQL_SHARED_BUFFERS"
- value: "${POSTGRESQL_SHARED_BUFFERS}"
- resources:
- requests:
- memory: "${APPLICATION_MEM_REQ}"
- cpu: "${APPLICATION_CPU_REQ}"
- limits:
- memory: "${APPLICATION_MEM_LIMIT}"
- lifecycle:
- preStop:
- exec:
- command:
- - /opt/manageiq/container-scripts/sync-pv-data
- volumes:
- -
- name: "${NAME}-region"
- persistentVolumeClaim:
- claimName: ${NAME}-region
- volumeClaimTemplates:
- - metadata:
- name: "${NAME}-server"
- annotations:
- # Uncomment this if using dynamic volume provisioning.
- # https://docs.openshift.org/latest/install_config/persistent_storage/dynamically_provisioning_pvs.html
- # volume.alpha.kubernetes.io/storage-class: anything
- spec:
- accessModes: [ ReadWriteOnce ]
- resources:
- requests:
- storage: "${APPLICATION_VOLUME_CAPACITY}"
- - apiVersion: v1
- kind: "Service"
- metadata:
- name: "${MEMCACHED_SERVICE_NAME}"
- annotations:
- description: "Exposes the memcached server"
- spec:
- ports:
- -
- name: "memcached"
- port: 11211
- targetPort: 11211
- selector:
- name: "${MEMCACHED_SERVICE_NAME}"
- - apiVersion: v1
- kind: "DeploymentConfig"
- metadata:
- name: "${MEMCACHED_SERVICE_NAME}"
- annotations:
- description: "Defines how to deploy memcached"
- spec:
- strategy:
- type: "Recreate"
- triggers:
- -
- type: "ImageChange"
- imageChangeParams:
- automatic: true
- containerNames:
- - "memcached"
- from:
- kind: "ImageStreamTag"
- name: "miq-memcached:${MEMCACHED_IMG_TAG}"
- -
- type: "ConfigChange"
- replicas: 1
- selector:
- name: "${MEMCACHED_SERVICE_NAME}"
- template:
- metadata:
- name: "${MEMCACHED_SERVICE_NAME}"
- labels:
- name: "${MEMCACHED_SERVICE_NAME}"
- spec:
- volumes: []
- containers:
- -
- name: "memcached"
- image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
- ports:
- -
- containerPort: 11211
- readinessProbe:
- timeoutSeconds: 1
- initialDelaySeconds: 5
- tcpSocket:
- port: 11211
- livenessProbe:
- timeoutSeconds: 1
- initialDelaySeconds: 30
- tcpSocket:
- port: 11211
- volumeMounts: []
- env:
- -
- name: "MEMCACHED_MAX_MEMORY"
- value: "${MEMCACHED_MAX_MEMORY}"
- -
- name: "MEMCACHED_MAX_CONNECTIONS"
- value: "${MEMCACHED_MAX_CONNECTIONS}"
- -
- name: "MEMCACHED_SLAB_PAGE_SIZE"
- value: "${MEMCACHED_SLAB_PAGE_SIZE}"
- resources:
- requests:
- memory: "${MEMCACHED_MEM_REQ}"
- cpu: "${MEMCACHED_CPU_REQ}"
- limits:
- memory: "${MEMCACHED_MEM_LIMIT}"
- - apiVersion: v1
- kind: "Service"
- metadata:
- name: "${DATABASE_SERVICE_NAME}"
- annotations:
- description: "Exposes the database server"
- spec:
- ports:
- -
- name: "postgresql"
- port: 5432
- targetPort: 5432
- selector:
- name: "${DATABASE_SERVICE_NAME}"
- - apiVersion: v1
- kind: "DeploymentConfig"
- metadata:
- name: "${DATABASE_SERVICE_NAME}"
- annotations:
- description: "Defines how to deploy the database"
- spec:
- strategy:
- type: "Recreate"
- triggers:
- -
- type: "ImageChange"
- imageChangeParams:
- automatic: true
- containerNames:
- - "postgresql"
- from:
- kind: "ImageStreamTag"
- name: "miq-postgresql:${POSTGRESQL_IMG_TAG}"
- -
- type: "ConfigChange"
- replicas: 1
- selector:
- name: "${DATABASE_SERVICE_NAME}"
- template:
- metadata:
- name: "${DATABASE_SERVICE_NAME}"
- labels:
- name: "${DATABASE_SERVICE_NAME}"
- spec:
- volumes:
- -
- name: "miq-pgdb-volume"
- persistentVolumeClaim:
- claimName: "${NAME}-${DATABASE_SERVICE_NAME}"
- containers:
- -
- name: "postgresql"
- image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}"
- ports:
- -
- containerPort: 5432
- readinessProbe:
- timeoutSeconds: 1
- initialDelaySeconds: 15
- exec:
- command:
- - "/bin/sh"
- - "-i"
- - "-c"
- - "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"
- livenessProbe:
- timeoutSeconds: 1
- initialDelaySeconds: 60
- tcpSocket:
- port: 5432
- volumeMounts:
- -
- name: "miq-pgdb-volume"
- mountPath: "/var/lib/pgsql/data"
- env:
- -
- name: "POSTGRESQL_USER"
- value: "${DATABASE_USER}"
- -
- name: "POSTGRESQL_PASSWORD"
- valueFrom:
- secretKeyRef:
- name: "${NAME}-secrets"
- key: "pg-password"
- -
- name: "POSTGRESQL_DATABASE"
- value: "${DATABASE_NAME}"
- -
- name: "POSTGRESQL_MAX_CONNECTIONS"
- value: "${POSTGRESQL_MAX_CONNECTIONS}"
- -
- name: "POSTGRESQL_SHARED_BUFFERS"
- value: "${POSTGRESQL_SHARED_BUFFERS}"
- resources:
- requests:
- memory: "${POSTGRESQL_MEM_REQ}"
- cpu: "${POSTGRESQL_CPU_REQ}"
- limits:
- memory: "${POSTGRESQL_MEM_LIMIT}"
-
- parameters:
- -
- name: "NAME"
- displayName: Name
- required: true
- description: "The name assigned to all of the frontend objects defined in this template."
- value: manageiq
- -
- name: "DATABASE_SERVICE_NAME"
- displayName: "PostgreSQL Service Name"
- required: true
- description: "The name of the OpenShift Service exposed for the PostgreSQL container."
- value: "postgresql"
- -
- name: "DATABASE_USER"
- displayName: "PostgreSQL User"
- required: true
- description: "PostgreSQL user that will access the database."
- value: "root"
- -
- name: "DATABASE_PASSWORD"
- displayName: "PostgreSQL Password"
- required: true
- description: "Password for the PostgreSQL user."
- from: "[a-zA-Z0-9]{8}"
- generate: expression
- -
- name: "DATABASE_NAME"
- required: true
- displayName: "PostgreSQL Database Name"
- description: "Name of the PostgreSQL database accessed."
- value: "vmdb_production"
- -
- name: "DATABASE_REGION"
- required: true
- displayName: "Application Database Region"
- description: "Database region that will be used for application."
- value: "0"
- -
- name: "MEMCACHED_SERVICE_NAME"
- required: true
- displayName: "Memcached Service Name"
- description: "The name of the OpenShift Service exposed for the Memcached container."
- value: "memcached"
- -
- name: "MEMCACHED_MAX_MEMORY"
- displayName: "Memcached Max Memory"
- description: "Memcached maximum memory for memcached object storage in MB."
- value: "64"
- -
- name: "MEMCACHED_MAX_CONNECTIONS"
- displayName: "Memcached Max Connections"
- description: "Memcached maximum number of connections allowed."
- value: "1024"
- -
- name: "MEMCACHED_SLAB_PAGE_SIZE"
- displayName: "Memcached Slab Page Size"
- description: "Memcached size of each slab page."
- value: "1m"
- -
- name: "POSTGRESQL_MAX_CONNECTIONS"
- displayName: "PostgreSQL Max Connections"
- description: "PostgreSQL maximum number of database connections allowed."
- value: "100"
- -
- name: "POSTGRESQL_SHARED_BUFFERS"
- displayName: "PostgreSQL Shared Buffer Amount"
- description: "Amount of memory dedicated for PostgreSQL shared memory buffers."
- value: "256MB"
- -
- name: "APPLICATION_CPU_REQ"
- displayName: "Application Min CPU Requested"
- required: true
- description: "Minimum amount of CPU time the Application container will need (expressed in millicores)."
- value: "1000m"
- -
- name: "POSTGRESQL_CPU_REQ"
- displayName: "PostgreSQL Min CPU Requested"
- required: true
- description: "Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores)."
- value: "500m"
- -
- name: "MEMCACHED_CPU_REQ"
- displayName: "Memcached Min CPU Requested"
- required: true
- description: "Minimum amount of CPU time the Memcached container will need (expressed in millicores)."
- value: "200m"
- -
- name: "APPLICATION_MEM_REQ"
- displayName: "Application Min RAM Requested"
- required: true
- description: "Minimum amount of memory the Application container will need."
- value: "6144Mi"
- -
- name: "POSTGRESQL_MEM_REQ"
- displayName: "PostgreSQL Min RAM Requested"
- required: true
- description: "Minimum amount of memory the PostgreSQL container will need."
- value: "1024Mi"
- -
- name: "MEMCACHED_MEM_REQ"
- displayName: "Memcached Min RAM Requested"
- required: true
- description: "Minimum amount of memory the Memcached container will need."
- value: "64Mi"
- -
- name: "APPLICATION_MEM_LIMIT"
- displayName: "Application Max RAM Limit"
- required: true
- description: "Maximum amount of memory the Application container can consume."
- value: "16384Mi"
- -
- name: "POSTGRESQL_MEM_LIMIT"
- displayName: "PostgreSQL Max RAM Limit"
- required: true
- description: "Maximum amount of memory the PostgreSQL container can consume."
- value: "8192Mi"
- -
- name: "MEMCACHED_MEM_LIMIT"
- displayName: "Memcached Max RAM Limit"
- required: true
- description: "Maximum amount of memory the Memcached container can consume."
- value: "256Mi"
- -
- name: "POSTGRESQL_IMG_NAME"
- displayName: "PostgreSQL Image Name"
- description: "This is the PostgreSQL image name requested to deploy."
- value: "docker.io/manageiq/manageiq-pods"
- -
- name: "POSTGRESQL_IMG_TAG"
- displayName: "PostgreSQL Image Tag"
- description: "This is the PostgreSQL image tag/version requested to deploy."
- value: "postgresql-latest-fine"
- -
- name: "MEMCACHED_IMG_NAME"
- displayName: "Memcached Image Name"
- description: "This is the Memcached image name requested to deploy."
- value: "docker.io/manageiq/manageiq-pods"
- -
- name: "MEMCACHED_IMG_TAG"
- displayName: "Memcached Image Tag"
- description: "This is the Memcached image tag/version requested to deploy."
- value: "memcached-latest-fine"
- -
- name: "APPLICATION_IMG_NAME"
- displayName: "Application Image Name"
- description: "This is the Application image name requested to deploy."
- value: "docker.io/manageiq/manageiq-pods"
- -
- name: "APPLICATION_IMG_TAG"
- displayName: "Application Image Tag"
- description: "This is the Application image tag/version requested to deploy."
- value: "app-latest-fine"
- -
- name: "APPLICATION_DOMAIN"
- displayName: "Application Hostname"
- description: "The exposed hostname that will route to the application service, if left blank a value will be defaulted."
- value: ""
- -
- name: "APPLICATION_REPLICA_COUNT"
- displayName: "Application Replica Count"
- description: "This is the number of Application replicas requested to deploy."
- value: "1"
- -
- name: "APPLICATION_INIT_DELAY"
- displayName: "Application Init Delay"
- required: true
- description: "Delay in seconds before we attempt to initialize the application."
- value: "15"
- -
- name: "APPLICATION_VOLUME_CAPACITY"
- displayName: "Application Volume Capacity"
- required: true
- description: "Volume space available for application data."
- value: "5Gi"
- -
- name: "APPLICATION_REGION_VOLUME_CAPACITY"
- displayName: "Application Region Volume Capacity"
- required: true
- description: "Volume space available for region application data."
- value: "5Gi"
- -
- name: "DATABASE_VOLUME_CAPACITY"
- displayName: "Database Volume Capacity"
- required: true
- description: "Volume space available for database."
- value: "15Gi"
diff --git a/roles/openshift_cfme/files/openshift_cfme.exports b/roles/openshift_cfme/files/openshift_cfme.exports
deleted file mode 100644
index 5457d41fc..000000000
--- a/roles/openshift_cfme/files/openshift_cfme.exports
+++ /dev/null
@@ -1,3 +0,0 @@
-/exports/miq-pv01 *(rw,no_root_squash,no_wdelay)
-/exports/miq-pv02 *(rw,no_root_squash,no_wdelay)
-/exports/miq-pv03 *(rw,no_root_squash,no_wdelay)
diff --git a/roles/openshift_cfme/handlers/main.yml b/roles/openshift_cfme/handlers/main.yml
deleted file mode 100644
index 7e90b09a4..000000000
--- a/roles/openshift_cfme/handlers/main.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-######################################################################
-# NOTE: These are duplicated from roles/openshift_master/handlers/main.yml
-#
-# TODO: Use the consolidated 'openshift_handlers' role once it's ready
-# See: https://github.com/openshift/openshift-ansible/pull/4041#discussion_r118770782
-######################################################################
-
-- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
- when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
- notify: Verify API Server
-
-- name: restart master controllers
- systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
- when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
-
-- name: Verify API Server
- # Using curl here since the uri module requires python-httplib2 and
- # wait_for port doesn't provide health information.
- command: >
- curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
- --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
- {{ openshift.master.api_url }}/healthz/ready
- args:
- # Disables the following warning:
- # Consider using get_url or uri module rather than running curl
- warn: no
- register: api_available_output
- until: api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- changed_when: false
diff --git a/roles/openshift_cfme/img/CFMEBasicDeployment.png b/roles/openshift_cfme/img/CFMEBasicDeployment.png
deleted file mode 100644
index a89c1e325..000000000
--- a/roles/openshift_cfme/img/CFMEBasicDeployment.png
+++ /dev/null
Binary files differ
diff --git a/roles/openshift_cfme/tasks/create_pvs.yml b/roles/openshift_cfme/tasks/create_pvs.yml
deleted file mode 100644
index 7fa7d3997..000000000
--- a/roles/openshift_cfme/tasks/create_pvs.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-# Check for existance and then conditionally:
-# - evaluate templates
-# - PVs
-#
-# These tasks idempotently create required CFME PV objects. Do not
-# call this file directly. This file is intended to be ran as an
-# include that has a 'with_items' attached to it. Hence the use below
-# of variables like "{{ item.pv_label }}"
-
-- name: "Check if the {{ item.pv_label }} template has been created already"
- oc_obj:
- namespace: "{{ openshift_cfme_project }}"
- state: list
- kind: pv
- name: "{{ item.pv_name }}"
- register: miq_pv_check
-
-# Skip all of this if the PV already exists
-- block:
- - name: "Ensure the {{ item.pv_label }} template is evaluated"
- template:
- src: "{{ item.pv_template }}.j2"
- dest: "{{ template_dir }}/{{ item.pv_template }}"
-
- - name: "Ensure {{ item.pv_label }} is created"
- oc_obj:
- namespace: "{{ openshift_cfme_project }}"
- kind: pv
- name: "{{ item.pv_name }}"
- state: present
- delete_after: True
- files:
- - "{{ template_dir }}/{{ item.pv_template }}"
- when:
- - not miq_pv_check.results.results.0
diff --git a/roles/openshift_cfme/tasks/main.yml b/roles/openshift_cfme/tasks/main.yml
deleted file mode 100644
index 74ae16d91..000000000
--- a/roles/openshift_cfme/tasks/main.yml
+++ /dev/null
@@ -1,117 +0,0 @@
----
-######################################################################
-# Users, projects, and privileges
-
-- name: Ensure the CFME user exists
- oc_user:
- state: present
- username: "{{ openshift_cfme_user }}"
-
-- name: Ensure the CFME namespace exists with CFME user as admin
- oc_project:
- state: present
- name: "{{ openshift_cfme_project }}"
- display_name: "{{ openshift_cfme_project_description }}"
- admin: "{{ openshift_cfme_user }}"
-
-- name: Ensure the CFME namespace service account is privileged
- oc_adm_policy_user:
- namespace: "{{ openshift_cfme_project }}"
- user: "{{ openshift_cfme_service_account }}"
- resource_kind: scc
- resource_name: privileged
- state: present
-
-######################################################################
-# NFS
-# In the case that we are not running on a cloud provider, volumes must be statically provisioned
-
-- include: nfs.yml
- when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
-
-######################################################################
-# CFME App Template
-#
-# Note, this is different from the create_pvs.yml tasks in that the
-# application template does not require any jinja2 evaluation.
-#
-# TODO: Handle the case where the server template is updated in
-# openshift-ansible and the change needs to be landed on the managed
-# cluster.
-
-- name: Check if the CFME Server template has been created already
- oc_obj:
- namespace: "{{ openshift_cfme_project }}"
- state: list
- kind: template
- name: manageiq
- register: miq_server_check
-
-- name: Copy over CFME Server template
- copy:
- src: miq-template.yaml
- dest: "{{ template_dir }}/miq-template.yaml"
-
-- name: Ensure the server template was read from disk
- debug:
- var=r_openshift_cfme_miq_template_content
-
-- name: Ensure CFME Server Template exists
- oc_obj:
- namespace: "{{ openshift_cfme_project }}"
- kind: template
- name: "manageiq"
- state: present
- content: "{{ r_openshift_cfme_miq_template_content }}"
-
-######################################################################
-# Let's do this
-
-- name: Ensure the CFME Server is created
- oc_process:
- namespace: "{{ openshift_cfme_project }}"
- template_name: manageiq
- create: True
- params:
- APPLICATION_IMG_NAME: "{{ openshift_cfme_application_img_name }}"
- POSTGRESQL_IMG_NAME: "{{ openshift_cfme_postgresql_img_name }}"
- MEMCACHED_IMG_NAME: "{{ openshift_cfme_memcached_img_name }}"
- APPLICATION_IMG_TAG: "{{ openshift_cfme_application_img_tag }}"
- POSTGRESQL_IMG_TAG: "{{ openshift_cfme_postgresql_img_tag }}"
- MEMCACHED_IMG_TAG: "{{ openshift_cfme_memcached_img_tag }}"
- register: cfme_new_app_process
- run_once: True
- when:
- # User said to install CFME in their inventory
- - openshift_cfme_install_app | bool
- # # The server app doesn't exist already
- # - not miq_server_check.results.results.0
-
-- debug:
- var: cfme_new_app_process
-
-######################################################################
-# Various cleanup steps
-
-# TODO: Not sure what to do about this right now. Might be able to
-# just delete it? This currently warns about "Unable to find
-# '<TEMP_DIR>' in expected paths."
-- name: Ensure the temporary PV/App templates are erased
- file:
- path: "{{ item }}"
- state: absent
- with_fileglob:
- - "{{ template_dir }}/*.yaml"
-
-- name: Ensure the temporary PV/app template directory is erased
- file:
- path: "{{ template_dir }}"
- state: absent
-
-######################################################################
-
-- name: Status update
- debug:
- msg: >
- CFME has been deployed. Note that there will be a delay before
- it is fully initialized.
diff --git a/roles/openshift_cfme/tasks/nfs.yml b/roles/openshift_cfme/tasks/nfs.yml
deleted file mode 100644
index ca04628a8..000000000
--- a/roles/openshift_cfme/tasks/nfs.yml
+++ /dev/null
@@ -1,51 +0,0 @@
----
-# Tasks to statically provision NFS volumes
-# Include if not using dynamic volume provisioning
-
-- name: Set openshift_cfme_nfs_server fact
- when: openshift_cfme_nfs_server is not defined
- set_fact:
- # Hostname/IP of the NFS server. Currently defaults to first master
- openshift_cfme_nfs_server: "{{ oo_nfs_to_config.0 }}"
-
-- name: Ensure the /exports/ directory exists
- file:
- path: /exports/
- state: directory
- mode: 0755
- owner: root
- group: root
-
-- name: Ensure the miq-pv0X export directories exist
- file:
- path: "/exports/{{ item }}"
- state: directory
- mode: 0775
- owner: root
- group: root
- with_items: "{{ openshift_cfme_pv_exports }}"
-
-- name: Ensure the NFS exports for CFME PVs exist
- copy:
- src: openshift_cfme.exports
- dest: /etc/exports.d/openshift_cfme.exports
- register: nfs_exports_updated
-
-- name: Ensure the NFS export table is refreshed if exports were added
- command: exportfs -ar
- when:
- - nfs_exports_updated.changed
-
-
-######################################################################
-# Create the required CFME PVs. Check out these online docs if you
-# need a refresher on includes looping with items:
-# * http://docs.ansible.com/ansible/playbooks_loops.html#loops-and-includes-in-2-0
-# * http://stackoverflow.com/a/35128533
-#
-# TODO: Handle the case where a PV template is updated in
-# openshift-ansible and the change needs to be landed on the managed
-# cluster.
-
-- include: create_pvs.yml
- with_items: "{{ openshift_cfme_pv_data }}"
diff --git a/roles/openshift_cfme/tasks/tune_masters.yml b/roles/openshift_cfme/tasks/tune_masters.yml
deleted file mode 100644
index 02b0f10bf..000000000
--- a/roles/openshift_cfme/tasks/tune_masters.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Ensure bulk image import limit is tuned
- yedit:
- src: /etc/origin/master/master-config.yaml
- key: 'imagePolicyConfig.maxImagesBulkImportedPerRepository'
- value: "{{ openshift_cfme_maxImagesBulkImportedPerRepository | int() }}"
- state: present
- backup: True
- notify:
- - restart master
-
-- meta: flush_handlers
diff --git a/roles/openshift_cfme/tasks/uninstall.yml b/roles/openshift_cfme/tasks/uninstall.yml
deleted file mode 100644
index 406b59364..000000000
--- a/roles/openshift_cfme/tasks/uninstall.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- include_role:
- name: lib_openshift
-
-- name: Uninstall CFME - ManageIQ
- debug:
- msg: Uninstalling Cloudforms Management Engine - ManageIQ
-
-- name: Ensure the CFME project is removed
- oc_project:
- state: absent
- name: "{{ openshift_cfme_project }}"
-
-- name: Ensure the CFME template is removed
- oc_obj:
- namespace: "{{ openshift_cfme_project }}"
- state: absent
- kind: template
- name: manageiq
-
-- name: Ensure the CFME PVs are removed
- oc_obj:
- state: absent
- all_namespaces: True
- kind: pv
- name: "{{ item }}"
- with_items: "{{ openshift_cfme_pv_exports }}"
- when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
-
-- name: Ensure the CFME user is removed
- oc_user:
- state: absent
- username: "{{ openshift_cfme_user }}"
-
-- name: Ensure the CFME NFS Exports are removed
- file:
- path: /etc/exports.d/openshift_cfme.exports
- state: absent
- register: nfs_exports_removed
- when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
-
-- name: Ensure the NFS export table is refreshed if exports were removed
- command: exportfs -ar
- when:
- - nfs_exports_removed.changed
- - not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
diff --git a/roles/openshift_cfme/templates/miq-pv-db.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-db.yaml.j2
deleted file mode 100644
index 280f3e97a..000000000
--- a/roles/openshift_cfme/templates/miq-pv-db.yaml.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: miq-pv01
-spec:
- capacity:
- storage: 15Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: {{ openshift_cfme_nfs_directory }}/miq-pv01
- server: {{ openshift_cfme_nfs_server }}
- persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_cfme/templates/miq-pv-region.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-region.yaml.j2
deleted file mode 100644
index fe80dffa5..000000000
--- a/roles/openshift_cfme/templates/miq-pv-region.yaml.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: miq-pv02
-spec:
- capacity:
- storage: 5Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: {{ openshift_cfme_nfs_directory }}/miq-pv02
- server: {{ openshift_cfme_nfs_server }}
- persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_cfme/templates/miq-pv-server.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-server.yaml.j2
deleted file mode 100644
index f84b67ea9..000000000
--- a/roles/openshift_cfme/templates/miq-pv-server.yaml.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: miq-pv03
-spec:
- capacity:
- storage: 5Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: {{ openshift_cfme_nfs_directory }}/miq-pv03
- server: {{ openshift_cfme_nfs_server }}
- persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_default_storage_class/README.md b/roles/openshift_default_storage_class/README.md
index 198163127..57e732f37 100644
--- a/roles/openshift_default_storage_class/README.md
+++ b/roles/openshift_default_storage_class/README.md
@@ -1,7 +1,7 @@
openshift_master_storage_class
=========
-A role that deploys configuratons for Openshift StorageClass
+A role that deploys configurations for Openshift StorageClass
Requirements
------------
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index 334150f63..5a3e50678 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -6,9 +6,6 @@
with_items:
- role: docker
local_facts:
- additional_registries: "{{ openshift_docker_additional_registries | default(None) }}"
- blocked_registries: "{{ openshift_docker_blocked_registries | default(None) }}"
- insecure_registries: "{{ openshift_docker_insecure_registries | default(None) }}"
selinux_enabled: "{{ openshift_docker_selinux_enabled | default(None) }}"
log_driver: "{{ openshift_docker_log_driver | default(None) }}"
log_options: "{{ openshift_docker_log_options | default(None) }}"
@@ -23,12 +20,6 @@
sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
- set_fact:
- docker_additional_registries: "{{ openshift.docker.additional_registries
- | default(omit) }}"
- docker_blocked_registries: "{{ openshift.docker.blocked_registries
- | default(omit) }}"
- docker_insecure_registries: "{{ openshift.docker.insecure_registries
- | default(omit) }}"
docker_selinux_enabled: "{{ openshift.docker.selinux_enabled | default(omit) }}"
docker_log_driver: "{{ openshift.docker.log_driver | default(omit) }}"
docker_log_options: "{{ openshift.docker.log_options | default(omit) }}"
diff --git a/roles/openshift_etcd_client_certificates/meta/main.yml b/roles/openshift_etcd_client_certificates/meta/main.yml
index 3268c390f..fbc72c8a3 100644
--- a/roles/openshift_etcd_client_certificates/meta/main.yml
+++ b/roles/openshift_etcd_client_certificates/meta/main.yml
@@ -11,6 +11,4 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies:
-- role: openshift_etcd_facts
-- role: etcd_client_certificates
+dependencies: []
diff --git a/roles/openshift_etcd_client_certificates/tasks/main.yml b/roles/openshift_etcd_client_certificates/tasks/main.yml
new file mode 100644
index 000000000..7f8b667f0
--- /dev/null
+++ b/roles/openshift_etcd_client_certificates/tasks/main.yml
@@ -0,0 +1,4 @@
+---
+- include_role:
+ name: etcd
+ tasks_from: client_certificates
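The role is now a thin shim that delegates to the consolidated `etcd` role via `include_role`/`tasks_from`. A minimal sketch of a play consuming it, assuming the usual master host group and that etcd facts are applied separately now that the `openshift_etcd_facts` meta dependency is gone; the play and group names are illustrative:

```
# illustrative play, not part of this patch
- name: Create etcd client certificates
  hosts: oo_masters_to_config
  roles:
  - role: openshift_etcd_facts
  - role: openshift_etcd_client_certificates
```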
diff --git a/roles/openshift_etcd_server_certificates/meta/main.yml b/roles/openshift_etcd_server_certificates/meta/main.yml
deleted file mode 100644
index 7750f14af..000000000
--- a/roles/openshift_etcd_server_certificates/meta/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-galaxy_info:
- author: Jason DeTiberus
- description: OpenShift Etcd Server Certificates
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 2.1
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
-dependencies:
-- role: openshift_etcd_facts
-- role: etcd_server_certificates
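The server-certificate wrapper loses its meta file entirely here; presumably it is converted to the same `include_role`/`tasks_from` shim shown above for the client certificates. A sketch under that assumption only, not taken from this patch:

```
# roles/openshift_etcd_server_certificates/tasks/main.yml (assumed shape)
- include_role:
    name: etcd
    tasks_from: server_certificates
```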
diff --git a/roles/openshift_examples/README.md b/roles/openshift_examples/README.md
index 8cc479c73..014cef111 100644
--- a/roles/openshift_examples/README.md
+++ b/roles/openshift_examples/README.md
@@ -21,13 +21,13 @@ Facts
Role Variables
--------------
-| Name | Default value | |
-|-------------------------------------|-----------------------------------------------------|------------------------------------------|
-| openshift_examples_load_centos | true when openshift_deployment_typenot 'enterprise' | Load centos image streams |
-| openshift_examples_load_rhel | true if openshift_deployment_type is 'enterprise' | Load rhel image streams |
-| openshift_examples_load_db_templates| true | Loads database templates |
-| openshift_examples_load_quickstarts | true | Loads quickstarts ie: nodejs, rails, etc |
-| openshift_examples_load_xpaas | false | Loads xpass streams and templates |
+| Name | Default value | |
+|-------------------------------------|----------------------------------------------------------------|------------------------------------------|
+| openshift_examples_load_centos | true when openshift_deployment_type not 'openshift-enterprise' | Load centos image streams |
+| openshift_examples_load_rhel | true if openshift_deployment_type is 'openshift-enterprise' | Load rhel image streams |
+| openshift_examples_load_db_templates| true | Loads database templates |
+| openshift_examples_load_quickstarts | true | Loads quickstarts ie: nodejs, rails, etc |
+| openshift_examples_load_xpaas       | false                                                           | Loads xpaas streams and templates        |
Dependencies
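These switches are plain role variables, so they can sit with the rest of the cluster configuration. A minimal sketch with illustrative values only:

```
# group_vars/OSEv3.yml (illustrative)
openshift_examples_load_db_templates: true
openshift_examples_load_quickstarts: true
openshift_examples_load_xpaas: false
```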
diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml
index 3a866cedf..7a5bebf6f 100644
--- a/roles/openshift_excluder/tasks/install.yml
+++ b/roles/openshift_excluder/tasks/install.yml
@@ -6,19 +6,46 @@
block:
- - name: Install docker excluder
+ - name: Install docker excluder - yum
package:
name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
state: "{{ r_openshift_excluder_docker_package_state }}"
when:
- r_openshift_excluder_enable_docker_excluder | bool
+ - ansible_pkg_mgr == "yum"
- - name: Install openshift excluder
+
+ # For DNF we do not need the "*" and if we add it, it causes an error because
+ # it's not a valid pkg_spec
+ #
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1199432
+ - name: Install docker excluder - dnf
+ package:
+ name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ state: "{{ r_openshift_excluder_docker_package_state }}"
+ when:
+ - r_openshift_excluder_enable_docker_excluder | bool
+ - ansible_pkg_mgr == "dnf"
+
+ - name: Install openshift excluder - yum
package:
name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
state: "{{ r_openshift_excluder_package_state }}"
when:
- r_openshift_excluder_enable_openshift_excluder | bool
+ - ansible_pkg_mgr == "yum"
+
+ # For DNF we do not need the "*" and if we add it, it causes an error because
+ # it's not a valid pkg_spec
+ #
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1199432
+ - name: Install openshift excluder - dnf
+ package:
+ name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ state: "{{ r_openshift_excluder_package_state }}"
+ when:
+ - r_openshift_excluder_enable_openshift_excluder | bool
+ - ansible_pkg_mgr == "dnf"
- set_fact:
r_openshift_excluder_install_ran: True
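The install is split per package manager because yum accepts a trailing `*` glob in a package spec while dnf rejects it as an invalid pkg_spec (see the Bugzilla link in the comments above). A reduced sketch of the same guard pattern; the package name and version are shortened for illustration:

```
# illustrative only; the real tasks build the name from the service type and version
- package:
    name: "origin-excluder-3.6*"
    state: present
  when: ansible_pkg_mgr == "yum"

- package:
    name: "origin-excluder-3.6"
    state: present
  when: ansible_pkg_mgr == "dnf"
```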
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 517e0231d..ba1d8f29d 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -55,9 +55,6 @@ def migrate_docker_facts(facts):
""" Apply migrations for docker facts """
params = {
'common': (
- 'additional_registries',
- 'insecure_registries',
- 'blocked_registries',
'options'
),
'node': (
@@ -477,11 +474,7 @@ def set_selectors(facts):
facts if they were not already present
"""
- deployment_type = facts['common']['deployment_type']
- if deployment_type == 'online':
- selector = "type=infra"
- else:
- selector = "region=infra"
+ selector = "region=infra"
if 'hosted' not in facts:
facts['hosted'] = {}
@@ -497,10 +490,10 @@ def set_selectors(facts):
facts['hosted']['metrics'] = {}
if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']:
facts['hosted']['metrics']['selector'] = None
- if 'logging' not in facts['hosted']:
- facts['hosted']['logging'] = {}
- if 'selector' not in facts['hosted']['logging'] or facts['hosted']['logging']['selector'] in [None, 'None']:
- facts['hosted']['logging']['selector'] = None
+ if 'logging' not in facts:
+ facts['logging'] = {}
+ if 'selector' not in facts['logging'] or facts['logging']['selector'] in [None, 'None']:
+ facts['logging']['selector'] = None
if 'etcd' not in facts['hosted']:
facts['hosted']['etcd'] = {}
if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']:
@@ -568,7 +561,7 @@ def set_identity_providers_if_unset(facts):
name='allow_all', challenge=True, login=True,
kind='AllowAllPasswordIdentityProvider'
)
- if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
+ if deployment_type == 'openshift-enterprise':
identity_provider = dict(
name='deny_all', challenge=True, login=True,
kind='DenyAllPasswordIdentityProvider'
@@ -770,47 +763,28 @@ def set_deployment_facts_if_unset(facts):
service_type = 'atomic-openshift'
if deployment_type == 'origin':
service_type = 'origin'
- elif deployment_type in ['enterprise']:
- service_type = 'openshift'
facts['common']['service_type'] = service_type
- if 'docker' in facts:
- deployment_type = facts['common']['deployment_type']
- if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
- addtl_regs = facts['docker'].get('additional_registries', [])
- ent_reg = 'registry.access.redhat.com'
- if ent_reg not in addtl_regs:
- facts['docker']['additional_registries'] = addtl_regs + [ent_reg]
-
for role in ('master', 'node'):
if role in facts:
deployment_type = facts['common']['deployment_type']
if 'registry_url' not in facts[role]:
registry_url = 'openshift/origin-${component}:${version}'
- if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
+ if deployment_type == 'openshift-enterprise':
registry_url = 'openshift3/ose-${component}:${version}'
- elif deployment_type == 'atomic-enterprise':
- registry_url = 'aep3_beta/aep-${component}:${version}'
facts[role]['registry_url'] = registry_url
if 'master' in facts:
deployment_type = facts['common']['deployment_type']
openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
- if 'disabled_features' in facts['master']:
- if deployment_type == 'atomic-enterprise':
- curr_disabled_features = set(facts['master']['disabled_features'])
- facts['master']['disabled_features'] = list(curr_disabled_features.union(openshift_features))
- else:
+ if 'disabled_features' not in facts['master']:
if facts['common']['deployment_subtype'] == 'registry':
facts['master']['disabled_features'] = openshift_features
if 'node' in facts:
deployment_type = facts['common']['deployment_type']
if 'storage_plugin_deps' not in facts['node']:
- if deployment_type in ['openshift-enterprise', 'atomic-enterprise', 'origin']:
- facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi']
- else:
- facts['node']['storage_plugin_deps'] = []
+ facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi']
return facts
@@ -1671,7 +1645,7 @@ def set_container_facts_if_unset(facts):
facts
"""
deployment_type = facts['common']['deployment_type']
- if deployment_type in ['enterprise', 'openshift-enterprise']:
+ if deployment_type == 'openshift-enterprise':
master_image = 'openshift3/ose'
cli_image = master_image
node_image = 'openshift3/node'
@@ -1681,16 +1655,6 @@ def set_container_facts_if_unset(facts):
router_image = 'openshift3/ose-haproxy-router'
registry_image = 'openshift3/ose-docker-registry'
deployer_image = 'openshift3/ose-deployer'
- elif deployment_type == 'atomic-enterprise':
- master_image = 'aep3_beta/aep'
- cli_image = master_image
- node_image = 'aep3_beta/node'
- ovs_image = 'aep3_beta/openvswitch'
- etcd_image = 'registry.access.redhat.com/rhel7/etcd'
- pod_image = 'aep3_beta/aep-pod'
- router_image = 'aep3_beta/aep-haproxy-router'
- registry_image = 'aep3_beta/aep-docker-registry'
- deployer_image = 'aep3_beta/aep-deployer'
else:
master_image = 'openshift/origin'
cli_image = master_image
@@ -1705,7 +1669,9 @@ def set_container_facts_if_unset(facts):
facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')
# If openshift_docker_use_system_container is set and is True ....
if 'use_system_container' in list(facts['docker'].keys()):
- if facts['docker']['use_system_container']:
+ # use safe_get_bool as the inventory variable may not be a
+ # valid boolean on its own.
+ if safe_get_bool(facts['docker']['use_system_container']):
# ... set the service name to container-engine
facts['docker']['service_name'] = 'container-engine'
@@ -1810,7 +1776,10 @@ class OpenShiftFacts(object):
'etcd',
'hosted',
'master',
- 'node']
+ 'node',
+ 'logging',
+ 'loggingops',
+ 'metrics']
# Disabling too-many-arguments, this should be cleaned up as a TODO item.
# pylint: disable=too-many-arguments,no-value-for-parameter
@@ -1927,7 +1896,7 @@ class OpenShiftFacts(object):
hostname_f = output.strip() if exit_code == 0 else ''
hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
self.system_facts['ansible_fqdn']]
- hostname = choose_hostname(hostname_values, ip_addr)
+ hostname = choose_hostname(hostname_values, ip_addr).lower()
defaults['common'] = dict(ip=ip_addr,
public_ip=ip_addr,
@@ -1938,7 +1907,6 @@ class OpenShiftFacts(object):
portal_net='172.30.0.0/16',
client_binary='oc', admin_binary='oadm',
dns_domain='cluster.local',
- debug_level=2,
config_base='/etc/origin')
if 'master' in roles:
@@ -1991,66 +1959,6 @@ class OpenShiftFacts(object):
if 'hosted' in roles or self.role == 'hosted':
defaults['hosted'] = dict(
- metrics=dict(
- deploy=False,
- duration=7,
- resolution='10s',
- storage=dict(
- kind=None,
- volume=dict(
- name='metrics',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- ),
- loggingops=dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='logging-es-ops',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- ),
- logging=dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='logging-es',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- ),
etcd=dict(
storage=dict(
kind=None,
@@ -2097,6 +2005,69 @@ class OpenShiftFacts(object):
router=dict()
)
+ defaults['logging'] = dict(
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='logging-es',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ )
+
+ defaults['loggingops'] = dict(
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='logging-es-ops',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ )
+
+ defaults['metrics'] = dict(
+ deploy=False,
+ duration=7,
+ resolution='10s',
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='metrics',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ )
+
return defaults
def guess_host_provider(self):
@@ -2267,19 +2238,6 @@ class OpenShiftFacts(object):
protected_facts_to_overwrite)
if 'docker' in new_local_facts:
- # remove duplicate and empty strings from registry lists, preserving order
- for cat in ['additional', 'blocked', 'insecure']:
- key = '{0}_registries'.format(cat)
- if key in new_local_facts['docker']:
- val = new_local_facts['docker'][key]
- if isinstance(val, string_types):
- val = [x.strip() for x in val.split(',')]
- seen = set()
- new_local_facts['docker'][key] = list()
- for registry in val:
- if registry not in seen and registry != '':
- seen.add(registry)
- new_local_facts['docker'][key].append(registry)
# Convert legacy log_options comma sep string to a list if present:
if 'log_options' in new_local_facts['docker'] and \
isinstance(new_local_facts['docker']['log_options'], string_types):
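Note on the facts restructuring above: a minimal sketch (values taken from the defaults introduced in this patch) of where the logging, loggingops, and metrics facts now live relative to the old 'hosted' layout:

    # Sketch of the post-patch layout; values are illustrative defaults from this diff.
    facts = {
        'hosted': {'metrics': {'selector': None}},   # the metrics selector stays under 'hosted'
        'logging': {'selector': None,                # logging/loggingops/metrics become
                    'storage': {'volume': {'name': 'logging-es', 'size': '10Gi'}}},
        'metrics': {'deploy': False, 'duration': 7, 'resolution': '10s'},
    }

    # Pre-patch consumers read facts['hosted']['logging']['selector'];
    # post-patch the same value lives at the top level:
    assert facts['logging']['selector'] is None
    assert facts['metrics']['resolution'] == '10s'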
diff --git a/roles/openshift_gcp/defaults/main.yml b/roles/openshift_gcp/defaults/main.yml
new file mode 100644
index 000000000..18fc453b2
--- /dev/null
+++ b/roles/openshift_gcp/defaults/main.yml
@@ -0,0 +1,58 @@
+---
+openshift_gcp_prefix: ''
+
+openshift_gcp_create_network: True
+openshift_gcp_create_registry_bucket: True
+openshift_gcp_kubernetes_cluster_status: owned # or shared
+openshift_gcp_node_group_type: master
+
+openshift_gcp_ssh_private_key: ''
+
+openshift_gcp_project: ''
+openshift_gcp_clusterid: default
+openshift_gcp_region: us-central1
+openshift_gcp_zone: us-central1-a
+
+openshift_gcp_network_name: "{{ openshift_gcp_prefix }}network"
+
+openshift_gcp_iam_service_account: ''
+openshift_gcp_iam_service_account_keyfile: ''
+
+openshift_gcp_master_lb_timeout: 2m
+
+openshift_gcp_infra_network_instance_group: ig-i
+
+openshift_gcp_image: 'rhel-7'
+openshift_gcp_base_image: rhel-7
+
+openshift_gcp_registry_bucket_keyfile: ''
+openshift_gcp_registry_bucket_name: "{{ openshift_gcp_prefix }}-docker-registry"
+
+openshift_gcp_node_group_config:
+ - name: master
+ suffix: m
+ tags: ocp-master
+ machine_type: n1-standard-2
+ boot_disk_size: 150
+ scale: 1
+ - name: infra
+ suffix: i
+ tags: ocp-infra-node ocp-node
+ machine_type: n1-standard-2
+ boot_disk_size: 150
+ scale: 1
+ - name: node
+ suffix: n
+ tags: ocp-node
+ machine_type: n1-standard-2
+ boot_disk_size: 150
+ scale: 3
+ - name: node-flex
+ suffix: nf
+ tags: ocp-node
+ machine_type: n1-standard-2
+ boot_disk_size: 150
+ scale: 0
+
+openshift_gcp_startup_script_file: ''
+openshift_gcp_user_data_file: ''
diff --git a/roles/openshift_gcp/tasks/main.yaml b/roles/openshift_gcp/tasks/main.yaml
new file mode 100644
index 000000000..ad205ba33
--- /dev/null
+++ b/roles/openshift_gcp/tasks/main.yaml
@@ -0,0 +1,43 @@
+#
+# This role relies on gcloud invoked via templated bash in order to
+# provide a high-performance deployment option. The next logical step
+# is to transition to a Deployment Manager template which is then instantiated.
+# TODO: use a formal set of role parameters consistent with openshift_aws
+#
+---
+- name: Templatize DNS script
+ template: src=dns.j2.sh dest=/tmp/openshift_gcp_provision_dns.sh mode=u+rx
+- name: Templatize provision script
+ template: src=provision.j2.sh dest=/tmp/openshift_gcp_provision.sh mode=u+rx
+- name: Templatize de-provision script
+ template: src=remove.j2.sh dest=/tmp/openshift_gcp_provision_remove.sh mode=u+rx
+ when:
+ - state | default('present') == 'absent'
+
+- name: Provision GCP DNS domain
+ command: /tmp/openshift_gcp_provision_dns.sh
+ args:
+ chdir: "{{ playbook_dir }}/files"
+ register: dns_provision
+ when:
+ - state | default('present') == 'present'
+
+- name: Ensure that DNS resolves to the hosted zone
+ assert:
+ that:
+ - "lookup('dig', public_hosted_zone, 'qtype=NS', wantlist=True) | sort | join(',') == dns_provision.stdout"
+ msg: "The DNS domain {{ public_hosted_zone }} defined in 'public_hosted_zone' must have NS records pointing to the Google nameservers: '{{ dns_provision.stdout }}' instead of '{{ lookup('dig', public_hosted_zone, 'qtype=NS') }}'."
+ when:
+ - state | default('present') == 'present'
+
+- name: Provision GCP resources
+ command: /tmp/openshift_gcp_provision.sh
+ args:
+ chdir: "{{ playbook_dir }}/files"
+ when:
+ - state | default('present') == 'present'
+
+- name: De-provision GCP resources
+ command: /tmp/openshift_gcp_provision_remove.sh
+ when:
+ - state | default('present') == 'absent'
diff --git a/roles/openshift_gcp/templates/dns.j2.sh b/roles/openshift_gcp/templates/dns.j2.sh
new file mode 100644
index 000000000..a7475aaf5
--- /dev/null
+++ b/roles/openshift_gcp/templates/dns.j2.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -euo pipefail
+
+dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
+
+# Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist
+if ! gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" dns managed-zones create "${dns_zone}" --dns-name "{{ public_hosted_zone }}" --description "{{ public_hosted_zone }} domain" >/dev/null
+fi
+
+# Always output the expected nameservers as a comma delimited list
+gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" --format='value(nameServers)' | tr ';' ','
diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh
new file mode 100644
index 000000000..d72a11de1
--- /dev/null
+++ b/roles/openshift_gcp/templates/provision.j2.sh
@@ -0,0 +1,320 @@
+#!/bin/bash
+
+set -euo pipefail
+
+if [[ -n "{{ openshift_gcp_ssh_private_key }}" ]]; then
+ # Create SSH key for GCE
+ if [ ! -f "{{ openshift_gcp_ssh_private_key }}" ]; then
+ ssh-keygen -t rsa -f "{{ openshift_gcp_ssh_private_key }}" -C gce-provision-cloud-user -N ''
+ ssh-add "{{ openshift_gcp_ssh_private_key }}" || true
+ fi
+
+ # Check if the public half of the configured SSH key is in the project metadata, and if not, add it there
+ pub_key=$(cut -d ' ' -f 2 < "{{ openshift_gcp_ssh_private_key }}.pub")
+ key_tmp_file='/tmp/ocp-gce-keys'
+ if ! gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q "$pub_key"; then
+ if gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q ssh-rsa; then
+ gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file"
+ fi
+ echo -n 'cloud-user:' >> "$key_tmp_file"
+ cat "{{ openshift_gcp_ssh_private_key }}.pub" >> "$key_tmp_file"
+ gcloud --project "{{ openshift_gcp_project }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}"
+ rm -f "$key_tmp_file"
+ fi
+fi
+
+metadata=""
+if [[ -n "{{ openshift_gcp_startup_script_file }}" ]]; then
+ if [[ ! -f "{{ openshift_gcp_startup_script_file }}" ]]; then
+ echo "Startup script file missing at {{ openshift_gcp_startup_script_file }} from=$(pwd)"
+ exit 1
+ fi
+ metadata+="--metadata-from-file=startup-script={{ openshift_gcp_startup_script_file }}"
+fi
+if [[ -n "{{ openshift_gcp_user_data_file }}" ]]; then
+ if [[ ! -f "{{ openshift_gcp_user_data_file }}" ]]; then
+ echo "User data file missing at {{ openshift_gcp_user_data_file }}"
+ exit 1
+ fi
+ if [[ -n "${metadata}" ]]; then
+ metadata+=","
+ else
+ metadata="--metadata-from-file="
+ fi
+ metadata+="user-data={{ openshift_gcp_user_data_file }}"
+fi
+
+# Select image or image family
+image="{{ openshift_gcp_image }}"
+if ! gcloud --project "{{ openshift_gcp_project }}" compute images describe "${image}" &>/dev/null; then
+ if ! gcloud --project "{{ openshift_gcp_project }}" compute images describe-from-family "${image}" &>/dev/null; then
+ echo "No compute image or image-family found, create an image named '{{ openshift_gcp_image }}' to continue'"
+ exit 1
+ fi
+ image="family/${image}"
+fi
+
+### PROVISION THE INFRASTRUCTURE ###
+
+dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
+
+# Verify the DNS managed zone exists in Google Cloud DNS; it must be configured before running this script
+if ! gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+ echo "DNS zone '${dns_zone}' doesn't exist. Must be configured prior to running this script"
+ exit 1
+fi
+
+# Create network
+if ! gcloud --project "{{ openshift_gcp_project }}" compute networks describe "{{ openshift_gcp_network_name }}" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute networks create "{{ openshift_gcp_network_name }}" --mode "auto"
+else
+ echo "Network '{{ openshift_gcp_network_name }}' already exists"
+fi
+
+# Firewall rules in the form:
+# ['name']='parameters for "gcloud compute firewall-rules create"'
+# For all possible parameters see: gcloud compute firewall-rules create --help
+range=""
+if [[ -n "{{ openshift_node_port_range }}" ]]; then
+ range=",tcp:{{ openshift_node_port_range }},udp:{{ openshift_node_port_range }}"
+fi
+declare -A FW_RULES=(
+ ['icmp']='--allow icmp'
+ ['ssh-external']='--allow tcp:22'
+ ['ssh-internal']='--allow tcp:22 --source-tags bastion'
+ ['master-internal']="--allow tcp:2224,tcp:2379,tcp:2380,tcp:4001,udp:4789,udp:5404,udp:5405,tcp:8053,udp:8053,tcp:8444,tcp:10250,tcp:10255,udp:10255,tcp:24224,udp:24224 --source-tags ocp --target-tags ocp-master"
+ ['master-external']="--allow tcp:80,tcp:443,tcp:1936,tcp:8080,tcp:8443${range} --target-tags ocp-master"
+ ['node-internal']="--allow udp:4789,tcp:10250,tcp:10255,udp:10255 --source-tags ocp --target-tags ocp-node,ocp-infra-node"
+ ['infra-node-internal']="--allow tcp:5000 --source-tags ocp --target-tags ocp-infra-node"
+ ['infra-node-external']="--allow tcp:80,tcp:443,tcp:1936${range} --target-tags ocp-infra-node"
+)
+for rule in "${!FW_RULES[@]}"; do
+ ( if ! gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules describe "{{ openshift_gcp_prefix }}$rule" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules create "{{ openshift_gcp_prefix }}$rule" --network "{{ openshift_gcp_network_name }}" ${FW_RULES[$rule]}
+ else
+ echo "Firewall rule '{{ openshift_gcp_prefix }}${rule}' already exists"
+ fi ) &
+done
+
+
+# Master IP
+( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global
+else
+ echo "IP '{{ openshift_gcp_prefix }}master-ssl-lb-ip' already exists"
+fi ) &
+
+# Internal master IP
+( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}"
+else
+ echo "IP '{{ openshift_gcp_prefix }}master-network-lb-ip' already exists"
+fi ) &
+
+# Router IP
+( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}"
+else
+ echo "IP '{{ openshift_gcp_prefix }}router-network-lb-ip' already exists"
+fi ) &
+
+
+{% for node_group in openshift_gcp_node_group_config %}
+# configure {{ node_group.name }}
+(
+ if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-templates describe "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute instance-templates create "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" \
+ --machine-type "{{ node_group.machine_type }}" --network "{{ openshift_gcp_network_name }}" \
+ --tags "{{ openshift_gcp_prefix }}ocp,ocp,{{ node_group.tags }}" \
+ --boot-disk-size "{{ node_group.boot_disk_size }}" --boot-disk-type "pd-ssd" \
+ --scopes "logging-write,monitoring-write,useraccounts-ro,service-control,service-management,storage-ro,compute-rw" \
+ --image "${image}" ${metadata}
+ else
+ echo "Instance template '{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}' already exists"
+ fi
+
+ # Create instance group
+ if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed describe "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed create "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" \
+ --zone "{{ openshift_gcp_zone }}" --template "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}"
+ else
+ echo "Instance group '{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}' already exists"
+ fi
+) &
+{% endfor %}
+
+for i in `jobs -p`; do wait $i; done
+
+
+# Configure the master external LB rules
+(
+# Master health check
+if ! gcloud --project "{{ openshift_gcp_project }}" compute health-checks describe "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute health-checks create https "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz"
+else
+ echo "Health check '{{ openshift_gcp_prefix }}master-ssl-lb-health-check' already exists"
+fi
+
+gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-named-ports "{{ openshift_gcp_prefix }}ig-m" \
+ --zone "{{ openshift_gcp_zone }}" --named-ports "{{ openshift_gcp_prefix }}port-name-master:{{ internal_console_port }}"
+
+# Master backend service
+if ! gcloud --project "{{ openshift_gcp_project }}" compute backend-services describe "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --global &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute backend-services create "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --health-checks "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port-name "{{ openshift_gcp_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ openshift_gcp_master_lb_timeout }}"
+ gcloud --project "{{ openshift_gcp_project }}" compute backend-services add-backend "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --instance-group "{{ openshift_gcp_prefix }}ig-m" --global --instance-group-zone "{{ openshift_gcp_zone }}"
+else
+ echo "Backend service '{{ openshift_gcp_prefix }}master-ssl-lb-backend' already exists"
+fi
+
+# Master tcp proxy target
+if ! gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies describe "{{ openshift_gcp_prefix }}master-ssl-lb-target" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies create "{{ openshift_gcp_prefix }}master-ssl-lb-target" --backend-service "{{ openshift_gcp_prefix }}master-ssl-lb-backend"
+else
+ echo "Proxy target '{{ openshift_gcp_prefix }}master-ssl-lb-target' already exists"
+fi
+
+# Master forwarding rule
+if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --global &>/dev/null; then
+ IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)')
+ gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ openshift_gcp_prefix }}master-ssl-lb-target"
+else
+ echo "Forwarding rule '{{ openshift_gcp_prefix }}master-ssl-lb-rule' already exists"
+fi
+) &
+
+
+# Configure the master internal LB rules
+(
+# Internal master health check
+if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}master-network-lb-health-check" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz"
+else
+ echo "Health check '{{ openshift_gcp_prefix }}master-network-lb-health-check' already exists"
+fi
+
+# Internal master target pool
+if ! gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}master-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}master-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}master-network-lb-health-check" --region "{{ openshift_gcp_region }}"
+else
+ echo "Target pool '{{ openshift_gcp_prefix }}master-network-lb-pool' already exists"
+fi
+
+# Internal master forwarding rule
+if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then
+ IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
+ gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}master-network-lb-pool"
+else
+ echo "Forwarding rule '{{ openshift_gcp_prefix }}master-network-lb-rule' already exists"
+fi
+) &
+
+
+# Configure the infra node rules
+(
+# Router health check
+if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}router-network-lb-health-check" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz"
+else
+ echo "Health check '{{ openshift_gcp_prefix }}router-network-lb-health-check' already exists"
+fi
+
+# Router target pool
+if ! gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}router-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}router-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}router-network-lb-health-check" --region "{{ openshift_gcp_region }}"
+else
+ echo "Target pool '{{ openshift_gcp_prefix }}router-network-lb-pool' already exists"
+fi
+
+# Router forwarding rule
+if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}router-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then
+ IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
+ gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}router-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}router-network-lb-pool"
+else
+ echo "Forwarding rule '{{ openshift_gcp_prefix }}router-network-lb-rule' already exists"
+fi
+) &
+
+for i in `jobs -p`; do wait $i; done
+
+# set the target pools
+(
+if [[ "ig-m" == "{{ openshift_gcp_infra_network_instance_group }}" ]]; then
+ gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool,{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
+else
+ gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
+ gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}{{ openshift_gcp_infra_network_instance_group }}" --target-pools "{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
+fi
+) &
+
+# configure DNS
+(
+# Retry DNS changes until they succeed since this may be a shared resource
+while true; do
+ dns="${TMPDIR:-/tmp}/dns.yaml"
+ rm -f $dns
+
+ # DNS record for master lb
+ if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then
+ IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)')
+ if [[ ! -f $dns ]]; then
+ gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ fi
+ gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP"
+ else
+ echo "DNS record for '{{ openshift_master_cluster_public_hostname }}' already exists"
+ fi
+
+ # DNS record for internal master lb
+ if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then
+ IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
+ if [[ ! -f $dns ]]; then
+ gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ fi
+ gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_hostname }}." --type A "$IP"
+ else
+ echo "DNS record for '{{ openshift_master_cluster_hostname }}' already exists"
+ fi
+
+ # DNS record for router lb
+ if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then
+ IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
+ if [[ ! -f $dns ]]; then
+ gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ fi
+ gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ wildcard_zone }}." --type A "$IP"
+ gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}."
+ else
+ echo "DNS record for '{{ wildcard_zone }}' already exists"
+ fi
+
+ # Commit all DNS changes, retrying if preconditions are not met
+ if [[ -f $dns ]]; then
+ if ! out="$( gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
+ rc=$?
+ if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
+ continue
+ fi
+ exit $rc
+ fi
+ fi
+ break
+done
+) &
+
+# Create bucket for registry
+(
+if ! gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bucket_name }}" &>/dev/null; then
+ gsutil mb -p "{{ openshift_gcp_project }}" -l "{{ openshift_gcp_region }}" "gs://{{ openshift_gcp_registry_bucket_name }}"
+else
+ echo "Bucket '{{ openshift_gcp_registry_bucket_name }}' already exists"
+fi
+) &
+
+# wait until all node groups are stable
+{% for node_group in openshift_gcp_node_group_config %}
+# wait for stable {{ node_group.name }}
+( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=300) &
+{% endfor %}
+
+
+for i in `jobs -p`; do wait $i; done
diff --git a/roles/openshift_gcp/templates/remove.j2.sh b/roles/openshift_gcp/templates/remove.j2.sh
new file mode 100644
index 000000000..a1e0affec
--- /dev/null
+++ b/roles/openshift_gcp/templates/remove.j2.sh
@@ -0,0 +1,156 @@
+#!/bin/bash
+
+set -euo pipefail
+
+function teardown_cmd() {
+ a=( $@ )
+ local name=$1
+ a=( "${a[@]:1}" )
+ local flag=0
+ local found=
+ for i in ${a[@]}; do
+ if [[ "$i" == "--"* ]]; then
+ found=true
+ break
+ fi
+ flag=$((flag+1))
+ done
+ if [[ -z "${found}" ]]; then
+ flag=$((flag+1))
+ fi
+ if gcloud --project "{{ openshift_gcp_project }}" ${a[@]::$flag} describe "${name}" ${a[@]:$flag} &>/dev/null; then
+ gcloud --project "{{ openshift_gcp_project }}" ${a[@]::$flag} delete -q "${name}" ${a[@]:$flag}
+ fi
+}
+
+function teardown() {
+ for i in `seq 1 20`; do
+ if teardown_cmd $@; then
+ break
+ fi
+ sleep 0.5
+ done
+}
+
+# Preemptively spin down the instances
+{% for node_group in openshift_gcp_node_group_config %}
+# scale down {{ node_group.name }}
+(
+ # performs a delete and scale down as one operation to ensure maximum parallelism
+ if ! instances=$( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed list-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --format='value[terminator=","](instance)' ); then
+ exit 0
+ fi
+ instances="${instances%?}"
+ if [[ -z "${instances}" ]]; then
+ echo "warning: No instances in {{ node_group.name }}" 1>&2
+ exit 0
+ fi
+ if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed delete-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --instances "${instances}"; then
+ echo "warning: Unable to scale down the node group {{ node_group.name }}" 1>&2
+ exit 0
+ fi
+) &
+{% endfor %}
+
+# Bucket for registry
+(
+if gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bucket_name }}" &>/dev/null; then
+ gsutil -m rm -r "gs://{{ openshift_gcp_registry_bucket_name }}"
+fi
+) &
+
+# DNS
+(
+dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
+if gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+ # Retry DNS changes until they succeed since this may be a shared resource
+ while true; do
+ dns="${TMPDIR:-/tmp}/dns.yaml"
+ rm -f "${dns}"
+
+ # export all dns records that match into a zone format, and turn each line into a set of args for
+ # record-sets transaction.
+ gcloud dns record-sets export --project "{{ openshift_gcp_project }}" -z "${dns_zone}" --zone-file-format "${dns}"
+ if grep -F -e '{{ openshift_master_cluster_hostname }}' -e '{{ openshift_master_cluster_public_hostname }}' -e '{{ wildcard_zone }}' "${dns}" | \
+ awk '{ print "--name", $1, "--ttl", $2, "--type", $4, $5; }' > "${dns}.input"
+ then
+ rm -f "${dns}"
+ gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ cat "${dns}.input" | xargs -L1 gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file="${dns}" remove -z "${dns_zone}"
+
+ # Commit all DNS changes, retrying if preconditions are not met
+ if ! out="$( gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
+ rc=$?
+ if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
+ continue
+ fi
+ exit $rc
+ fi
+ fi
+ rm "${dns}.input"
+ break
+ done
+fi
+) &
+
+(
+# Router network rules
+teardown "{{ openshift_gcp_prefix }}router-network-lb-rule" compute forwarding-rules --region "{{ openshift_gcp_region }}"
+teardown "{{ openshift_gcp_prefix }}router-network-lb-pool" compute target-pools --region "{{ openshift_gcp_region }}"
+teardown "{{ openshift_gcp_prefix }}router-network-lb-health-check" compute http-health-checks
+teardown "{{ openshift_gcp_prefix }}router-network-lb-ip" compute addresses --region "{{ openshift_gcp_region }}"
+
+# Internal master network rules
+teardown "{{ openshift_gcp_prefix }}master-network-lb-rule" compute forwarding-rules --region "{{ openshift_gcp_region }}"
+teardown "{{ openshift_gcp_prefix }}master-network-lb-pool" compute target-pools --region "{{ openshift_gcp_region }}"
+teardown "{{ openshift_gcp_prefix }}master-network-lb-health-check" compute http-health-checks
+teardown "{{ openshift_gcp_prefix }}master-network-lb-ip" compute addresses --region "{{ openshift_gcp_region }}"
+) &
+
+(
+# Master SSL network rules
+teardown "{{ openshift_gcp_prefix }}master-ssl-lb-rule" compute forwarding-rules --global
+teardown "{{ openshift_gcp_prefix }}master-ssl-lb-target" compute target-tcp-proxies
+teardown "{{ openshift_gcp_prefix }}master-ssl-lb-ip" compute addresses --global
+teardown "{{ openshift_gcp_prefix }}master-ssl-lb-backend" compute backend-services --global
+teardown "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" compute health-checks
+) &
+
+# Firewall rules
+# ['name']='parameters for "gcloud compute firewall-rules create"'
+# For all possible parameters see: gcloud compute firewall-rules create --help
+declare -A FW_RULES=(
+ ['icmp']=""
+ ['ssh-external']=""
+ ['ssh-internal']=""
+ ['master-internal']=""
+ ['master-external']=""
+ ['node-internal']=""
+ ['infra-node-internal']=""
+ ['infra-node-external']=""
+)
+for rule in "${!FW_RULES[@]}"; do
+ ( if gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules describe "{{ openshift_gcp_prefix }}$rule" &>/dev/null; then
+ # retry a few times because this call can be flaky
+ for i in `seq 1 3`; do
+ if gcloud -q --project "{{ openshift_gcp_project }}" compute firewall-rules delete "{{ openshift_gcp_prefix }}$rule"; then
+ break
+ fi
+ done
+ fi ) &
+done
+
+for i in `jobs -p`; do wait $i; done
+
+{% for node_group in openshift_gcp_node_group_config %}
+# teardown {{ node_group.name }} - any load balancers referencing these groups must be removed first
+(
+ teardown "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" compute instance-groups managed --zone "{{ openshift_gcp_zone }}"
+ teardown "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" compute instance-templates
+) &
+{% endfor %}
+
+for i in `jobs -p`; do wait $i; done
+
+# Network
+teardown "{{ openshift_gcp_network_name }}" compute networks
diff --git a/roles/openshift_gcp_image_prep/files/partition.conf b/roles/openshift_gcp_image_prep/files/partition.conf
new file mode 100644
index 000000000..b87e5e0b6
--- /dev/null
+++ b/roles/openshift_gcp_image_prep/files/partition.conf
@@ -0,0 +1,3 @@
+[Service]
+ExecStartPost=-/usr/bin/growpart /dev/sda 1
+ExecStartPost=-/sbin/xfs_growfs /
diff --git a/roles/openshift_gcp_image_prep/tasks/main.yaml b/roles/openshift_gcp_image_prep/tasks/main.yaml
new file mode 100644
index 000000000..fee5ab618
--- /dev/null
+++ b/roles/openshift_gcp_image_prep/tasks/main.yaml
@@ -0,0 +1,18 @@
+---
+# GCE instances start with XFS and barrier=1, but the barrier option is valid only for extfs.
+- name: Remove barrier=1 from XFS fstab entries
+ lineinfile:
+ path: /etc/fstab
+ regexp: '^(.+)xfs(.+?),?barrier=1,?(.*?)$'
+ line: '\1xfs\2 \3'
+ backrefs: yes
+
+- name: Ensure the root filesystem has XFS group quota turned on
+ lineinfile:
+ path: /boot/grub2/grub.cfg
+ regexp: '^(.*)linux16 (.*)$'
+ line: '\1linux16 \2 rootflags=gquota'
+ backrefs: yes
+
+- name: Ensure the root partition grows on startup
+ copy: src=partition.conf dest=/etc/systemd/system/google-instance-setup.service.d/
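Assuming lineinfile's backrefs expansion follows Python's re.sub semantics, the fstab edit above can be sanity-checked with a short sketch (the sample fstab line is a hypothetical example):

    import re

    # Same pattern as the lineinfile task above, with the third capture group
    # carrying whatever follows barrier=1 on the line.
    pattern = r'^(.+)xfs(.+?),?barrier=1,?(.*?)$'
    replacement = r'\1xfs\2 \3'

    sample = '/dev/sda1 / xfs defaults,barrier=1 0 0'
    print(re.sub(pattern, replacement, sample))
    # -> '/dev/sda1 / xfs defaults  0 0'  (barrier=1 dropped; the extra space is harmless in fstab)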
diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
index d02a43655..326176273 100644
--- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py
+++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
@@ -3,7 +3,10 @@ Ansible action plugin to execute health checks in OpenShift clusters.
"""
import sys
import os
+import base64
import traceback
+import errno
+import json
from collections import defaultdict
from ansible.plugins.action import ActionBase
@@ -38,8 +41,13 @@ class ActionModule(ActionBase):
# storing the information we need in the result.
result['playbook_context'] = task_vars.get('r_openshift_health_checker_playbook_context')
+ # if the user wants to write check results to files, they provide this directory:
+ output_dir = task_vars.get("openshift_checks_output_dir")
+ if output_dir:
+ output_dir = os.path.join(output_dir, task_vars["ansible_host"])
+
try:
- known_checks = self.load_known_checks(tmp, task_vars)
+ known_checks = self.load_known_checks(tmp, task_vars, output_dir)
args = self._task.args
requested_checks = normalize(args.get('checks', []))
@@ -65,21 +73,20 @@ class ActionModule(ActionBase):
for name in resolved_checks:
display.banner("CHECK [{} : {}]".format(name, task_vars["ansible_host"]))
- check = known_checks[name]
- check_results[name] = run_check(name, check, user_disabled_checks)
- if check.changed:
- check_results[name]["changed"] = True
+ check_results[name] = run_check(name, known_checks[name], user_disabled_checks, output_dir)
result["changed"] = any(r.get("changed") for r in check_results.values())
if any(r.get("failed") for r in check_results.values()):
result["failed"] = True
result["msg"] = "One or more checks failed"
+ write_result_to_output_dir(output_dir, result)
return result
- def load_known_checks(self, tmp, task_vars):
+ def load_known_checks(self, tmp, task_vars, output_dir=None):
"""Find all existing checks and return a mapping of names to instances."""
load_checks()
+ want_full_results = bool(output_dir)
known_checks = {}
for cls in OpenShiftCheck.subclasses():
@@ -90,7 +97,12 @@ class ActionModule(ActionBase):
"duplicate check name '{}' in: '{}' and '{}'"
"".format(name, full_class_name(cls), full_class_name(other_cls))
)
- known_checks[name] = cls(execute_module=self._execute_module, tmp=tmp, task_vars=task_vars)
+ known_checks[name] = cls(
+ execute_module=self._execute_module,
+ tmp=tmp,
+ task_vars=task_vars,
+ want_full_results=want_full_results
+ )
return known_checks
@@ -185,8 +197,10 @@ def normalize(checks):
return [name.strip() for name in checks if name.strip()]
-def run_check(name, check, user_disabled_checks):
+def run_check(name, check, user_disabled_checks, output_dir=None):
"""Run a single check if enabled and return a result dict."""
+
+ # determine if we're going to run the check (not inactive or disabled)
if name in user_disabled_checks or '*' in user_disabled_checks:
return dict(skipped=True, skipped_reason="Disabled by user request")
@@ -201,12 +215,134 @@ def run_check(name, check, user_disabled_checks):
if not is_active:
return dict(skipped=True, skipped_reason="Not active for this host")
+ # run the check
+ result = {}
try:
- return check.run()
+ result = check.run()
except OpenShiftCheckException as exc:
- return dict(failed=True, msg=str(exc))
+ check.register_failure(exc)
+ except Exception as exc:
+ check.register_failure("\n".join([str(exc), traceback.format_exc()]))
+
+ # process the check state; compose the result hash, write files as needed
+ if check.changed:
+ result["changed"] = True
+ if check.failures or result.get("failed"):
+ if "msg" in result: # failure result has msg; combine with any registered failures
+ check.register_failure(result.get("msg"))
+ result["failures"] = [(fail.name, str(fail)) for fail in check.failures]
+ result["failed"] = True
+ result["msg"] = "\n".join(str(fail) for fail in check.failures)
+ write_to_output_file(output_dir, name + ".failures.json", result["failures"])
+ if check.logs:
+ write_to_output_file(output_dir, name + ".log.json", check.logs)
+ if check.files_to_save:
+ write_files_to_save(output_dir, check)
+
+ return result
+
+
+def prepare_output_dir(dirname):
+ """Create the directory, including parents. Return bool for success/failure."""
+ try:
+ os.makedirs(dirname)
+ return True
+ except OSError as exc:
+ # trying to create an existing dir leads to an error;
+ # that error is fine, but for any other, assume the dir is not there
+ return exc.errno == errno.EEXIST
+
+
+def copy_remote_file_to_dir(check, file_to_save, output_dir, fname):
+ """Copy file from remote host to local file in output_dir, if given."""
+ if not output_dir or not prepare_output_dir(output_dir):
+ return
+ local_file = os.path.join(output_dir, fname)
+
+ # pylint: disable=broad-except; do not need to do anything about failure to write dir/file
+ # and do not want exceptions to break anything.
+ try:
+ # NOTE: it would have been nice to copy the file directly without loading it into
+ # memory, but there does not seem to be a good way to do this via ansible.
+ result = check.execute_module("slurp", dict(src=file_to_save), register=False)
+ if result.get("failed"):
+ display.warning("Could not retrieve file {}: {}".format(file_to_save, result.get("msg")))
+ return
+
+ content = result["content"]
+ if result.get("encoding") == "base64":
+ content = base64.b64decode(content)
+ with open(local_file, "wb") as outfile:
+ outfile.write(content)
+ except Exception as exc:
+ display.warning("Failed writing remote {} to local {}: {}".format(file_to_save, local_file, exc))
+ return
+
+
+def _no_fail(obj):
+ # pylint: disable=broad-except; do not want serialization to fail for any reason
+ try:
+ return str(obj)
+ except Exception:
+ return "[not serializable]"
+
+
+def write_to_output_file(output_dir, filename, data):
+ """If output_dir provided, write data to file. Serialize as JSON if data is not a string."""
+
+ if not output_dir or not prepare_output_dir(output_dir):
+ return
+ filename = os.path.join(output_dir, filename)
+ try:
+ with open(filename, 'w') as outfile:
+ if isinstance(data, string_types):
+ outfile.write(data)
+ else:
+ json.dump(data, outfile, sort_keys=True, indent=4, default=_no_fail)
+ # pylint: disable=broad-except; do not want serialization/write to break for any reason
+ except Exception as exc:
+ display.warning("Could not write output file {}: {}".format(filename, exc))
+
+
+def write_result_to_output_dir(output_dir, result):
+ """If output_dir provided, write the result as json to result.json.
+
+ Success/failure of the write is recorded as "output_files" in the result hash afterward.
+ Otherwise this is much like write_to_output_file.
+ """
+
+ if not output_dir:
+ return
+ if not prepare_output_dir(output_dir):
+ result["output_files"] = "Error creating output directory " + output_dir
+ return
+
+ filename = os.path.join(output_dir, "result.json")
+ try:
+ with open(filename, 'w') as outfile:
+ json.dump(result, outfile, sort_keys=True, indent=4, default=_no_fail)
+ result["output_files"] = "Check results for this host written to " + filename
+ # pylint: disable=broad-except; do not want serialization/write to break for any reason
except Exception as exc:
- return dict(failed=True, msg=str(exc), exception=traceback.format_exc())
+ result["output_files"] = "Error writing check results to {}:\n{}".format(filename, exc)
+
+
+def write_files_to_save(output_dir, check):
+ """Write files to check subdir in output dir."""
+ if not output_dir:
+ return
+ output_dir = os.path.join(output_dir, check.name)
+ seen_file = defaultdict(lambda: 0)
+ for file_to_save in check.files_to_save:
+ fname = file_to_save.filename
+ while seen_file[fname]: # just to be sure we never re-write a file, append numbers as needed
+ seen_file[fname] += 1
+ fname = "{}.{}".format(fname, seen_file[fname])
+ seen_file[fname] += 1
+ if file_to_save.remote_filename:
+ copy_remote_file_to_dir(check, file_to_save.remote_filename, output_dir, fname)
+ else:
+ write_to_output_file(output_dir, fname, file_to_save.contents)
def full_class_name(cls):
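A sketch of the on-disk layout the action plugin produces when openshift_checks_output_dir is set; the host name, check name, and saved file names below are hypothetical examples:

    # Sketch of the layout written under openshift_checks_output_dir:
    #
    #   <output_dir>/
    #     master1.example.com/               # one subdirectory per ansible_host
    #       result.json                      # overall result hash for the host
    #       disk_availability.failures.json  # registered failures, if any
    #       disk_availability.log.json       # ordered check log entries
    #       disk_availability/               # files the check registered to save
    #         docker_info.json               # e.g. a module result saved via save_as_name
    import os

    def host_output_dir(output_dir, ansible_host):
        """Mirror of how the plugin namespaces results per host before writing files."""
        return os.path.join(output_dir, ansible_host)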
diff --git a/roles/openshift_health_checker/library/ocutil.py b/roles/openshift_health_checker/library/ocutil.py
index 2e60735d6..c72f4c5b3 100644
--- a/roles/openshift_health_checker/library/ocutil.py
+++ b/roles/openshift_health_checker/library/ocutil.py
@@ -40,18 +40,17 @@ def main():
module = AnsibleModule(
argument_spec=dict(
- namespace=dict(type="str", required=True),
+ namespace=dict(type="str", required=False),
config_file=dict(type="str", required=True),
cmd=dict(type="str", required=True),
extra_args=dict(type="list", default=[]),
),
)
- cmd = [
- locate_oc_binary(),
- '--config', module.params["config_file"],
- '-n', module.params["namespace"],
- ] + shlex.split(module.params["cmd"])
+ cmd = [locate_oc_binary(), '--config', module.params["config_file"]]
+ if module.params["namespace"]:
+ cmd += ['-n', module.params["namespace"]]
+ cmd += shlex.split(module.params["cmd"]) + module.params["extra_args"]
failed = True
try:
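With namespace now optional, ocutil composes argv roughly as sketched below; the oc path and kubeconfig location are illustrative examples:

    # Sketch of the argv the module builds (paths are examples, not fixed values).
    oc = '/usr/bin/oc'
    config_file = '/etc/origin/master/admin.kubeconfig'

    # With a namespace (previous behaviour, still supported):
    cmd = [oc, '--config', config_file, '-n', 'default', 'get', 'pods']

    # Without a namespace (new: required=False), e.g. for cluster-wide commands
    # such as the diagnostics check added later in this patch:
    cmd = [oc, '--config', config_file, 'adm', 'diagnostics', 'ConfigContexts']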
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index 987c955b6..ce05b44a4 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -2,15 +2,18 @@
Health checks for OpenShift clusters.
"""
+import json
import operator
import os
import time
+import collections
from abc import ABCMeta, abstractmethod, abstractproperty
from importlib import import_module
from ansible.module_utils import six
from ansible.module_utils.six.moves import reduce # pylint: disable=import-error,redefined-builtin
+from ansible.module_utils.six import string_types
from ansible.plugins.filter.core import to_bool as ansible_to_bool
@@ -28,7 +31,7 @@ class OpenShiftCheckException(Exception):
class OpenShiftCheckExceptionList(OpenShiftCheckException):
- """A container for multiple logging errors that may be detected in one check."""
+ """A container for multiple errors that may be detected in one check."""
def __init__(self, errors):
self.errors = errors
super(OpenShiftCheckExceptionList, self).__init__(
@@ -41,29 +44,53 @@ class OpenShiftCheckExceptionList(OpenShiftCheckException):
return self.errors[index]
+FileToSave = collections.namedtuple("FileToSave", "filename contents remote_filename")
+
+
+# pylint: disable=too-many-instance-attributes; all represent significantly different state.
+# Arguably they could be separated into two hashes, one for storing parameters, and one for
+# storing result state; but that smells more like clutter than clarity.
@six.add_metaclass(ABCMeta)
class OpenShiftCheck(object):
- """
- A base class for defining checks for an OpenShift cluster environment.
+ """A base class for defining checks for an OpenShift cluster environment.
- Expect optional params: method execute_module, dict task_vars, and string tmp.
+ Optional init params: method execute_module, dict task_vars, and string tmp
execute_module is expected to have a signature compatible with _execute_module
from ansible plugins/action/__init__.py, e.g.:
def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *args):
This is stored so that it can be invoked in subclasses via check.execute_module("name", args)
which provides the check's stored task_vars and tmp.
+
+ Optional init param: want_full_results
+ When True, a check that can gather logs, tarballs, etc. should do so; when False there is
+ no need to spend the time, since nothing will be written to an output directory.
"""
- def __init__(self, execute_module=None, task_vars=None, tmp=None):
+ def __init__(self, execute_module=None, task_vars=None, tmp=None, want_full_results=False):
+ # store a method for executing ansible modules from the check
self._execute_module = execute_module
+ # the task variables and tmpdir passed into the health checker task
self.task_vars = task_vars or {}
self.tmp = tmp
+ # a boolean for disabling the gathering of results (files, computations) that won't
+ # actually be recorded/used
+ self.want_full_results = want_full_results
+
# mainly for testing purposes; see execute_module_with_retries
self._module_retries = 3
self._module_retry_interval = 5 # seconds
+ # state to be recorded for inspection after the check runs:
+ #
# set to True when the check changes the host, for accurate total "changed" count
self.changed = False
+ # list of OpenShiftCheckException for check to report (alternative to returning a failed result)
+ self.failures = []
+ # list of FileToSave - files the check specifies to be written locally if so configured
+ self.files_to_save = []
+ # log messages for the check - tuples of (description, msg) where msg is serializable.
+ # These are intended to be a sequential record of what the check observed and determined.
+ self.logs = []
@abstractproperty
def name(self):
@@ -84,9 +111,20 @@ class OpenShiftCheck(object):
"""Returns true if this check applies to the ansible-playbook run."""
return True
+ def is_first_master(self):
+ """Determine if running on first master. Returns: bool"""
+ masters = self.get_var("groups", "oo_first_master", default=None) or [None]
+ return masters[0] == self.get_var("ansible_host")
+
@abstractmethod
def run(self):
- """Executes a check, normally implemented as a module."""
+ """Executes a check against a host and returns a result hash similar to Ansible modules.
+
+ Actually the direction ahead is to record state in the attributes and
+ not bother building a result hash. Instead, return an empty hash and let
+ the action plugin fill it in. Or raise an OpenShiftCheckException.
+ Returning a hash may become deprecated if it does not prove necessary.
+ """
return {}
@classmethod
@@ -98,7 +136,43 @@ class OpenShiftCheck(object):
for subclass in subclass.subclasses():
yield subclass
- def execute_module(self, module_name=None, module_args=None):
+ def register_failure(self, error):
+ """Record in the check that a failure occurred.
+
+ Recorded failures are merged into the result hash for now. They are also saved to the output
+ directory (if provided) as <check>.failures.json and registered as a log entry in <check>.log.json.
+ """
+ # It should be an exception; make it one if not
+ if not isinstance(error, OpenShiftCheckException):
+ error = OpenShiftCheckException(str(error))
+ self.failures.append(error)
+ # duplicate it in the logs so it can be seen in the context of any
+ # information that led to the failure
+ self.register_log("failure: " + error.name, str(error))
+
+ def register_log(self, context, msg):
+ """Record an entry for the check log.
+
+ Log entries are intended to serve as context for the whole sequence of what the check observed.
+ They are saved as an ordered list in a local check log file.
+ They are not included in the result or in the ansible log; they exist just for the record.
+ """
+ self.logs.append([context, msg])
+
+ def register_file(self, filename, contents=None, remote_filename=""):
+ """Record a file that a check makes available to be saved individually to output directory.
+
+ Either file contents should be passed in, or a file to be copied from the remote host
+ should be specified. Contents that are not a string are to be serialized as JSON.
+
+ NOTE: When copying a file from remote host, it is slurped into memory as base64, meaning
+ you should avoid using this on huge files (more than say 10M).
+ """
+ if contents is None and not remote_filename:
+ raise OpenShiftCheckException("File data/source not specified; this is a bug in the check.")
+ self.files_to_save.append(FileToSave(filename, contents, remote_filename))
+
+ def execute_module(self, module_name=None, module_args=None, save_as_name=None, register=True):
"""Invoke an Ansible module from a check.
Invoke stored _execute_module, normally copied from the action
@@ -110,6 +184,12 @@ class OpenShiftCheck(object):
Ansible version).
So e.g. check.execute_module("foo", dict(arg1=...))
+
+ save_as_name specifies a file name for saving the result to an output directory,
+ if needed, and is intended to uniquely identify the result of invoking execute_module.
+ If not provided, the module name will be used.
+ If register is set to False, then the result won't be registered in logs or files to save.
+
Return: result hash from module execution.
"""
if self._execute_module is None:
@@ -117,7 +197,20 @@ class OpenShiftCheck(object):
self.__class__.__name__ +
" invoked execute_module without providing the method at initialization."
)
- return self._execute_module(module_name, module_args, self.tmp, self.task_vars)
+ result = self._execute_module(module_name, module_args, self.tmp, self.task_vars)
+ if result.get("changed"):
+ self.changed = True
+ for output in ["result", "stdout"]:
+ # output is often JSON; attempt to decode
+ try:
+ result[output + "_json"] = json.loads(result[output])
+ except (KeyError, ValueError):
+ pass
+
+ if register:
+ self.register_log("execute_module: " + module_name, result)
+ self.register_file(save_as_name or module_name + ".json", result)
+ return result
def execute_module_with_retries(self, module_name, module_args):
"""Run execute_module and retry on failure."""
@@ -188,8 +281,23 @@ class OpenShiftCheck(object):
'There is a bug in this check. While trying to convert variable \n'
' "{var}={value}"\n'
'the given converter cannot be used or failed unexpectedly:\n'
- '{error}'.format(var=".".join(keys), value=value, error=error)
- )
+ '{type}: {error}'.format(
+ var=".".join(keys),
+ value=value,
+ type=error.__class__.__name__,
+ error=error
+ ))
+
+ @staticmethod
+ def normalize(name_list):
+ """Return a clean list of names.
+
+ The input may be a comma-separated string or a sequence. Leading and
+ trailing whitespace characters are removed. Empty items are discarded.
+ """
+ if isinstance(name_list, string_types):
+ name_list = name_list.split(',')
+ return [name.strip() for name in name_list if name.strip()]
@staticmethod
def get_major_minor_version(openshift_image_tag):
@@ -231,7 +339,9 @@ class OpenShiftCheck(object):
mount_point = os.path.dirname(mount_point)
try:
- return mount_for_path[mount_point]
+ mount = mount_for_path[mount_point]
+ self.register_log("mount point for " + path, mount)
+ return mount
except KeyError:
known_mounts = ', '.join('"{}"'.format(mount) for mount in sorted(mount_for_path))
raise OpenShiftCheckException(
@@ -259,7 +369,7 @@ def load_checks(path=None, subpkg=""):
modules = modules + load_checks(os.path.join(path, name), subpkg + "." + name)
continue
- if name.endswith(".py") and not name.startswith(".") and name not in LOADER_EXCLUDES:
+ if name.endswith(".py") and name not in LOADER_EXCLUDES:
modules.append(import_module(__package__ + subpkg + "." + name[:-3]))
return modules
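A minimal, hypothetical check illustrating the new state-based reporting API (register_log, register_failure, register_file, want_full_results) described in the docstrings above:

    from openshift_checks import OpenShiftCheck, OpenShiftCheckException

    class ExampleCheck(OpenShiftCheck):
        """Hypothetical check sketching the new reporting attributes."""

        name = "example_check"
        tags = ["health"]

        def run(self):
            # module result is automatically logged and registered as example_ping.json
            result = self.execute_module("ping", {}, save_as_name="example_ping.json")
            self.register_log("ping result", result)
            if result.get("failed"):
                self.register_failure(OpenShiftCheckException("ping module failed"))
            if self.want_full_results:
                # only gather extra artifacts when an output directory was requested
                self.register_file("notes.txt", contents="expensive diagnostics would go here")
            return {}  # the action plugin composes the final result from the state above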
diff --git a/roles/openshift_health_checker/openshift_checks/diagnostics.py b/roles/openshift_health_checker/openshift_checks/diagnostics.py
new file mode 100644
index 000000000..1cfdc1129
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/diagnostics.py
@@ -0,0 +1,62 @@
+"""
+A check to run relevant diagnostics via `oc adm diagnostics`.
+"""
+
+import os
+
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
+
+
+DIAGNOSTIC_LIST = (
+ "AggregatedLogging ClusterRegistry ClusterRoleBindings ClusterRoles "
+ "ClusterRouter DiagnosticPod NetworkCheck"
+).split()
+
+
+class DiagnosticCheck(OpenShiftCheck):
+ """A check to run relevant diagnostics via `oc adm diagnostics`."""
+
+ name = "diagnostics"
+ tags = ["health"]
+
+ def is_active(self):
+ return super(DiagnosticCheck, self).is_active() and self.is_first_master()
+
+ def run(self):
+ if self.exec_diagnostic("ConfigContexts"):
+ # only run the other diagnostics if that one succeeds (otherwise, all will fail)
+ diagnostics = self.get_var("openshift_check_diagnostics", default=DIAGNOSTIC_LIST)
+ for diagnostic in self.normalize(diagnostics):
+ self.exec_diagnostic(diagnostic)
+ return {}
+
+ def exec_diagnostic(self, diagnostic):
+ """
+ Execute an 'oc adm diagnostics' command on the remote host.
+ Raises OcNotFound or registers OcDiagFailed.
+ Returns True on success or False on failure (non-zero rc).
+ """
+ config_base = self.get_var("openshift.common.config_base")
+ args = {
+ "config_file": os.path.join(config_base, "master", "admin.kubeconfig"),
+ "cmd": "adm diagnostics",
+ "extra_args": [diagnostic],
+ }
+
+ result = self.execute_module("ocutil", args, save_as_name=diagnostic + ".failure.json")
+ self.register_file(diagnostic + ".txt", result['result'])
+ if result.get("failed"):
+ if result['result'] == '[Errno 2] No such file or directory':
+ raise OpenShiftCheckException(
+ "OcNotFound",
+ "This host is supposed to be a master but does not have the `oc` command where expected.\n"
+ "Has an installation been run on this host yet?"
+ )
+
+ self.register_failure(OpenShiftCheckException(
+ 'OcDiagFailed',
+ 'The {diag} diagnostic reported an error:\n'
+ '{error}'.format(diag=diagnostic, error=result['result'])
+ ))
+ return False
+ return True
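A hypothetical usage sketch (not part of the patch) of how the new check consumes the openshift_check_diagnostics variable: a comma-separated string or list is normalized, and each name becomes the extra argument handed to the ocutil module as `oc adm diagnostics <name>`. The "/etc/origin" config base mirrors the test fixture below.

    import os

    def build_diag_args(config_base, diagnostic):
        # Mirrors the args dict built in exec_diagnostic() above.
        return {
            "config_file": os.path.join(config_base, "master", "admin.kubeconfig"),
            "cmd": "adm diagnostics",
            "extra_args": [diagnostic],
        }

    requested = "ConfigContexts, DiagnosticPod"  # e.g. from the inventory variable
    for name in [n.strip() for n in requested.split(",") if n.strip()]:
        print(build_diag_args("/etc/origin", name))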
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
index f302fd14b..7956559c6 100644
--- a/roles/openshift_health_checker/openshift_checks/disk_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -15,31 +15,31 @@ class DiskAvailability(OpenShiftCheck):
# https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
recommended_disk_space_bytes = {
'/var': {
- 'masters': 40 * 10**9,
- 'nodes': 15 * 10**9,
- 'etcd': 20 * 10**9,
+ 'oo_masters_to_config': 40 * 10**9,
+ 'oo_nodes_to_config': 15 * 10**9,
+ 'oo_etcd_to_config': 20 * 10**9,
},
# Used to copy client binaries into,
# see roles/openshift_cli/library/openshift_container_binary_sync.py.
'/usr/local/bin': {
- 'masters': 1 * 10**9,
- 'nodes': 1 * 10**9,
- 'etcd': 1 * 10**9,
+ 'oo_masters_to_config': 1 * 10**9,
+ 'oo_nodes_to_config': 1 * 10**9,
+ 'oo_etcd_to_config': 1 * 10**9,
},
# Used as temporary storage in several cases.
tempfile.gettempdir(): {
- 'masters': 1 * 10**9,
- 'nodes': 1 * 10**9,
- 'etcd': 1 * 10**9,
+ 'oo_masters_to_config': 1 * 10**9,
+ 'oo_nodes_to_config': 1 * 10**9,
+ 'oo_etcd_to_config': 1 * 10**9,
},
}
# recommended disk space for each location under an upgrade context
recommended_disk_upgrade_bytes = {
'/var': {
- 'masters': 10 * 10**9,
- 'nodes': 5 * 10 ** 9,
- 'etcd': 5 * 10 ** 9,
+ 'oo_masters_to_config': 10 * 10**9,
+ 'oo_nodes_to_config': 5 * 10 ** 9,
+ 'oo_etcd_to_config': 5 * 10 ** 9,
},
}
@@ -61,15 +61,19 @@ class DiskAvailability(OpenShiftCheck):
number = float(user_config)
user_config = {
'/var': {
- 'masters': number,
- 'nodes': number,
- 'etcd': number,
+ 'oo_masters_to_config': number,
+ 'oo_nodes_to_config': number,
+ 'oo_etcd_to_config': number,
},
}
except TypeError:
# If it is not a number, then it should be a nested dict.
pass
+ self.register_log("recommended thresholds", self.recommended_disk_space_bytes)
+ if user_config:
+ self.register_log("user-configured thresholds", user_config)
+
# TODO: as suggested in
# https://github.com/openshift/openshift-ansible/pull/4436#discussion_r122180021,
# maybe we could support checking disk availability in paths that are
@@ -113,10 +117,7 @@ class DiskAvailability(OpenShiftCheck):
'in your Ansible inventory, and lower the recommended disk space availability\n'
'if necessary for this upgrade.').format(config_bytes)
- return {
- 'failed': True,
- 'msg': msg,
- }
+ self.register_failure(msg)
return {}
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 9c35f0f92..7c8ac78fe 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -1,5 +1,7 @@
"""Check that required Docker images are available."""
+from pipes import quote
+from ansible.module_utils import six
from openshift_checks import OpenShiftCheck
from openshift_checks.mixins import DockerHostMixin
@@ -32,10 +34,39 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# we use python-docker-py to check local docker for images, and skopeo
# to look for images available remotely without waiting to pull them.
dependencies = ["python-docker-py", "skopeo"]
- skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false docker://{registry}/{image}"
+ # command for checking if remote registries have an image, without docker pull
+ skopeo_command = "timeout 10 skopeo inspect --tls-verify={tls} {creds} docker://{registry}/{image}"
+ skopeo_example_command = "skopeo inspect [--tls-verify=false] [--creds=<user>:<pass>] docker://<registry>/<image>"
def __init__(self, *args, **kwargs):
super(DockerImageAvailability, self).__init__(*args, **kwargs)
+
+ self.registries = dict(
+ # set of registries that need to be checked insecurely (note: not accounting for CIDR entries)
+ insecure=set(self.ensure_list("openshift_docker_insecure_registries")),
+ # set of registries that should never be queried even if given in the image
+ blocked=set(self.ensure_list("openshift_docker_blocked_registries")),
+ )
+
+ # ordered list of registries (according to inventory vars) that docker will try for unscoped images
+ regs = self.ensure_list("openshift_docker_additional_registries")
+ # currently one of these registries is added whether the user wants it or not.
+ deployment_type = self.get_var("openshift_deployment_type")
+ if deployment_type == "origin" and "docker.io" not in regs:
+ regs.append("docker.io")
+ elif deployment_type == 'openshift-enterprise' and "registry.access.redhat.com" not in regs:
+ regs.append("registry.access.redhat.com")
+ self.registries["configured"] = regs
+
+ # for the oreg_url registry there may be credentials specified
+ components = self.get_var("oreg_url", default="").split('/')
+ self.registries["oreg"] = "" if len(components) < 3 else components[0]
+ self.skopeo_command_creds = ""
+ oreg_auth_user = self.get_var('oreg_auth_user', default='')
+ oreg_auth_password = self.get_var('oreg_auth_password', default='')
+ if oreg_auth_user != '' and oreg_auth_password != '':
+ self.skopeo_command_creds = "--creds={}:{}".format(quote(oreg_auth_user), quote(oreg_auth_password))
+
# record whether we could reach a registry or not (and remember results)
self.reachable_registries = {}
@@ -61,26 +92,25 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
if not missing_images:
return {}
- registries = self.known_docker_registries()
- if not registries:
- return {"failed": True, "msg": "Unable to retrieve any docker registries."}
-
- available_images = self.available_images(missing_images, registries)
+ available_images = self.available_images(missing_images)
unavailable_images = set(missing_images) - set(available_images)
if unavailable_images:
- registries = [
- reg if self.reachable_registries.get(reg, True) else reg + " (unreachable)"
- for reg in registries
- ]
+ unreachable = [reg for reg, reachable in self.reachable_registries.items() if not reachable]
+ unreachable_msg = "Failed connecting to: {}\n".format(", ".join(unreachable))
+ blocked_msg = "Blocked registries: {}\n".format(", ".join(self.registries["blocked"]))
msg = (
- "One or more required Docker images are not available:\n {}\n"
- "Configured registries: {}\n"
- "Checked by: {}"
+ "One or more required container images are not available:\n {missing}\n"
+ "Checked with: {cmd}\n"
+ "Default registries searched: {registries}\n"
+ "{blocked}"
+ "{unreachable}"
).format(
- ",\n ".join(sorted(unavailable_images)),
- ", ".join(registries),
- self.skopeo_img_check_command
+ missing=",\n ".join(sorted(unavailable_images)),
+ cmd=self.skopeo_example_command,
+ registries=", ".join(self.registries["configured"]),
+ blocked=blocked_msg if self.registries["blocked"] else "",
+ unreachable=unreachable_msg if unreachable else "",
)
return dict(failed=True, msg=msg)
@@ -109,13 +139,11 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# containerized etcd may not have openshift_image_tag, see bz 1466622
image_tag = self.get_var("openshift_image_tag", default="latest")
image_info = DEPLOYMENT_IMAGE_INFO[deployment_type]
- if not image_info:
- return required
# template for images that run on top of OpenShift
image_url = "{}/{}-{}:{}".format(image_info["namespace"], image_info["name"], "${component}", "${version}")
image_url = self.get_var("oreg_url", default="") or image_url
- if 'nodes' in host_groups:
+ if 'oo_nodes_to_config' in host_groups:
for suffix in NODE_IMAGE_SUFFIXES:
required.add(image_url.replace("${component}", suffix).replace("${version}", image_tag))
# The registry-console is for some reason not prefixed with ose- like the other components.
@@ -126,24 +154,23 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# images for containerized components
if self.get_var("openshift", "common", "is_containerized"):
components = set()
- if 'nodes' in host_groups:
+ if 'oo_nodes_to_config' in host_groups:
components.update(["node", "openvswitch"])
- if 'masters' in host_groups: # name is "origin" or "ose"
+ if 'oo_masters_to_config' in host_groups: # name is "origin" or "ose"
components.add(image_info["name"])
for component in components:
required.add("{}/{}:{}".format(image_info["namespace"], component, image_tag))
- if 'etcd' in host_groups: # special case, note it is the same for origin/enterprise
+ if 'oo_etcd_to_config' in host_groups: # special case, note it is the same for origin/enterprise
required.add("registry.access.redhat.com/rhel7/etcd") # and no image tag
return required
def local_images(self, images):
"""Filter a list of images and return those available locally."""
- registries = self.known_docker_registries()
found_images = []
for image in images:
# docker could have the image name as-is or prefixed with any registry
- imglist = [image] + [reg + "/" + image for reg in registries]
+ imglist = [image] + [reg + "/" + image for reg in self.registries["configured"]]
if self.is_image_local(imglist):
found_images.append(image)
return found_images
@@ -153,29 +180,27 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
result = self.execute_module("docker_image_facts", {"name": image})
return bool(result.get("images")) and not result.get("failed")
- def known_docker_registries(self):
- """Build a list of docker registries available according to inventory vars."""
- regs = list(self.get_var("openshift.docker.additional_registries", default=[]))
-
- deployment_type = self.get_var("openshift_deployment_type")
- if deployment_type == "origin" and "docker.io" not in regs:
- regs.append("docker.io")
- elif "enterprise" in deployment_type and "registry.access.redhat.com" not in regs:
- regs.append("registry.access.redhat.com")
-
- return regs
-
- def available_images(self, images, default_registries):
+ def ensure_list(self, registry_param):
+ """Return the task var as a list."""
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1497274
+ # If the value is a string, normalize it (split on commas, strip whitespace)
+ # rather than calling list(), which would split the string into characters.
+ # Otherwise cast to a list as was done previously.
+ registry = self.get_var(registry_param, default=[])
+ if not isinstance(registry, six.string_types):
+ return list(registry)
+ return self.normalize(registry)
+
+ def available_images(self, images):
"""Search remotely for images. Returns: list of images found."""
return [
image for image in images
- if self.is_available_skopeo_image(image, default_registries)
+ if self.is_available_skopeo_image(image)
]
- def is_available_skopeo_image(self, image, default_registries):
+ def is_available_skopeo_image(self, image):
"""Use Skopeo to determine if required image exists in known registry(s)."""
- registries = default_registries
-
+ registries = self.registries["configured"]
# If image already includes a registry, only use that.
# NOTE: This logic would incorrectly identify images that do not use a namespace, e.g.
# registry.access.redhat.com/rhel7 as if the registry were a namespace.
@@ -186,13 +211,18 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
registries = [registry]
for registry in registries:
+ if registry in self.registries["blocked"]:
+ continue # blocked will never be consulted
if registry not in self.reachable_registries:
self.reachable_registries[registry] = self.connect_to_registry(registry)
if not self.reachable_registries[registry]:
- continue
+ continue # do not keep trying unreachable registries
+
+ args = dict(registry=registry, image=image)
+ args["tls"] = "false" if registry in self.registries["insecure"] else "true"
+ args["creds"] = self.skopeo_command_creds if registry == self.registries["oreg"] else ""
- args = {"_raw_params": self.skopeo_img_check_command.format(registry=registry, image=image)}
- result = self.execute_module_with_retries("command", args)
+ result = self.execute_module_with_retries("command", {"_raw_params": self.skopeo_command.format(**args)})
if result.get("rc", 0) == 0 and not result.get("failed"):
return True
if result.get("rc") == 124: # RC 124 == timed out; mark unreachable
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
index b4c8957e9..8b20ccb49 100644
--- a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
+++ b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
@@ -12,7 +12,7 @@ class EtcdTraffic(OpenShiftCheck):
def is_active(self):
"""Skip hosts that do not have etcd in their group names."""
group_names = self.get_var("group_names", default=[])
- valid_group_names = "etcd" in group_names
+ valid_group_names = "oo_etcd_to_config" in group_names
version = self.get_major_minor_version(self.get_var("openshift_image_tag"))
valid_version = version in ((3, 4), (3, 5))
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_volume.py b/roles/openshift_health_checker/openshift_checks/etcd_volume.py
index e5d93ff3f..3d75da6f9 100644
--- a/roles/openshift_health_checker/openshift_checks/etcd_volume.py
+++ b/roles/openshift_health_checker/openshift_checks/etcd_volume.py
@@ -15,8 +15,12 @@ class EtcdVolume(OpenShiftCheck):
etcd_mount_path = "/var/lib/etcd"
def is_active(self):
- etcd_hosts = self.get_var("groups", "etcd", default=[]) or self.get_var("groups", "masters", default=[]) or []
- is_etcd_host = self.get_var("ansible_ssh_host") in etcd_hosts
+ etcd_hosts = (
+ self.get_var("groups", "oo_etcd_to_config", default=[]) or
+ self.get_var("groups", "oo_masters_to_config", default=[]) or
+ []
+ )
+ is_etcd_host = self.get_var("ansible_host") in etcd_hosts
return super(EtcdVolume, self).is_active() and is_etcd_host
def run(self):
diff --git a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
index 7fc843fd7..986a01f38 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
@@ -72,7 +72,7 @@ class Elasticsearch(LoggingCheck):
for pod_name in pods_by_name.keys():
# Compare what each ES node reports as master and compare for split brain
get_master_cmd = self._build_es_curl_cmd(pod_name, "https://localhost:9200/_cat/master")
- master_name_str = self.exec_oc(get_master_cmd, [])
+ master_name_str = self.exec_oc(get_master_cmd, [], save_as_name="get_master_names.json")
master_names = (master_name_str or '').split(' ')
if len(master_names) > 1:
es_master_names.add(master_names[1])
@@ -113,7 +113,7 @@ class Elasticsearch(LoggingCheck):
# get ES cluster nodes
node_cmd = self._build_es_curl_cmd(list(pods_by_name.keys())[0], 'https://localhost:9200/_nodes')
- cluster_node_data = self.exec_oc(node_cmd, [])
+ cluster_node_data = self.exec_oc(node_cmd, [], save_as_name="get_es_nodes.json")
try:
cluster_nodes = json.loads(cluster_node_data)['nodes']
except (ValueError, KeyError):
@@ -142,7 +142,7 @@ class Elasticsearch(LoggingCheck):
errors = []
for pod_name in pods_by_name.keys():
cluster_health_cmd = self._build_es_curl_cmd(pod_name, 'https://localhost:9200/_cluster/health?pretty=true')
- cluster_health_data = self.exec_oc(cluster_health_cmd, [])
+ cluster_health_data = self.exec_oc(cluster_health_cmd, [], save_as_name='get_es_health.json')
try:
health_res = json.loads(cluster_health_data)
if not health_res or not health_res.get('status'):
@@ -171,7 +171,7 @@ class Elasticsearch(LoggingCheck):
errors = []
for pod_name in pods_by_name.keys():
df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
- disk_output = self.exec_oc(df_cmd, [])
+ disk_output = self.exec_oc(df_cmd, [], save_as_name='get_pv_diskspace.json')
lines = disk_output.splitlines()
# expecting one header looking like 'IUse% Use%' and one body line
body_re = r'\s*(\d+)%?\s+(\d+)%?\s*$'
diff --git a/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py b/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py
index d783e6760..e93cc9028 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py
@@ -46,7 +46,7 @@ class FluentdConfig(LoggingCheck):
# if check is running on a master, retrieve all running pods
# and check any pod's container for the env var "USE_JOURNAL"
group_names = self.get_var("group_names")
- if "masters" in group_names:
+ if "oo_masters_to_config" in group_names:
use_journald = self.check_fluentd_env_var()
docker_info = self.execute_module("docker_info", {})
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging.py b/roles/openshift_health_checker/openshift_checks/logging/logging.py
index ecd8adb64..05ba73ca1 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/logging.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging.py
@@ -30,14 +30,6 @@ class LoggingCheck(OpenShiftCheck):
logging_deployed = self.get_var("openshift_hosted_logging_deploy", convert=bool, default=False)
return logging_deployed and super(LoggingCheck, self).is_active() and self.is_first_master()
- def is_first_master(self):
- """Determine if running on first master. Returns: bool"""
- # Note: It would be nice to use membership in oo_first_master group, however for now it
- # seems best to avoid requiring that setup and just check this is the first master.
- hostname = self.get_var("ansible_ssh_host") or [None]
- masters = self.get_var("groups", "masters", default=None) or [None]
- return masters[0] == hostname
-
def run(self):
return {}
@@ -78,7 +70,7 @@ class LoggingCheck(OpenShiftCheck):
"""Returns the namespace in which logging is configured to deploy."""
return self.get_var("openshift_logging_namespace", default="logging")
- def exec_oc(self, cmd_str="", extra_args=None):
+ def exec_oc(self, cmd_str="", extra_args=None, save_as_name=None):
"""
Execute an 'oc' command in the remote host.
Returns: output of command and namespace,
@@ -92,7 +84,7 @@ class LoggingCheck(OpenShiftCheck):
"extra_args": list(extra_args) if extra_args else [],
}
- result = self.execute_module("ocutil", args)
+ result = self.execute_module("ocutil", args, save_as_name=save_as_name)
if result.get("failed"):
if result['result'] == '[Errno 2] No such file or directory':
raise CouldNotUseOc(
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
index d781db649..cacdf4213 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
@@ -104,7 +104,7 @@ class LoggingIndexTime(LoggingCheck):
"https://logging-es:9200/project.{namespace}*/_count?q=message:{uuid}"
)
exec_cmd = exec_cmd.format(pod_name=pod_name, namespace=self.logging_namespace(), uuid=uuid)
- result = self.exec_oc(exec_cmd, [])
+ result = self.exec_oc(exec_cmd, [], save_as_name="query_for_uuid.json")
try:
count = json.loads(result)["count"]
diff --git a/roles/openshift_health_checker/openshift_checks/memory_availability.py b/roles/openshift_health_checker/openshift_checks/memory_availability.py
index 765ba072d..e7a8ec976 100644
--- a/roles/openshift_health_checker/openshift_checks/memory_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/memory_availability.py
@@ -14,9 +14,9 @@ class MemoryAvailability(OpenShiftCheck):
# Values taken from the official installation documentation:
# https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
recommended_memory_bytes = {
- "masters": 16 * GIB,
- "nodes": 8 * GIB,
- "etcd": 8 * GIB,
+ "oo_masters_to_config": 16 * GIB,
+ "oo_nodes_to_config": 8 * GIB,
+ "oo_etcd_to_config": 8 * GIB,
}
# https://access.redhat.com/solutions/3006511 physical RAM is partly reserved from memtotal
memtotal_adjustment = 1 * GIB
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index 24f1d938a..cfbdea303 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -21,9 +21,11 @@ class DockerHostMixin(object):
def is_active(self):
"""Only run on hosts that depend on Docker."""
- is_containerized = self.get_var("openshift", "common", "is_containerized")
- is_node = "nodes" in self.get_var("group_names", default=[])
- return super(DockerHostMixin, self).is_active() and (is_containerized or is_node)
+ group_names = set(self.get_var("group_names", default=[]))
+ needs_docker = set(["oo_nodes_to_config"])
+ if self.get_var("openshift.common.is_containerized"):
+ needs_docker.update(["oo_masters_to_config", "oo_etcd_to_config"])
+ return super(DockerHostMixin, self).is_active() and bool(group_names.intersection(needs_docker))
def ensure_dependencies(self):
"""
@@ -49,5 +51,4 @@ class DockerHostMixin(object):
" {deps}\n{msg}"
).format(deps=',\n '.join(self.dependencies), msg=msg)
failed = result.get("failed", False) or result.get("rc", 0) != 0
- self.changed = result.get("changed", False)
return msg, failed
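The revised DockerHostMixin.is_active() above boils down to a group-name intersection; a minimal sketch of that rule, assuming the same oo_* group names:

    def needs_docker(group_names, is_containerized):
        # Nodes always need docker; masters and etcd only when containerized.
        needed = {"oo_nodes_to_config"}
        if is_containerized:
            needed.update({"oo_masters_to_config", "oo_etcd_to_config"})
        return bool(set(group_names) & needed)

    assert needs_docker(["oo_nodes_to_config"], False)
    assert not needs_docker(["oo_masters_to_config"], False)
    assert needs_docker(["oo_etcd_to_config"], True)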
diff --git a/roles/openshift_health_checker/openshift_checks/ovs_version.py b/roles/openshift_health_checker/openshift_checks/ovs_version.py
index 363c12def..416805c4d 100644
--- a/roles/openshift_health_checker/openshift_checks/ovs_version.py
+++ b/roles/openshift_health_checker/openshift_checks/ovs_version.py
@@ -24,7 +24,7 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
def is_active(self):
"""Skip hosts that do not have package requirements."""
group_names = self.get_var("group_names", default=[])
- master_or_node = 'masters' in group_names or 'nodes' in group_names
+ master_or_node = 'oo_masters_to_config' in group_names or 'oo_nodes_to_config' in group_names
return super(OvsVersion, self).is_active() and master_or_node
def run(self):
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index 21355c2f0..090e438ff 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -20,9 +20,9 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
packages = set()
- if "masters" in group_names:
+ if "oo_masters_to_config" in group_names:
packages.update(self.master_packages(rpm_prefix))
- if "nodes" in group_names:
+ if "oo_nodes_to_config" in group_names:
packages.update(self.node_packages(rpm_prefix))
args = {"packages": sorted(set(packages))}
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index d4aec3ed8..2f09b22fc 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -36,7 +36,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
def is_active(self):
"""Skip hosts that do not have package requirements."""
group_names = self.get_var("group_names", default=[])
- master_or_node = 'masters' in group_names or 'nodes' in group_names
+ master_or_node = 'oo_masters_to_config' in group_names or 'oo_nodes_to_config' in group_names
return super(PackageVersion, self).is_active() and master_or_node
def run(self):
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
index 58864da21..40ad27d5d 100644
--- a/roles/openshift_health_checker/test/action_plugin_test.py
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -3,10 +3,12 @@ import pytest
from ansible.playbook.play_context import PlayContext
from openshift_health_check import ActionModule, resolve_checks
-from openshift_checks import OpenShiftCheckException
+from openshift_health_check import copy_remote_file_to_dir, write_result_to_output_dir, write_to_output_file
+from openshift_checks import OpenShiftCheckException, FileToSave
-def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None, changed=False):
+def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None,
+ run_logs=None, run_files=None, changed=False, get_var_return=None):
"""Returns a new class that is compatible with OpenShiftCheck for testing."""
_name, _tags = name, tags
@@ -14,12 +16,16 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
class FakeCheck(object):
name = _name
tags = _tags or []
- changed = False
- def __init__(self, execute_module=None, task_vars=None, tmp=None):
- pass
+ def __init__(self, **_):
+ self.changed = False
+ self.failures = []
+ self.logs = run_logs or []
+ self.files_to_save = run_files or []
def is_active(self):
+ if isinstance(is_active, Exception):
+ raise is_active
return is_active
def run(self):
@@ -28,6 +34,13 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
raise run_exception
return run_return
+ def get_var(*args, **_):
+ return get_var_return
+
+ def register_failure(self, exc):
+ self.failures.append(OpenShiftCheckException(str(exc)))
+ return
+
return FakeCheck
@@ -81,6 +94,7 @@ def skipped(result):
{},
])
def test_action_plugin_missing_openshift_facts(plugin, task_vars, monkeypatch):
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -98,13 +112,18 @@ def test_action_plugin_cannot_load_checks_with_the_same_name(plugin, task_vars,
assert failed(result, msg_has=['duplicate', 'duplicate_name', 'FakeCheck'])
-def test_action_plugin_skip_non_active_checks(plugin, task_vars, monkeypatch):
- checks = [fake_check(is_active=False)]
+@pytest.mark.parametrize('is_active, skipped_reason', [
+ (False, "Not active for this host"),
+ (Exception("borked"), "exception"),
+])
+def test_action_plugin_skip_non_active_checks(is_active, skipped_reason, plugin, task_vars, monkeypatch):
+ checks = [fake_check(is_active=is_active)]
monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
result = plugin.run(tmp=None, task_vars=task_vars)
- assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Not active for this host")
+ assert result['checks']['fake_check'].get('skipped')
+ assert skipped_reason in result['checks']['fake_check'].get('skipped_reason')
assert not failed(result)
assert not changed(result)
assert not skipped(result)
@@ -128,10 +147,21 @@ def test_action_plugin_skip_disabled_checks(to_disable, plugin, task_vars, monke
assert not skipped(result)
+def test_action_plugin_run_list_checks(monkeypatch):
+ task = FakeTask('openshift_health_check', {'checks': []})
+ plugin = ActionModule(task, None, PlayContext(), None, None, None)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
+ result = plugin.run()
+
+ assert failed(result, msg_has="Available checks")
+ assert not changed(result)
+ assert not skipped(result)
+
+
def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
check_return_value = {'ok': 'test'}
- check_class = fake_check(run_return=check_return_value)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ check_class = fake_check(run_return=check_return_value, run_files=[None])
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -145,7 +175,7 @@ def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
check_return_value = {'ok': 'test'}
check_class = fake_check(run_return=check_return_value, changed=True)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -158,9 +188,9 @@ def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
- check_return_value = {'failed': True}
+ check_return_value = {'failed': True, 'msg': 'this is a failure'}
check_class = fake_check(run_return=check_return_value)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -171,24 +201,51 @@ def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
assert not skipped(result)
-def test_action_plugin_run_check_exception(plugin, task_vars, monkeypatch):
+@pytest.mark.parametrize('exc_class, expect_traceback', [
+ (OpenShiftCheckException, False),
+ (Exception, True),
+])
+def test_action_plugin_run_check_exception(plugin, task_vars, exc_class, expect_traceback, monkeypatch):
exception_msg = 'fake check has an exception'
- run_exception = OpenShiftCheckException(exception_msg)
+ run_exception = exc_class(exception_msg)
check_class = fake_check(run_exception=run_exception, changed=True)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
assert failed(result['checks']['fake_check'], msg_has=exception_msg)
+ assert expect_traceback == ("Traceback" in result['checks']['fake_check']['msg'])
assert failed(result, msg_has=['failed'])
assert changed(result['checks']['fake_check'])
assert changed(result)
assert not skipped(result)
+def test_action_plugin_run_check_output_dir(plugin, task_vars, tmpdir, monkeypatch):
+ check_class = fake_check(
+ run_return={},
+ run_logs=[('thing', 'note')],
+ run_files=[
+ FileToSave('save.file', 'contents', None),
+ FileToSave('save.file', 'duplicate', None),
+ FileToSave('copy.file', None, 'foo'), # note: copy runs execute_module => exception
+ ],
+ )
+ task_vars['openshift_checks_output_dir'] = str(tmpdir)
+ check_class.get_var = lambda self, name, **_: task_vars.get(name)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
+ monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
+
+ plugin.run(tmp=None, task_vars=task_vars)
+ assert any(path.basename == task_vars['ansible_host'] for path in tmpdir.listdir())
+ assert any(path.basename == 'fake_check.log.json' for path in tmpdir.visit())
+ assert any(path.basename == 'save.file' for path in tmpdir.visit())
+ assert any(path.basename == 'save.file.2' for path in tmpdir.visit())
+
+
def test_action_plugin_resolve_checks_exception(plugin, task_vars, monkeypatch):
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -254,3 +311,38 @@ def test_resolve_checks_failure(names, all_checks, words_in_exception):
resolve_checks(names, all_checks)
for word in words_in_exception:
assert word in str(excinfo.value)
+
+
+@pytest.mark.parametrize('give_output_dir, result, expect_file', [
+ (False, None, False),
+ (True, dict(content="c3BhbQo=", encoding="base64"), True),
+ (True, dict(content="encoding error", encoding="base64"), False),
+ (True, dict(content="spam", no_encoding=None), True),
+ (True, dict(failed=True, msg="could not slurp"), False),
+])
+def test_copy_remote_file_to_dir(give_output_dir, result, expect_file, tmpdir):
+ check = fake_check()()
+ check.execute_module = lambda *args, **_: result
+ copy_remote_file_to_dir(check, "remote_file", str(tmpdir) if give_output_dir else "", "local_file")
+ assert expect_file == any(path.basename == "local_file" for path in tmpdir.listdir())
+
+
+def test_write_to_output_exceptions(tmpdir, monkeypatch, capsys):
+
+ class Spam(object):
+ def __str__(self):
+ raise Exception("break str")
+
+ test = {1: object(), 2: Spam()}
+ test[3] = test
+ write_result_to_output_dir(str(tmpdir), test)
+ assert "Error writing" in test["output_files"]
+
+ output_dir = tmpdir.join("eggs")
+ output_dir.write("spam") # so now it's not a dir
+ write_to_output_file(str(output_dir), "somefile", "somedata")
+ assert "Could not write" in capsys.readouterr()[1]
+
+ monkeypatch.setattr("openshift_health_check.prepare_output_dir", lambda *_: False)
+ write_result_to_output_dir(str(tmpdir), test)
+ assert "Error creating" in test["output_files"]
diff --git a/roles/openshift_health_checker/test/diagnostics_test.py b/roles/openshift_health_checker/test/diagnostics_test.py
new file mode 100644
index 000000000..800889fa7
--- /dev/null
+++ b/roles/openshift_health_checker/test/diagnostics_test.py
@@ -0,0 +1,50 @@
+import pytest
+
+from openshift_checks.diagnostics import DiagnosticCheck, OpenShiftCheckException
+
+
+@pytest.fixture()
+def task_vars():
+ return dict(
+ openshift=dict(
+ common=dict(config_base="/etc/origin/")
+ )
+ )
+
+
+def test_module_succeeds(task_vars):
+ check = DiagnosticCheck(lambda *_: {"result": "success"}, task_vars)
+ check.is_first_master = lambda: True
+ assert check.is_active()
+ check.exec_diagnostic("spam")
+ assert not check.failures
+
+
+def test_oc_not_there(task_vars):
+ def exec_module(*_):
+ return {"failed": True, "result": "[Errno 2] No such file or directory"}
+
+ check = DiagnosticCheck(exec_module, task_vars)
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.exec_diagnostic("spam")
+ assert excinfo.value.name == "OcNotFound"
+
+
+def test_module_fails(task_vars):
+ def exec_module(*_):
+ return {"failed": True, "result": "something broke"}
+
+ check = DiagnosticCheck(exec_module, task_vars)
+ check.exec_diagnostic("spam")
+ assert check.failures and check.failures[0].name == "OcDiagFailed"
+
+
+def test_names_executed(task_vars):
+ task_vars["openshift_check_diagnostics"] = diagnostics = "ConfigContexts,spam,,eggs"
+
+ def exec_module(module, args, *_):
+ assert "extra_args" in args
+ assert args["extra_args"][0] in diagnostics
+ return {"result": "success"}
+
+ DiagnosticCheck(exec_module, task_vars).run()
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
index f4fd2dfed..29a325a17 100644
--- a/roles/openshift_health_checker/test/disk_availability_test.py
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -4,11 +4,11 @@ from openshift_checks.disk_availability import DiskAvailability, OpenShiftCheckE
@pytest.mark.parametrize('group_names,is_active', [
- (['masters'], True),
- (['nodes'], True),
- (['etcd'], True),
- (['masters', 'nodes'], True),
- (['masters', 'etcd'], True),
+ (['oo_masters_to_config'], True),
+ (['oo_nodes_to_config'], True),
+ (['oo_etcd_to_config'], True),
+ (['oo_masters_to_config', 'oo_nodes_to_config'], True),
+ (['oo_masters_to_config', 'oo_etcd_to_config'], True),
([], False),
(['lb'], False),
(['nfs'], False),
@@ -39,7 +39,7 @@ def test_is_active(group_names, is_active):
])
def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
task_vars = dict(
- group_names=['masters'],
+ group_names=['oo_masters_to_config'],
ansible_mounts=ansible_mounts,
)
@@ -52,7 +52,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
@pytest.mark.parametrize('group_names,configured_min,ansible_mounts', [
(
- ['masters'],
+ ['oo_masters_to_config'],
0,
[{
'mount': '/',
@@ -60,7 +60,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
}],
),
(
- ['nodes'],
+ ['oo_nodes_to_config'],
0,
[{
'mount': '/',
@@ -68,7 +68,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
}],
),
(
- ['etcd'],
+ ['oo_etcd_to_config'],
0,
[{
'mount': '/',
@@ -76,7 +76,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
}],
),
(
- ['etcd'],
+ ['oo_etcd_to_config'],
1, # configure lower threshold
[{
'mount': '/',
@@ -84,7 +84,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
}],
),
(
- ['etcd'],
+ ['oo_etcd_to_config'],
0,
[{
# not enough space on / ...
@@ -112,7 +112,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
@pytest.mark.parametrize('name,group_names,configured_min,ansible_mounts,expect_chunks', [
(
'test with no space available',
- ['masters'],
+ ['oo_masters_to_config'],
0,
[{
'mount': '/',
@@ -122,7 +122,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
),
(
'test with a higher configured required value',
- ['masters'],
+ ['oo_masters_to_config'],
100, # set a higher threshold
[{
'mount': '/',
@@ -132,7 +132,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
),
(
'test with 1GB available, but "0" GB space requirement',
- ['nodes'],
+ ['oo_nodes_to_config'],
0,
[{
'mount': '/',
@@ -142,7 +142,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
),
(
'test with no space available, but "0" GB space requirement',
- ['etcd'],
+ ['oo_etcd_to_config'],
0,
[{
'mount': '/',
@@ -152,7 +152,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
),
(
'test with enough space for a node, but not for a master',
- ['nodes', 'masters'],
+ ['oo_nodes_to_config', 'oo_masters_to_config'],
0,
[{
'mount': '/',
@@ -162,7 +162,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
),
(
'test failure with enough space on "/", but not enough on "/var"',
- ['etcd'],
+ ['oo_etcd_to_config'],
0,
[{
# enough space on / ...
@@ -183,17 +183,18 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a
ansible_mounts=ansible_mounts,
)
- result = DiskAvailability(fake_execute_module, task_vars).run()
+ check = DiskAvailability(fake_execute_module, task_vars)
+ check.run()
- assert result['failed']
+ assert check.failures
for chunk in 'below recommended'.split() + expect_chunks:
- assert chunk in result.get('msg', '')
+ assert chunk in str(check.failures[0])
@pytest.mark.parametrize('name,group_names,context,ansible_mounts,failed,extra_words', [
(
'test without enough space for master under "upgrade" context',
- ['nodes', 'masters'],
+ ['oo_nodes_to_config', 'oo_masters_to_config'],
"upgrade",
[{
'mount': '/',
@@ -205,7 +206,7 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a
),
(
'test with enough space for master under "upgrade" context',
- ['nodes', 'masters'],
+ ['oo_nodes_to_config', 'oo_masters_to_config'],
"upgrade",
[{
'mount': '/',
@@ -217,7 +218,7 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a
),
(
'test with not enough space for master, and non-upgrade context',
- ['nodes', 'masters'],
+ ['oo_nodes_to_config', 'oo_masters_to_config'],
"health",
[{
'mount': '/',
@@ -237,11 +238,11 @@ def test_min_required_space_changes_with_upgrade_context(name, group_names, cont
)
check = DiskAvailability(fake_execute_module, task_vars)
- result = check.run()
+ check.run()
- assert result.get("failed", False) == failed
+ assert bool(check.failures) == failed
for word in extra_words:
- assert word in result.get('msg', '')
+ assert word in str(check.failures[0])
def fake_execute_module(*args):
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index 6a7c16c7e..dec99e5db 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -16,21 +16,19 @@ def task_vars():
),
openshift_deployment_type='origin',
openshift_image_tag='',
- group_names=['nodes', 'masters'],
+ group_names=['oo_nodes_to_config', 'oo_masters_to_config'],
)
@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
- ("origin", True, [], True),
- ("openshift-enterprise", True, [], True),
- ("enterprise", True, [], False),
- ("online", True, [], False),
("invalid", True, [], False),
("", True, [], False),
("origin", False, [], False),
("openshift-enterprise", False, [], False),
- ("origin", False, ["nodes", "masters"], True),
- ("openshift-enterprise", False, ["etcd"], False),
+ ("origin", False, ["oo_nodes_to_config", "oo_masters_to_config"], True),
+ ("openshift-enterprise", False, ["oo_etcd_to_config"], False),
+ ("origin", True, ["nfs"], False),
+ ("openshift-enterprise", True, ["lb"], False),
])
def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active):
task_vars['openshift_deployment_type'] = deployment_type
@@ -74,7 +72,7 @@ def test_all_images_available_remotely(task_vars, available_locally):
return {'images': [], 'failed': available_locally}
return {}
- task_vars['openshift']['docker']['additional_registries'] = ["docker.io", "registry.access.redhat.com"]
+ task_vars['openshift_docker_additional_registries'] = ["docker.io", "registry.access.redhat.com"]
task_vars['openshift_image_tag'] = 'v3.4'
check = DockerImageAvailability(execute_module, task_vars)
check._module_retry_interval = 0
@@ -92,7 +90,7 @@ def test_all_images_unavailable(task_vars):
return {} # docker_image_facts failure
- task_vars['openshift']['docker']['additional_registries'] = ["docker.io"]
+ task_vars['openshift_docker_additional_registries'] = ["docker.io"]
task_vars['openshift_deployment_type'] = "openshift-enterprise"
task_vars['openshift_image_tag'] = 'latest'
check = DockerImageAvailability(execute_module, task_vars)
@@ -100,7 +98,7 @@ def test_all_images_unavailable(task_vars):
actual = check.run()
assert actual['failed']
- assert "required Docker images are not available" in actual['msg']
+ assert "required container images are not available" in actual['msg']
@pytest.mark.parametrize("message,extra_words", [
@@ -123,7 +121,7 @@ def test_skopeo_update_failure(task_vars, message, extra_words):
return {}
- task_vars['openshift']['docker']['additional_registries'] = ["unknown.io"]
+ task_vars['openshift_docker_additional_registries'] = ["unknown.io"]
task_vars['openshift_deployment_type'] = "openshift-enterprise"
check = DockerImageAvailability(execute_module, task_vars)
check._module_retry_interval = 0
@@ -141,13 +139,13 @@ def test_skopeo_update_failure(task_vars, message, extra_words):
"spam/eggs:v1", ["test.reg"],
True, True,
False,
- {"test.reg": False},
+ {"test.reg": False, "docker.io": False},
),
(
"spam/eggs:v1", ["test.reg"],
False, True,
False,
- {"test.reg": True},
+ {"test.reg": True, "docker.io": True},
),
(
"eggs.reg/spam/eggs:v1", ["test.reg"],
@@ -164,17 +162,19 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
elif module_name == "command":
return dict(msg="msg", failed=skopeo_failed)
- check = DockerImageAvailability(execute_module, task_vars())
+ tv = task_vars()
+ tv.update({"openshift_docker_additional_registries": registries})
+ check = DockerImageAvailability(execute_module, tv)
check._module_retry_interval = 0
- available = check.is_available_skopeo_image(image, registries)
+ available = check.is_available_skopeo_image(image)
assert available == expect_success
assert expect_registries_reached == check.reachable_registries
@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [
( # standard set of stuff required on nodes
- "origin", False, ['nodes'], None,
+ "origin", False, ['oo_nodes_to_config'], "",
set([
'openshift/origin-pod:vtest',
'openshift/origin-deployer:vtest',
@@ -184,7 +184,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
])
),
( # set a different URL for images
- "origin", False, ['nodes'], 'foo.io/openshift/origin-${component}:${version}',
+ "origin", False, ['oo_nodes_to_config'], 'foo.io/openshift/origin-${component}:${version}',
set([
'foo.io/openshift/origin-pod:vtest',
'foo.io/openshift/origin-deployer:vtest',
@@ -194,7 +194,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
])
),
(
- "origin", True, ['nodes', 'masters', 'etcd'], None,
+ "origin", True, ['oo_nodes_to_config', 'oo_masters_to_config', 'oo_etcd_to_config'], "",
set([
# images running on top of openshift
'openshift/origin-pod:vtest',
@@ -210,7 +210,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
])
),
( # enterprise images
- "openshift-enterprise", True, ['nodes'], 'foo.io/openshift3/ose-${component}:f13ac45',
+ "openshift-enterprise", True, ['oo_nodes_to_config'], 'foo.io/openshift3/ose-${component}:f13ac45',
set([
'foo.io/openshift3/ose-pod:f13ac45',
'foo.io/openshift3/ose-deployer:f13ac45',
@@ -224,7 +224,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
])
),
(
- "openshift-enterprise", True, ['etcd', 'lb'], 'foo.io/openshift3/ose-${component}:f13ac45',
+ "openshift-enterprise", True, ['oo_etcd_to_config', 'lb'], 'foo.io/openshift3/ose-${component}:f13ac45',
set([
'registry.access.redhat.com/rhel7/etcd',
# lb does not yet come in a containerized version
@@ -257,7 +257,7 @@ def test_containerized_etcd():
),
),
openshift_deployment_type="origin",
- group_names=['etcd'],
+ group_names=['oo_etcd_to_config'],
)
expected = set(['registry.access.redhat.com/rhel7/etcd'])
assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py
index e0dccc062..8fa68c378 100644
--- a/roles/openshift_health_checker/test/docker_storage_test.py
+++ b/roles/openshift_health_checker/test/docker_storage_test.py
@@ -5,9 +5,9 @@ from openshift_checks.docker_storage import DockerStorage
@pytest.mark.parametrize('is_containerized, group_names, is_active', [
- (False, ["masters", "etcd"], False),
- (False, ["masters", "nodes"], True),
- (True, ["etcd"], True),
+ (False, ["oo_masters_to_config", "oo_etcd_to_config"], False),
+ (False, ["oo_masters_to_config", "oo_nodes_to_config"], True),
+ (True, ["oo_etcd_to_config"], True),
])
def test_is_active(is_containerized, group_names, is_active):
task_vars = dict(
diff --git a/roles/openshift_health_checker/test/elasticsearch_test.py b/roles/openshift_health_checker/test/elasticsearch_test.py
index 09bacd9ac..3fa5e8929 100644
--- a/roles/openshift_health_checker/test/elasticsearch_test.py
+++ b/roles/openshift_health_checker/test/elasticsearch_test.py
@@ -72,7 +72,7 @@ def test_check_elasticsearch():
assert_error_in_list('NoRunningPods', excinfo.value)
# canned oc responses to match so all the checks pass
- def exec_oc(cmd, args):
+ def exec_oc(cmd, args, **_):
if '_cat/master' in cmd:
return 'name logging-es'
elif '/_nodes' in cmd:
@@ -97,7 +97,7 @@ def test_check_running_es_pods():
def test_check_elasticsearch_masters():
pods = [plain_es_pod]
- check = canned_elasticsearch(task_vars_config_base, lambda *_: plain_es_pod['_test_master_name_str'])
+ check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: plain_es_pod['_test_master_name_str'])
assert not check.check_elasticsearch_masters(pods_by_name(pods))
@@ -117,7 +117,7 @@ def test_check_elasticsearch_masters():
])
def test_check_elasticsearch_masters_error(pods, expect_error):
test_pods = list(pods)
- check = canned_elasticsearch(task_vars_config_base, lambda *_: test_pods.pop(0)['_test_master_name_str'])
+ check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: test_pods.pop(0)['_test_master_name_str'])
assert_error_in_list(expect_error, check.check_elasticsearch_masters(pods_by_name(pods)))
@@ -129,7 +129,7 @@ es_node_list = {
def test_check_elasticsearch_node_list():
- check = canned_elasticsearch(task_vars_config_base, lambda *_: json.dumps(es_node_list))
+ check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: json.dumps(es_node_list))
assert not check.check_elasticsearch_node_list(pods_by_name([plain_es_pod]))
@@ -151,13 +151,13 @@ def test_check_elasticsearch_node_list():
),
])
def test_check_elasticsearch_node_list_errors(pods, node_list, expect_error):
- check = canned_elasticsearch(task_vars_config_base, lambda cmd, args: json.dumps(node_list))
+ check = canned_elasticsearch(task_vars_config_base, lambda cmd, args, **_: json.dumps(node_list))
assert_error_in_list(expect_error, check.check_elasticsearch_node_list(pods_by_name(pods)))
def test_check_elasticsearch_cluster_health():
test_health_data = [{"status": "green"}]
- check = canned_elasticsearch(exec_oc=lambda *_: json.dumps(test_health_data.pop(0)))
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: json.dumps(test_health_data.pop(0)))
assert not check.check_es_cluster_health(pods_by_name([plain_es_pod]))
@@ -175,12 +175,12 @@ def test_check_elasticsearch_cluster_health():
])
def test_check_elasticsearch_cluster_health_errors(pods, health_data, expect_error):
test_health_data = list(health_data)
- check = canned_elasticsearch(exec_oc=lambda *_: json.dumps(test_health_data.pop(0)))
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: json.dumps(test_health_data.pop(0)))
assert_error_in_list(expect_error, check.check_es_cluster_health(pods_by_name(pods)))
def test_check_elasticsearch_diskspace():
- check = canned_elasticsearch(exec_oc=lambda *_: 'IUse% Use%\n 3% 4%\n')
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: 'IUse% Use%\n 3% 4%\n')
assert not check.check_elasticsearch_diskspace(pods_by_name([plain_es_pod]))
@@ -199,5 +199,5 @@ def test_check_elasticsearch_diskspace():
),
])
def test_check_elasticsearch_diskspace_errors(disk_data, expect_error):
- check = canned_elasticsearch(exec_oc=lambda *_: disk_data)
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: disk_data)
assert_error_in_list(expect_error, check.check_elasticsearch_diskspace(pods_by_name([plain_es_pod])))
diff --git a/roles/openshift_health_checker/test/etcd_traffic_test.py b/roles/openshift_health_checker/test/etcd_traffic_test.py
index fae3e578d..dd6f4ad81 100644
--- a/roles/openshift_health_checker/test/etcd_traffic_test.py
+++ b/roles/openshift_health_checker/test/etcd_traffic_test.py
@@ -4,14 +4,14 @@ from openshift_checks.etcd_traffic import EtcdTraffic
@pytest.mark.parametrize('group_names,version,is_active', [
- (['masters'], "3.5", False),
- (['masters'], "3.6", False),
- (['nodes'], "3.4", False),
- (['etcd'], "3.4", True),
- (['etcd'], "1.5", True),
- (['etcd'], "3.1", False),
- (['masters', 'nodes'], "3.5", False),
- (['masters', 'etcd'], "3.5", True),
+ (['oo_masters_to_config'], "3.5", False),
+ (['oo_masters_to_config'], "3.6", False),
+ (['oo_nodes_to_config'], "3.4", False),
+ (['oo_etcd_to_config'], "3.4", True),
+ (['oo_etcd_to_config'], "1.5", True),
+ (['oo_etcd_to_config'], "3.1", False),
+ (['oo_masters_to_config', 'oo_nodes_to_config'], "3.5", False),
+ (['oo_masters_to_config', 'oo_etcd_to_config'], "3.5", True),
([], "3.4", False),
])
def test_is_active(group_names, version, is_active):
@@ -23,9 +23,9 @@ def test_is_active(group_names, version, is_active):
@pytest.mark.parametrize('group_names,matched,failed,extra_words', [
- (["masters"], True, True, ["Higher than normal", "traffic"]),
- (["masters", "etcd"], False, False, []),
- (["etcd"], False, False, []),
+ (["oo_masters_to_config"], True, True, ["Higher than normal", "traffic"]),
+ (["oo_masters_to_config", "oo_etcd_to_config"], False, False, []),
+ (["oo_etcd_to_config"], False, False, []),
])
def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words):
def execute_module(module_name, *_):
diff --git a/roles/openshift_health_checker/test/fluentd_config_test.py b/roles/openshift_health_checker/test/fluentd_config_test.py
index 10db253bc..b5b4858d6 100644
--- a/roles/openshift_health_checker/test/fluentd_config_test.py
+++ b/roles/openshift_health_checker/test/fluentd_config_test.py
@@ -82,7 +82,7 @@ def test_check_logging_config_non_master(name, use_journald, logging_driver, ext
return {}
task_vars = dict(
- group_names=["nodes", "etcd"],
+ group_names=["oo_nodes_to_config", "oo_etcd_to_config"],
openshift_logging_fluentd_use_journal=use_journald,
openshift=dict(
common=dict(config_base=""),
@@ -128,7 +128,7 @@ def test_check_logging_config_non_master_failed(name, use_journald, logging_driv
return {}
task_vars = dict(
- group_names=["nodes", "etcd"],
+ group_names=["oo_nodes_to_config", "oo_etcd_to_config"],
openshift_logging_fluentd_use_journal=use_journald,
openshift=dict(
common=dict(config_base=""),
@@ -192,7 +192,7 @@ def test_check_logging_config_master(name, pods, logging_driver, extra_words):
return {}
task_vars = dict(
- group_names=["masters"],
+ group_names=["oo_masters_to_config"],
openshift=dict(
common=dict(config_base=""),
),
@@ -274,7 +274,7 @@ def test_check_logging_config_master_failed(name, pods, logging_driver, words):
return {}
task_vars = dict(
- group_names=["masters"],
+ group_names=["oo_masters_to_config"],
openshift=dict(
common=dict(config_base=""),
),
@@ -331,7 +331,7 @@ def test_check_logging_config_master_fails_on_unscheduled_deployment(name, pods,
return {}
task_vars = dict(
- group_names=["masters"],
+ group_names=["oo_masters_to_config"],
openshift=dict(
common=dict(config_base=""),
),
diff --git a/roles/openshift_health_checker/test/logging_check_test.py b/roles/openshift_health_checker/test/logging_check_test.py
index 1a1c190f6..59c703214 100644
--- a/roles/openshift_health_checker/test/logging_check_test.py
+++ b/roles/openshift_health_checker/test/logging_check_test.py
@@ -98,21 +98,19 @@ def test_oc_failure(problem, expect):
assert expect in str(excinfo)
-groups_with_first_master = dict(masters=['this-host', 'other-host'])
-groups_with_second_master = dict(masters=['other-host', 'this-host'])
-groups_not_a_master = dict(masters=['other-host'])
+groups_with_first_master = dict(oo_first_master=['this-host'])
+groups_not_a_master = dict(oo_first_master=['other-host'], oo_masters=['other-host'])
@pytest.mark.parametrize('groups, logging_deployed, is_active', [
(groups_with_first_master, True, True),
(groups_with_first_master, False, False),
(groups_not_a_master, True, False),
- (groups_with_second_master, True, False),
(groups_not_a_master, True, False),
])
def test_is_active(groups, logging_deployed, is_active):
task_vars = dict(
- ansible_ssh_host='this-host',
+ ansible_host='this-host',
groups=groups,
openshift_hosted_logging_deploy=logging_deployed,
)
diff --git a/roles/openshift_health_checker/test/logging_index_time_test.py b/roles/openshift_health_checker/test/logging_index_time_test.py
index 22566b295..c48ade9b8 100644
--- a/roles/openshift_health_checker/test/logging_index_time_test.py
+++ b/roles/openshift_health_checker/test/logging_index_time_test.py
@@ -102,7 +102,7 @@ def test_with_running_pods():
),
], ids=lambda argval: argval[0])
def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout):
- check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response))
check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout)
@@ -131,7 +131,7 @@ def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout):
)
], ids=lambda argval: argval[0])
def test_wait_until_cmd_or_err(name, json_response, timeout, expect_error):
- check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response))
with pytest.raises(OpenShiftCheckException) as error:
check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, SAMPLE_UUID, timeout)
@@ -139,7 +139,7 @@ def test_wait_until_cmd_or_err(name, json_response, timeout, expect_error):
def test_curl_kibana_with_uuid():
- check = canned_loggingindextime(lambda *_: json.dumps({"statusCode": 404}))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps({"statusCode": 404}))
check.generate_uuid = lambda: SAMPLE_UUID
assert SAMPLE_UUID == check.curl_kibana_with_uuid(plain_running_kibana_pod)
@@ -161,7 +161,7 @@ def test_curl_kibana_with_uuid():
),
], ids=lambda argval: argval[0])
def test_failed_curl_kibana_with_uuid(name, json_response, expect_error):
- check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response))
check.generate_uuid = lambda: SAMPLE_UUID
with pytest.raises(OpenShiftCheckException) as error:
diff --git a/roles/openshift_health_checker/test/memory_availability_test.py b/roles/openshift_health_checker/test/memory_availability_test.py
index aee2f0416..5ec83dd79 100644
--- a/roles/openshift_health_checker/test/memory_availability_test.py
+++ b/roles/openshift_health_checker/test/memory_availability_test.py
@@ -4,11 +4,11 @@ from openshift_checks.memory_availability import MemoryAvailability
@pytest.mark.parametrize('group_names,is_active', [
- (['masters'], True),
- (['nodes'], True),
- (['etcd'], True),
- (['masters', 'nodes'], True),
- (['masters', 'etcd'], True),
+ (['oo_masters_to_config'], True),
+ (['oo_nodes_to_config'], True),
+ (['oo_etcd_to_config'], True),
+ (['oo_masters_to_config', 'oo_nodes_to_config'], True),
+ (['oo_masters_to_config', 'oo_etcd_to_config'], True),
([], False),
(['lb'], False),
(['nfs'], False),
@@ -22,32 +22,32 @@ def test_is_active(group_names, is_active):
@pytest.mark.parametrize('group_names,configured_min,ansible_memtotal_mb', [
(
- ['masters'],
+ ['oo_masters_to_config'],
0,
17200,
),
(
- ['nodes'],
+ ['oo_nodes_to_config'],
0,
8200,
),
(
- ['nodes'],
+ ['oo_nodes_to_config'],
1, # configure lower threshold
2000, # too low for recommended but not for configured
),
(
- ['nodes'],
+ ['oo_nodes_to_config'],
2, # configure threshold where adjustment pushes it over
1900,
),
(
- ['etcd'],
+ ['oo_etcd_to_config'],
0,
8200,
),
(
- ['masters', 'nodes'],
+ ['oo_masters_to_config', 'oo_nodes_to_config'],
0,
17000,
),
@@ -66,43 +66,43 @@ def test_succeeds_with_recommended_memory(group_names, configured_min, ansible_m
@pytest.mark.parametrize('group_names,configured_min,ansible_memtotal_mb,extra_words', [
(
- ['masters'],
+ ['oo_masters_to_config'],
0,
0,
['0.0 GiB'],
),
(
- ['nodes'],
+ ['oo_nodes_to_config'],
0,
100,
['0.1 GiB'],
),
(
- ['nodes'],
+ ['oo_nodes_to_config'],
24, # configure higher threshold
20 * 1024, # enough to meet recommended but not configured
['20.0 GiB'],
),
(
- ['nodes'],
+ ['oo_nodes_to_config'],
24, # configure higher threshold
22 * 1024, # not enough for adjustment to push over threshold
['22.0 GiB'],
),
(
- ['etcd'],
+ ['oo_etcd_to_config'],
0,
6 * 1024,
['6.0 GiB'],
),
(
- ['etcd', 'masters'],
+ ['oo_etcd_to_config', 'oo_masters_to_config'],
0,
9 * 1024, # enough memory for etcd, not enough for a master
['9.0 GiB'],
),
(
- ['nodes', 'masters'],
+ ['oo_nodes_to_config', 'oo_masters_to_config'],
0,
# enough memory for a node, not enough for a master
11 * 1024,
diff --git a/roles/openshift_health_checker/test/openshift_check_test.py b/roles/openshift_health_checker/test/openshift_check_test.py
index 789784c77..bc0c3b26c 100644
--- a/roles/openshift_health_checker/test/openshift_check_test.py
+++ b/roles/openshift_health_checker/test/openshift_check_test.py
@@ -106,13 +106,40 @@ def test_get_var_convert(task_vars, keys, convert, expected):
assert dummy_check(task_vars).get_var(*keys, convert=convert) == expected
-@pytest.mark.parametrize("keys, convert", [
- (("bar", "baz"), int),
- (("bar.baz"), float),
- (("foo"), "bogus"),
- (("foo"), lambda a, b: 1),
- (("foo"), lambda a: 1 / 0),
+def convert_oscexc(_):
+ raise OpenShiftCheckException("known failure")
+
+
+def convert_exc(_):
+ raise Exception("failure unknown")
+
+
+@pytest.mark.parametrize("keys, convert, expect_text", [
+ (("bar", "baz"), int, "Cannot convert"),
+ (("bar.baz",), float, "Cannot convert"),
+ (("foo",), "bogus", "TypeError"),
+ (("foo",), lambda a, b: 1, "TypeError"),
+ (("foo",), lambda a: 1 / 0, "ZeroDivisionError"),
+ (("foo",), convert_oscexc, "known failure"),
+ (("foo",), convert_exc, "failure unknown"),
])
-def test_get_var_convert_error(task_vars, keys, convert):
- with pytest.raises(OpenShiftCheckException):
+def test_get_var_convert_error(task_vars, keys, convert, expect_text):
+ with pytest.raises(OpenShiftCheckException) as excinfo:
dummy_check(task_vars).get_var(*keys, convert=convert)
+ assert expect_text in str(excinfo.value)
+
+
+def test_register(task_vars):
+ check = dummy_check(task_vars)
+
+ check.register_failure(OpenShiftCheckException("spam"))
+ assert "spam" in str(check.failures[0])
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.register_file("spam") # no file contents specified
+ assert "not specified" in str(excinfo.value)
+
+ # normally execute_module registers the result file; test disabling that
+ check._execute_module = lambda *args, **_: dict()
+ check.execute_module("eggs", module_args={}, register=False)
+ assert not check.files_to_save
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
index e1bf29d2a..5a82a43bf 100644
--- a/roles/openshift_health_checker/test/ovs_version_test.py
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -50,7 +50,7 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
)
- return_value = object()
+ return_value = {} # note: check.execute_module modifies return dict contents
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'rpm_version'
@@ -67,14 +67,14 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):
@pytest.mark.parametrize('group_names,is_containerized,is_active', [
- (['masters'], False, True),
+ (['oo_masters_to_config'], False, True),
# ensure check is skipped on containerized installs
- (['masters'], True, False),
- (['nodes'], False, True),
- (['masters', 'nodes'], False, True),
- (['masters', 'etcd'], False, True),
+ (['oo_masters_to_config'], True, False),
+ (['oo_nodes_to_config'], False, True),
+ (['oo_masters_to_config', 'oo_nodes_to_config'], False, True),
+ (['oo_masters_to_config', 'oo_etcd_to_config'], False, True),
([], False, False),
- (['etcd'], False, False),
+ (['oo_etcd_to_config'], False, False),
(['lb'], False, False),
(['nfs'], False, False),
])
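The switch from return_value = object() to return_value = {} in these tests follows from the note above: check.execute_module now amends the returned result in place, and a bare object() has nothing to amend. A minimal Python sketch of the distinction (variable names here are illustrative only):

    result = object()
    # result.update(changed=False)  # AttributeError: 'object' object has no attribute 'update'

    result = {}
    result.update(changed=False)    # fine: a plain dict can be amended by the caller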
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index 8aa87ca59..9815acb38 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -26,7 +26,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
(
dict(
openshift=dict(common=dict(service_type='origin')),
- group_names=['masters'],
+ group_names=['oo_masters_to_config'],
),
set(['origin-master']),
set(['origin-node']),
@@ -34,7 +34,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
(
dict(
openshift=dict(common=dict(service_type='atomic-openshift')),
- group_names=['nodes'],
+ group_names=['oo_nodes_to_config'],
),
set(['atomic-openshift-node']),
set(['atomic-openshift-master']),
@@ -42,14 +42,14 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
(
dict(
openshift=dict(common=dict(service_type='atomic-openshift')),
- group_names=['masters', 'nodes'],
+ group_names=['oo_masters_to_config', 'oo_nodes_to_config'],
),
set(['atomic-openshift-master', 'atomic-openshift-node']),
set(),
),
])
def test_package_availability(task_vars, must_have_packages, must_not_have_packages):
- return_value = object()
+ return_value = {}
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'check_yum_update'
diff --git a/roles/openshift_health_checker/test/package_update_test.py b/roles/openshift_health_checker/test/package_update_test.py
index 7d9035a36..85d3c9cab 100644
--- a/roles/openshift_health_checker/test/package_update_test.py
+++ b/roles/openshift_health_checker/test/package_update_test.py
@@ -2,7 +2,7 @@ from openshift_checks.package_update import PackageUpdate
def test_package_update():
- return_value = object()
+ return_value = {}
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'check_yum_update'
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index 8564cd4db..3cf4ce033 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -97,14 +97,14 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc
@pytest.mark.parametrize('group_names,is_containerized,is_active', [
- (['masters'], False, True),
+ (['oo_masters_to_config'], False, True),
# ensure check is skipped on containerized installs
- (['masters'], True, False),
- (['nodes'], False, True),
- (['masters', 'nodes'], False, True),
- (['masters', 'etcd'], False, True),
+ (['oo_masters_to_config'], True, False),
+ (['oo_nodes_to_config'], False, True),
+ (['oo_masters_to_config', 'oo_nodes_to_config'], False, True),
+ (['oo_masters_to_config', 'oo_etcd_to_config'], False, True),
([], False, False),
- (['etcd'], False, False),
+ (['oo_etcd_to_config'], False, False),
(['lb'], False, False),
(['nfs'], False, False),
])
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index 712a2a591..c234c3740 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -1,14 +1,33 @@
---
+##########
+# Common #
+##########
+openshift_hosted_infra_selector: "region=infra"
+r_openshift_hosted_use_calico_default: "{{ openshift_use_calico | default(False) }}"
+r_openshift_hosted_use_calico: "{{ r_openshift_hosted_use_calico_default }}"
+
+openshift_default_projects:
+ default:
+ default_node_selector: ''
+ logging:
+ default_node_selector: ''
+ openshift-infra:
+ default_node_selector: ''
+
+# openshift_additional_projects shares the same format as openshift_default_projects
+openshift_additional_projects: {}
+
+openshift_config_base: "/etc/origin"
+openshift_master_config_dir: "{{ openshift.common.config_base | default(openshift_config_base) }}/master"
+openshift_cluster_domain: 'cluster.local'
+
+##########
+# Router #
+##########
r_openshift_hosted_router_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
-r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
-r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
-
openshift_hosted_router_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
-openshift_hosted_registry_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
-
-registry_volume_claim: 'registry-claim'
openshift_hosted_router_edits:
- key: spec.strategy.rollingParams.intervalSeconds
@@ -36,20 +55,49 @@ openshift_hosted_routers:
certificate: "{{ openshift_hosted_router_certificate | default({}) }}"
openshift_hosted_router_certificate: {}
-openshift_hosted_registry_cert_expire_days: 730
openshift_hosted_router_create_certificate: True
r_openshift_hosted_router_os_firewall_deny: []
r_openshift_hosted_router_os_firewall_allow: []
+############
+# Registry #
+############
+
+r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+
+openshift_hosted_registry_name: docker-registry
+openshift_hosted_registry_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
+registry_volume_claim: 'registry-claim'
+openshift_hosted_registry_cert_expire_days: 730
+
r_openshift_hosted_registry_os_firewall_deny: []
r_openshift_hosted_registry_os_firewall_allow:
- service: Docker Registry Port
port: 5000/tcp
cond: "{{ r_openshift_hosted_use_calico }}"
-# NOTE
-# r_openshift_hosted_use_calico_default may be defined external to this role.
-# openshift_use_calico, if defined, may affect other roles or play behavior.
-r_openshift_hosted_use_calico_default: "{{ openshift_use_calico | default(False) }}"
-r_openshift_hosted_use_calico: "{{ r_openshift_hosted_use_calico_default }}"
+openshift_hosted_registry_serviceaccount: registry
+openshift_hosted_registry_volumes: []
+openshift_hosted_registry_env_vars: {}
+
+# These edits are being specified only to prevent 'changed' on rerun
+openshift_hosted_registry_edits:
+- key: spec.strategy.rollingParams
+ value:
+ intervalSeconds: 1
+ maxSurge: "25%"
+ maxUnavailable: "25%"
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ action: put
+
+openshift_hosted_registry_force:
+- False
+
+openshift_push_via_dns: False
+
+# NOTE: setting openshift_docker_hosted_registry_insecure may affect other roles
+openshift_hosted_docker_registry_insecure_default: "{{ openshift_docker_hosted_registry_insecure | default(False) }}"
+openshift_hosted_docker_registry_insecure: "{{ openshift_hosted_docker_registry_insecure_default }}"
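As noted in the defaults above, openshift_additional_projects shares the layout of openshift_default_projects: a dict keyed by project name with a default_node_selector per project. A minimal inventory sketch (the project name and selector value below are hypothetical, not defaults added by this patch):

    openshift_additional_projects:
      my-project:
        default_node_selector: 'region=primary'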
diff --git a/roles/openshift_hosted/filter_plugins/filters.py b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py
index 7f41529ac..7f41529ac 100644
--- a/roles/openshift_hosted/filter_plugins/filters.py
+++ b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py
diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml
index 28fd396d6..1d70ef7eb 100644
--- a/roles/openshift_hosted/meta/main.yml
+++ b/roles/openshift_hosted/meta/main.yml
@@ -12,7 +12,6 @@ galaxy_info:
categories:
- cloud
dependencies:
-- role: openshift_cli
- role: openshift_hosted_facts
- role: lib_openshift
- role: lib_os_firewall
diff --git a/roles/openshift_hosted/tasks/create_projects.yml b/roles/openshift_hosted/tasks/create_projects.yml
new file mode 100644
index 000000000..1b25d0c64
--- /dev/null
+++ b/roles/openshift_hosted/tasks/create_projects.yml
@@ -0,0 +1,14 @@
+---
+- name: Create default projects
+ oc_project:
+ name: "{{ item.key }}"
+ node_selector:
+ - "{{ item.value.default_node_selector }}"
+ with_dict: "{{ openshift_default_projects }}"
+
+- name: Create additional projects
+ oc_project:
+ name: "{{ item.key }}"
+ node_selector:
+ - "{{ item.value.default_node_selector }}"
+ with_dict: "{{ openshift_additional_projects }}"
diff --git a/roles/openshift_hosted/tasks/router/firewall.yml b/roles/openshift_hosted/tasks/firewall.yml
index ff90f3372..1eb2c92c8 100644
--- a/roles/openshift_hosted/tasks/router/firewall.yml
+++ b/roles/openshift_hosted/tasks/firewall.yml
@@ -8,7 +8,7 @@
protocol: "{{ item.port.split('/')[1] }}"
port: "{{ item.port.split('/')[0] }}"
when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_router_os_firewall_allow }}"
+ with_items: "{{ l_openshift_hosted_fw_allow }}"
- name: Remove iptables rules
os_firewall_manage_iptables:
@@ -17,9 +17,9 @@
protocol: "{{ item.port.split('/')[1] }}"
port: "{{ item.port.split('/')[0] }}"
when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_router_os_firewall_deny }}"
+ with_items: "{{ l_openshift_hosted_fw_deny }}"
-- when: r_openshift_hosted_router_firewall_enabled | bool and r_openshift_hosted_router_use_firewalld | bool
+- when: l_openshift_hosted_firewall_enabled | bool and l_openshift_hosted_use_firewalld | bool
block:
- name: Add firewalld allow rules
firewalld:
@@ -28,7 +28,7 @@
immediate: true
state: enabled
when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_router_os_firewall_allow }}"
+ with_items: "{{ l_openshift_hosted_fw_allow }}"
- name: Remove firewalld allow rules
firewalld:
@@ -37,4 +37,4 @@
immediate: true
state: disabled
when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_router_os_firewall_deny }}"
+ with_items: "{{ l_openshift_hosted_fw_deny }}"
diff --git a/roles/openshift_hosted/tasks/main.yml b/roles/openshift_hosted/tasks/main.yml
index 6efe2f63c..d306adf42 100644
--- a/roles/openshift_hosted/tasks/main.yml
+++ b/roles/openshift_hosted/tasks/main.yml
@@ -1,13 +1,9 @@
---
-- name: Create projects
- oc_project:
- name: "{{ item.key }}"
- node_selector:
- - "{{ item.value.default_node_selector }}"
- with_dict: "{{ openshift_projects }}"
-
-- include: router/router.yml
- when: openshift_hosted_manage_router | default(true) | bool
-
-- include: registry/registry.yml
- when: openshift_hosted_manage_registry | default(true) | bool
+# This role is intended to be used with include_role.
+# include_role:
+# name: openshift_hosted
+# tasks_from: "{{ item }}"
+# with_items:
+# - create_projects.yml
+# - router.yml
+# - registry.yml
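Spelled out as a play, the include_role pattern sketched in the comments above might look like the following (the play name and hosts value are illustrative; any play with the openshift facts and lib_openshift modules already in scope would do):

    - name: Configure hosted components
      hosts: oo_first_master
      tasks:
      - include_role:
          name: openshift_hosted
          tasks_from: "{{ item }}"
        with_items:
        - create_projects.yml
        - router.yml
        - registry.yml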
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry.yml
index 48f53aef8..f1aa9c5a8 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry.yml
@@ -1,7 +1,11 @@
---
- name: setup firewall
include: firewall.yml
- static: yes
+ vars:
+ l_openshift_hosted_firewall_enabled: "{{ r_openshift_hosted_registry_firewall_enabled }}"
+ l_openshift_hosted_use_firewalld: "{{ r_openshift_hosted_registry_use_firewalld }}"
+ l_openshift_hosted_fw_allow: "{{ r_openshift_hosted_registry_os_firewall_allow }}"
+ l_openshift_hosted_fw_deny: "{{ r_openshift_hosted_registry_os_firewall_deny }}"
- when: openshift.hosted.registry.replicas | default(none) is none
block:
@@ -36,30 +40,14 @@
- name: set openshift_hosted facts
set_fact:
openshift_hosted_registry_replicas: "{{ openshift.hosted.registry.replicas | default(l_default_replicas) }}"
- openshift_hosted_registry_name: docker-registry
- openshift_hosted_registry_serviceaccount: registry
openshift_hosted_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
openshift_hosted_registry_selector: "{{ openshift.hosted.registry.selector }}"
openshift_hosted_registry_images: "{{ openshift.hosted.registry.registryurl | default('openshift3/ose-${component}:${version}')}}"
- openshift_hosted_registry_volumes: []
- openshift_hosted_registry_env_vars: {}
- openshift_hosted_registry_edits:
- # These edits are being specified only to prevent 'changed' on rerun
- - key: spec.strategy.rollingParams
- value:
- intervalSeconds: 1
- maxSurge: "25%"
- maxUnavailable: "25%"
- timeoutSeconds: 600
- updatePeriodSeconds: 1
- action: put
- openshift_hosted_registry_force:
- - False
- name: Update registry environment variables when pushing via dns
set_fact:
openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine({'OPENSHIFT_DEFAULT_REGISTRY':'docker-registry.default.svc:5000'}) }}"
- when: openshift_push_via_dns | default(false) | bool
+ when: openshift_push_via_dns | bool
- name: Update registry proxy settings for dc/docker-registry
set_fact:
@@ -137,36 +125,17 @@
edits: "{{ openshift_hosted_registry_edits }}"
force: "{{ True|bool in openshift_hosted_registry_force }}"
-- when: openshift_hosted_registry_wait | bool
- block:
- - name: Ensure OpenShift registry correctly rolls out (best-effort today)
- command: |
- oc rollout status deploymentconfig {{ openshift_hosted_registry_name }} \
- --namespace {{ openshift_hosted_registry_namespace }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig
- async: 600
- poll: 15
- failed_when: false
-
- - name: Determine the latest version of the OpenShift registry deployment
- command: |
- {{ openshift.common.client_binary }} get deploymentconfig {{ openshift_hosted_registry_name }} \
- --namespace {{ openshift_hosted_registry_namespace }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
- -o jsonpath='{ .status.latestVersion }'
- register: openshift_hosted_registry_latest_version
-
- - name: Sanity-check that the OpenShift registry rolled out correctly
- command: |
- {{ openshift.common.client_binary }} get replicationcontroller {{ openshift_hosted_registry_name }}-{{ openshift_hosted_registry_latest_version.stdout }} \
- --namespace {{ openshift_hosted_registry_namespace }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
- -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
- register: openshift_hosted_registry_rc_phase
- until: "'Running' not in openshift_hosted_registry_rc_phase.stdout"
- delay: 15
- retries: 40
- failed_when: "'Failed' in openshift_hosted_registry_rc_phase.stdout"
+- name: setup registry list
+ set_fact:
+ r_openshift_hosted_registry_list:
+ - name: "{{ openshift_hosted_registry_name }}"
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+
+- name: Wait for pod (Registry)
+ include: wait_for_pod.yml
+ vars:
+ l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_registry_wait }}"
+ l_openshift_hosted_wfp_items: "{{ r_openshift_hosted_registry_list }}"
- include: storage/glusterfs.yml
when:
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router.yml
index 2a42b5a7c..2aceef9e4 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router.yml
@@ -1,7 +1,11 @@
---
- name: setup firewall
include: firewall.yml
- static: yes
+ vars:
+ l_openshift_hosted_firewall_enabled: "{{ r_openshift_hosted_router_firewall_enabled }}"
+ l_openshift_hosted_use_firewalld: "{{ r_openshift_hosted_router_use_firewalld }}"
+ l_openshift_hosted_fw_allow: "{{ r_openshift_hosted_router_os_firewall_allow }}"
+ l_openshift_hosted_fw_deny: "{{ r_openshift_hosted_router_os_firewall_deny }}"
- name: Retrieve list of openshift nodes matching router selector
oc_obj:
@@ -48,9 +52,9 @@
certfile: "{{ openshift_master_config_dir ~ '/openshift-router.crt' }}"
keyfile: "{{ openshift_master_config_dir ~ '/openshift-router.key' }}"
cafile: "{{ openshift_master_config_dir ~ '/ca.crt' }}"
-
- # End Block
- when: ( openshift_hosted_router_create_certificate | bool ) and openshift_hosted_router_certificate == {}
+ when:
+ - openshift_hosted_router_create_certificate | bool
+ - openshift_hosted_router_certificate == {}
- name: Create the router service account(s)
oc_serviceaccount:
@@ -82,7 +86,7 @@
replicas: "{{ item.replicas }}"
namespace: "{{ item.namespace | default('default') }}"
# This option is not yet implemented
- # force_subdomain: "{{ openshift.hosted.router.force_subdomain | default(none) }}"
+ # force_subdomain: "{{ openshift_hosted_router_force_subdomain | default(none) }}"
service_account: "{{ item.serviceaccount | default('router') }}"
selector: "{{ item.selector | default(none) }}"
images: "{{ item.images | default(omit) }}"
@@ -94,38 +98,8 @@
stats_port: "{{ item.stats_port }}"
with_items: "{{ openshift_hosted_routers }}"
-- when: openshift_hosted_router_wait | bool
- block:
- - name: Ensure OpenShift router correctly rolls out (best-effort today)
- command: |
- {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \
- --namespace {{ item.namespace | default('default') }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig
- async: 600
- poll: 15
- with_items: "{{ openshift_hosted_routers }}"
- failed_when: false
-
- - name: Determine the latest version of the OpenShift router deployment
- command: |
- {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \
- --namespace {{ item.namespace }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
- -o jsonpath='{ .status.latestVersion }'
- register: openshift_hosted_routers_latest_version
- with_items: "{{ openshift_hosted_routers }}"
-
- - name: Poll for OpenShift router deployment success
- command: |
- {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
- --namespace {{ item.0.namespace }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
- -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
- register: openshift_hosted_router_rc_phase
- until: "'Running' not in openshift_hosted_router_rc_phase.stdout"
- delay: 15
- retries: 40
- failed_when: "'Failed' in openshift_hosted_router_rc_phase.stdout"
- with_together:
- - "{{ openshift_hosted_routers }}"
- - "{{ openshift_hosted_routers_latest_version.results }}"
+- name: Wait for pod (Routers)
+ include: wait_for_pod.yml
+ vars:
+ l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_router_wait }}"
+ l_openshift_hosted_wfp_items: "{{ openshift_hosted_routers }}"
diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/secure.yml
index a8a6f6fc8..0da8ac8a7 100644
--- a/roles/openshift_hosted/tasks/registry/secure.yml
+++ b/roles/openshift_hosted/tasks/secure.yml
@@ -1,7 +1,7 @@
---
- name: Configure facts for docker-registry
set_fact:
- openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routecertificates, {}) }}"
+ openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift_hosted_registry_routecertificates, {}) }}"
openshift_hosted_registry_routehost: "{{ ('routehost' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routehost, False) }}"
openshift_hosted_registry_routetermination: "{{ ('routetermination' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routetermination, 'passthrough') }}"
@@ -38,11 +38,11 @@
- "{{ docker_registry_service.results.clusterip }}"
- "{{ docker_registry_route.results[0].spec.host }}"
- "{{ openshift_hosted_registry_name }}.default.svc"
- - "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift.common.dns_domain }}"
+ - "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift_cluster_domain }}"
- "{{ openshift_hosted_registry_routehost }}"
cert: "{{ docker_registry_cert_path }}"
key: "{{ docker_registry_key_path }}"
- expire_days: "{{ openshift_hosted_registry_cert_expire_days if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool else omit }}"
+ expire_days: "{{ openshift_hosted_registry_cert_expire_days if openshift_version | oo_version_gte_3_5_or_1_5(openshift_deployment_type) | bool else omit }}"
register: registry_self_cert
when: docker_registry_self_signed
diff --git a/roles/openshift_hosted/tasks/registry/secure/passthrough.yml b/roles/openshift_hosted/tasks/secure/passthrough.yml
index 5b44fda10..5b44fda10 100644
--- a/roles/openshift_hosted/tasks/registry/secure/passthrough.yml
+++ b/roles/openshift_hosted/tasks/secure/passthrough.yml
diff --git a/roles/openshift_hosted/tasks/registry/secure/reencrypt.yml b/roles/openshift_hosted/tasks/secure/reencrypt.yml
index 48e5b0fba..48e5b0fba 100644
--- a/roles/openshift_hosted/tasks/registry/secure/reencrypt.yml
+++ b/roles/openshift_hosted/tasks/secure/reencrypt.yml
diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml
index c2954fde1..c2954fde1 100644
--- a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml
diff --git a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml b/roles/openshift_hosted/tasks/storage/object_storage.yml
index 8553a8098..8553a8098 100644
--- a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
+++ b/roles/openshift_hosted/tasks/storage/object_storage.yml
diff --git a/roles/openshift_hosted/tasks/registry/storage/registry_config.j2 b/roles/openshift_hosted/tasks/storage/registry_config.j2
index f3e82ad4f..f3e82ad4f 120000
--- a/roles/openshift_hosted/tasks/registry/storage/registry_config.j2
+++ b/roles/openshift_hosted/tasks/storage/registry_config.j2
diff --git a/roles/openshift_hosted/tasks/registry/storage/s3.yml b/roles/openshift_hosted/tasks/storage/s3.yml
index 318969885..8e905d905 100644
--- a/roles/openshift_hosted/tasks/registry/storage/s3.yml
+++ b/roles/openshift_hosted/tasks/storage/s3.yml
@@ -3,7 +3,7 @@
assert:
that:
- openshift.hosted.registry.storage.s3.bucket | default(none) is not none
- - openshift.hosted.registry.storage.s3.region | default(none) is not none
+ - openshift.hosted.registry.storage.s3.bucket | default(none) is not none
msg: |
When using S3 storage, the following variables are required:
openshift_hosted_registry_storage_s3_bucket
diff --git a/roles/openshift_hosted/tasks/wait_for_pod.yml b/roles/openshift_hosted/tasks/wait_for_pod.yml
new file mode 100644
index 000000000..056c79334
--- /dev/null
+++ b/roles/openshift_hosted/tasks/wait_for_pod.yml
@@ -0,0 +1,36 @@
+---
+- when: l_openshift_hosted_wait_for_pod | default(False) | bool
+ block:
+ - name: Ensure OpenShift pod correctly rolls out (best-effort today)
+ command: |
+ {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \
+ --namespace {{ item.namespace | default('default') }} \
+ --config {{ openshift_master_config_dir }}/admin.kubeconfig
+ async: 600
+ poll: 15
+ with_items: "{{ l_openshift_hosted_wfp_items }}"
+ failed_when: false
+
+ - name: Determine the latest version of the OpenShift pod deployment
+ command: |
+ {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \
+ --namespace {{ item.namespace }} \
+ --config {{ openshift_master_config_dir }}/admin.kubeconfig \
+ -o jsonpath='{ .status.latestVersion }'
+ register: l_openshift_hosted_wfp_latest_version
+ with_items: "{{ l_openshift_hosted_wfp_items }}"
+
+ - name: Poll for OpenShift pod deployment success
+ command: |
+ {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
+ --namespace {{ item.0.namespace }} \
+ --config {{ openshift_master_config_dir }}/admin.kubeconfig \
+ -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
+ register: openshift_hosted_wfp_rc_phase
+ until: "'Running' not in openshift_hosted_wfp_rc_phase.stdout"
+ delay: 15
+ retries: 40
+ failed_when: "'Failed' in openshift_hosted_wfp_rc_phase.stdout"
+ with_together:
+ - "{{ l_openshift_hosted_wfp_items }}"
+ - "{{ l_openshift_hosted_wfp_latest_version.results }}"
diff --git a/roles/openshift_hosted/templates/registry_config.j2 b/roles/openshift_hosted/templates/registry_config.j2
index 61da452de..222b63b8a 100644
--- a/roles/openshift_hosted/templates/registry_config.j2
+++ b/roles/openshift_hosted/templates/registry_config.j2
@@ -53,7 +53,7 @@ storage:
{% if openshift_hosted_registry_storage_swift_domain is defined %}
domain: {{ openshift_hosted_registry_storage_swift_domain }}
{% endif -%}
-{% if openshift_hosted_registry_storage_swift_domainid %}
+{% if openshift_hosted_registry_storage_swift_domainid is defined %}
domainid: {{ openshift_hosted_registry_storage_swift_domainid }}
{% endif -%}
{% elif openshift_hosted_registry_storage_provider | default('') == 'gcs' %}
@@ -63,17 +63,15 @@ storage:
keyfile: /etc/registry/gcs.json
{% endif -%}
{% if openshift_hosted_registry_storage_gcs_rootdirectory is defined %}
- rootdirectory: {{ openshift_hosted_registry_storage_gcs_rootdirectory }}
+ rootdirectory: {{ openshift_hosted_registry_storage_gcs_rootdirectory | default('/registry') }}
{% endif -%}
{% endif -%}
auth:
openshift:
realm: openshift
middleware:
-{% if openshift.common.version_gte_3_3_or_1_3 | bool %}
registry:
- name: openshift
-{% endif %}
repository:
- name: openshift
options:
@@ -87,7 +85,7 @@ middleware:
baseurl: {{ openshift_hosted_registry_storage_s3_cloudfront_baseurl }}
privatekey: /etc/origin/cloudfront.pem
keypairid: {{ openshift_hosted_registry_storage_s3_cloudfront_keypairid }}
-{% elif openshift.common.version_gte_3_3_or_1_3 | bool %}
+{% else %}
storage:
- name: openshift
{% endif -%}
diff --git a/roles/openshift_hosted/vars/main.yml b/roles/openshift_hosted/vars/main.yml
index 0821d0e7e..0e756d9e1 100644
--- a/roles/openshift_hosted/vars/main.yml
+++ b/roles/openshift_hosted/vars/main.yml
@@ -1,13 +1,2 @@
---
-openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
registry_config_secret_name: registry-config
-
-openshift_default_projects:
- default:
- default_node_selector: ''
- logging:
- default_node_selector: ''
- openshift-infra:
- default_node_selector: ''
-
-openshift_projects: "{{ openshift_additional_projects | default({}) | oo_merge_dicts(openshift_default_projects) }}"
diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml
index 631bf3e2a..47dc9171d 100644
--- a/roles/openshift_hosted_facts/tasks/main.yml
+++ b/roles/openshift_hosted_facts/tasks/main.yml
@@ -1,16 +1,19 @@
---
+# openshift_*_selector variables have been deprecated in favor of
+# openshift_hosted_*_selector variables.
- set_fact:
- openshift_hosted_router_selector: "{{ openshift_hosted_infra_selector }}"
+ openshift_hosted_router_selector: "{{ openshift_router_selector | default(openshift_hosted_infra_selector) }}"
when: openshift_hosted_router_selector is not defined and openshift_hosted_infra_selector is defined
- set_fact:
- openshift_hosted_registry_selector: "{{ openshift_hosted_infra_selector }}"
+ openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}"
when: openshift_hosted_registry_selector is not defined and openshift_hosted_infra_selector is defined
- name: Set hosted facts
openshift_facts:
- role: hosted
+ role: "{{ item }}"
openshift_env: "{{ hostvars
| oo_merge_hostvars(vars, inventory_hostname)
| oo_openshift_env }}"
openshift_env_structures:
- 'openshift.hosted.router.*'
+ with_items: [hosted, logging, loggingops, metrics]
diff --git a/roles/openshift_hosted_logging/README.md b/roles/openshift_hosted_logging/README.md
deleted file mode 100644
index 680303853..000000000
--- a/roles/openshift_hosted_logging/README.md
+++ /dev/null
@@ -1,40 +0,0 @@
-###Required vars:
-
-- openshift_hosted_logging_hostname: kibana.example.com
-- openshift_hosted_logging_elasticsearch_cluster_size: 1
-- openshift_hosted_logging_master_public_url: https://localhost:8443
-
-###Optional vars:
-- openshift_hosted_logging_image_prefix: logging image prefix. No default. Use this to specify an alternate image repository e.g. my.private.repo:5000/private_openshift/
-- target_registry: DEPRECATED - use openshift_hosted_logging_image_prefix instead
-- openshift_hosted_logging_image_version: logging image version suffix. Defaults to the current version of the deployed software.
-- openshift_hosted_logging_secret_vars: (defaults to nothing=/dev/null) kibana.crt=/etc/origin/master/ca.crt kibana.key=/etc/origin/master/ca.key ca.crt=/etc/origin/master/ca.crt ca.key=/etc/origin/master/ca.key
-- openshift_hosted_logging_fluentd_replicas: (defaults to 1) 3
-- openshift_hosted_logging_cleanup: (defaults to no) Set this to 'yes' in order to cleanup logging components instead of deploying.
-- openshift_hosted_logging_elasticsearch_instance_ram: Amount of RAM to reserve per ElasticSearch instance (e.g. 1024M, 2G). Defaults to 8GiB; must be at least 512M (Ref.: [ElasticSearch documentation](https://www.elastic.co/guide/en/elasticsearch/guide/current/hardware.html\#\_memory).
-- openshift_hosted_logging_elasticsearch_pvc_size: Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead.
-- openshift_hosted_logging_elasticsearch_pvc_prefix: Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size `openshift_hosted_logging_elasticsearch_pvc_size`.
-- openshift_hosted_logging_elasticsearch_pvc_dynamic: Set to `true` to have created PersistentVolumeClaims annotated such that their backing storage can be dynamically provisioned (if that is available for your cluster).
-- openshift_hosted_logging_elasticsearch_storage_group: Number of a supplemental group ID for access to Elasticsearch storage volumes; backing volumes should allow access by this group ID (defaults to 65534).
-- openshift_hosted_logging_elasticsearch_nodeselector: Specify the nodeSelector that Elasticsearch should be use (label=value)
-- openshift_hosted_logging_fluentd_nodeselector: The nodeSelector used to determine which nodes to apply the `openshift_hosted_logging_fluentd_nodeselector_label` label to.
-- openshift_hosted_logging_fluentd_nodeselector_label: The label applied to nodes included in the Fluentd DaemonSet. Defaults to "logging-infra-fluentd=true".
-- openshift_hosted_logging_kibana_nodeselector: Specify the nodeSelector that Kibana should be use (label=value)
-- openshift_hosted_logging_curator_nodeselector: Specify the nodeSelector that Curator should be use (label=value)
-- openshift_hosted_logging_enable_ops_cluster: If "true", configure a second ES cluster and Kibana for ops logs.
-- openshift_hosted_logging_use_journal: *DEPRECATED - DO NOT USE*
-- openshift_hosted_logging_journal_source: By default, if this param is unset or empty, logging will use `/var/log/journal` if it exists, or `/run/log/journal` if not. You can use this param to force logging to use a different location.
-- openshift_hosted_logging_journal_read_from_head: Set to `true` to have fluentd read from the beginning of the journal, to get historical log data. Default is `false`. *WARNING* Using `true` may take several minutes or even hours, depending on the size of the journal, until any new records show up in Elasticsearch, and will cause fluentd to consume a lot of CPU and RAM resources.
-
-When `openshift_hosted_logging_enable_ops_cluster` is `True`, there are some
-additional vars. These work the same as above for their non-ops counterparts,
-but apply to the OPS cluster instance:
-- openshift_hosted_logging_ops_hostname: kibana-ops.example.com
-- openshift_hosted_logging_elasticsearch_ops_cluster_size
-- openshift_hosted_logging_elasticsearch_ops_instance_ram
-- openshift_hosted_logging_elasticsearch_ops_pvc_size
-- openshift_hosted_logging_elasticsearch_ops_pvc_prefix
-- openshift_hosted_logging_elasticsearch_ops_pvc_dynamic
-- openshift_hosted_logging_elasticsearch_ops_nodeselector
-- openshift_hosted_logging_kibana_ops_nodeselector
-- openshift_hosted_logging_curator_ops_nodeselector
diff --git a/roles/openshift_hosted_logging/defaults/main.yml b/roles/openshift_hosted_logging/defaults/main.yml
deleted file mode 100644
index a01f24df8..000000000
--- a/roles/openshift_hosted_logging/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted"
diff --git a/roles/openshift_hosted_logging/handlers/main.yml b/roles/openshift_hosted_logging/handlers/main.yml
deleted file mode 100644
index d7e83fe9a..000000000
--- a/roles/openshift_hosted_logging/handlers/main.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Verify API Server
- # Using curl here since the uri module requires python-httplib2 and
- # wait_for port doesn't provide health information.
- command: >
- curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
- --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
- {{ openshift.master.api_url }}/healthz/ready
- args:
- # Disables the following warning:
- # Consider using get_url or uri module rather than running curl
- warn: no
- register: api_available_output
- until: api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- changed_when: false
diff --git a/roles/openshift_hosted_logging/meta/main.yaml b/roles/openshift_hosted_logging/meta/main.yaml
deleted file mode 100644
index ab07a77c1..000000000
--- a/roles/openshift_hosted_logging/meta/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: openshift_master_facts }
diff --git a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml b/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml
deleted file mode 100644
index 70b0d67a4..000000000
--- a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
----
-- name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
-
-- name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
- changed_when: False
-
-- name: "Checking for logging project"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging"
- register: logging_project
- failed_when: "'FAILED' in logging_project.stderr"
-
-- name: "Changing projects"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging"
-
-
-- name: "Cleanup any previous logging infrastructure"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all --selector logging-infra={{ item }}"
- with_items:
- - kibana
- - fluentd
- - elasticsearch
- ignore_errors: yes
-
-- name: "Cleanup existing support infrastructure"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all,sa,oauthclient --selector logging-infra=support"
- ignore_errors: yes
-
-- name: "Cleanup existing secrets"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete secret logging-fluentd logging-elasticsearch logging-es-proxy logging-kibana logging-kibana-proxy logging-kibana-ops-proxy"
- ignore_errors: yes
- register: clean_result
- failed_when: clean_result.rc == 1 and 'not found' not in clean_result.stderr
-
-- name: "Cleanup existing logging deployers"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete pods --all"
-
-
-- name: "Cleanup logging project"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete project logging"
-
-
-- name: "Remove deployer template"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete template logging-deployer-template -n openshift"
- register: delete_output
- failed_when: delete_output.rc == 1 and 'exists' not in delete_output.stderr
-
-
-- name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
-
-- debug: msg="Success!"
diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
deleted file mode 100644
index 78b624109..000000000
--- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
+++ /dev/null
@@ -1,177 +0,0 @@
----
-- debug: msg="WARNING target_registry is deprecated, use openshift_hosted_logging_image_prefix instead"
- when: target_registry is defined and target_registry
-
-- fail: msg="This role requires the following vars to be defined. openshift_hosted_logging_master_public_url, openshift_hosted_logging_hostname, openshift_hosted_logging_elasticsearch_cluster_size"
- when: "openshift_hosted_logging_hostname is not defined or
- openshift_hosted_logging_elasticsearch_cluster_size is not defined or
- openshift_hosted_logging_master_public_url is not defined"
-
-- name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
-
-- name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
- changed_when: False
-
-- name: "Check for logging project already exists"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging -o jsonpath='{.metadata.name}'
- register: logging_project_result
- ignore_errors: True
-
-- name: "Create logging project"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging
- when: logging_project_result.stdout == ""
-
-- name: "Changing projects"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging
-
-- name: "Creating logging deployer secret"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}
- register: secret_output
- failed_when: secret_output.rc == 1 and 'exists' not in secret_output.stderr
-
-- name: "Create templates for logging accounts and the deployer"
- command: >
- {{ openshift.common.client_binary }} create --config={{ mktemp.stdout }}/admin.kubeconfig
- -f {{ hosted_base }}/logging-deployer.yaml
- --config={{ mktemp.stdout }}/admin.kubeconfig
- -n logging
- register: logging_import_template
- failed_when: "'already exists' not in logging_import_template.stderr and logging_import_template.rc != 0"
- changed_when: "'created' in logging_import_template.stdout"
-
-- name: "Process the logging accounts template"
- shell: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- process logging-deployer-account-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -
- register: process_deployer_accounts
- failed_when: process_deployer_accounts.rc == 1 and 'already exists' not in process_deployer_accounts.stderr
-
-- name: "Set permissions for logging-deployer service account"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
- policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer
- register: permiss_output
- failed_when: permiss_output.rc == 1 and 'exists' not in permiss_output.stderr
-
-- name: "Set permissions for fluentd"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
- policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
- register: fluentd_output
- failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr
-
-- name: "Set additional permissions for fluentd"
- command: >
- {{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig
- add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
- register: fluentd2_output
- failed_when: fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr
-
-- name: "Add rolebinding-reader to aggregated-logging-elasticsearch"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
- policy add-cluster-role-to-user rolebinding-reader \
- system:serviceaccount:logging:aggregated-logging-elasticsearch
- register: rolebinding_reader_output
- failed_when: rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr
-
-- name: "Create ConfigMap for deployer parameters"
- command: >
- {{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }}
- register: deployer_configmap_output
- failed_when: deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr
-
-- name: "Process the deployer template"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}"
- register: process_deployer
- failed_when: process_deployer.rc == 1 and 'already exists' not in process_deployer.stderr
-
-- name: "Wait for image pull and deployer pod"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods | grep logging-deployer.*Completed"
- register: result
- until: result.rc == 0
- retries: 20
- delay: 15
-
-- name: "Process imagestream template"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-imagestream-template {{ oc_new_app_values }}"
- when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
- register: process_is
- failed_when: process_is.rc == 1 and 'already exists' not in process_is.stderr
-
-- name: "Set insecured registry"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig annotate is --all openshift.io/image.insecureRepository=true --overwrite"
- when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
-
-- name: "Wait for imagestreams to become available"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get is | grep logging-fluentd"
- when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 and 'not found' not in result.stderr
- retries: 20
- delay: 5
-
-- name: "Wait for component pods to be running"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
- with_items:
- - es
- - kibana
- - curator
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 15
-
-- name: "Wait for ops component pods to be running"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
- with_items:
- - es-ops
- - kibana-ops
- - curator-ops
- when: openshift_hosted_logging_enable_ops_cluster is defined and openshift_hosted_logging_enable_ops_cluster
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 15
-
-- name: "Wait for fluentd DaemonSet to exist"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get daemonset logging-fluentd"
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 5
-
-- name: "Deploy fluentd by labeling the node"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node --overwrite=true {{ '-l' ~ openshift_hosted_logging_fluentd_nodeselector if openshift_hosted_logging_fluentd_nodeselector is defined else '--all' }} {{ openshift_hosted_logging_fluentd_nodeselector_label if openshift_hosted_logging_fluentd_nodeselector_label is defined else 'logging-infra-fluentd=true' }}"
-
-- name: "Wait for fluentd to be running"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component=fluentd | grep Running"
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 15
-
-- include: update_master_config.yaml
-
-- debug:
- msg: "Logging components deployed. Note persistent volume for elasticsearch must be setup manually"
-
-- name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
diff --git a/roles/openshift_hosted_logging/tasks/main.yaml b/roles/openshift_hosted_logging/tasks/main.yaml
deleted file mode 100644
index 42568597a..000000000
--- a/roles/openshift_hosted_logging/tasks/main.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Cleanup logging deployment
- include: "{{ role_path }}/tasks/cleanup_logging.yaml"
- when: openshift_hosted_logging_cleanup | default(false) | bool
-
-- name: Deploy logging
- include: "{{ role_path }}/tasks/deploy_logging.yaml"
- when: not openshift_hosted_logging_cleanup | default(false) | bool
diff --git a/roles/openshift_hosted_logging/tasks/update_master_config.yaml b/roles/openshift_hosted_logging/tasks/update_master_config.yaml
deleted file mode 100644
index 1122e059c..000000000
--- a/roles/openshift_hosted_logging/tasks/update_master_config.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Adding Kibana route information to loggingPublicURL
- modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: assetConfig.loggingPublicURL
- yaml_value: "https://{{ logging_hostname }}"
- notify: restart master
diff --git a/roles/openshift_hosted_logging/vars/main.yaml b/roles/openshift_hosted_logging/vars/main.yaml
deleted file mode 100644
index 4b350b244..000000000
--- a/roles/openshift_hosted_logging/vars/main.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-tr_or_ohlip: "{{ openshift_hosted_logging_deployer_prefix | default(target_registry) | default(None) }}"
-ip_kv: "{{ '-p IMAGE_PREFIX=' ~ tr_or_ohlip | quote if tr_or_ohlip != '' else '' }}"
-iv_kv: "{{ '-p IMAGE_VERSION=' ~ openshift_hosted_logging_deployer_version | quote if openshift_hosted_logging_deployer_version | default(none) is not none else '' }}"
-oc_new_app_values: "{{ ip_kv }} {{ iv_kv }}"
-openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
-kh_cmap_param: "{{ '--from-literal kibana-hostname=' ~ openshift_hosted_logging_hostname | quote if openshift_hosted_logging_hostname | default(none) is not none else '' }}"
-kh_ops_cmap_param: "{{ '--from-literal kibana-ops-hostname=' ~ openshift_hosted_logging_ops_hostname | quote if openshift_hosted_logging_ops_hostname | default(none) is not none else '' }}"
-pmu_cmap_param: "{{ '--from-literal public-master-url=' ~ openshift_hosted_logging_master_public_url | quote if openshift_hosted_logging_master_public_url | default(none) is not none else '' }}"
-es_cs_cmap_param: "{{ '--from-literal es-cluster-size=' ~ openshift_hosted_logging_elasticsearch_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_cluster_size | default(none) is not none else '' }}"
-es_ops_cs_cmap_param: "{{ '--from-literal es-ops-cluster-size=' ~ openshift_hosted_logging_elasticsearch_ops_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_ops_cluster_size | default(none) is not none else '' }}"
-es_ir_cmap_param: "{{ '--from-literal es-instance-ram=' ~ openshift_hosted_logging_elasticsearch_instance_ram | quote if openshift_hosted_logging_elasticsearch_instance_ram | default(none) is not none else '' }}"
-es_ops_ir_cmap_param: "{{ '--from-literal es-ops-instance-ram=' ~ openshift_hosted_logging_elasticsearch_ops_instance_ram | quote if openshift_hosted_logging_elasticsearch_ops_instance_ram | default(none) is not none else '' }}"
-es_pvcs_cmap_param: "{{ '--from-literal es-pvc-size=' ~ openshift_hosted_logging_elasticsearch_pvc_size | quote if openshift_hosted_logging_elasticsearch_pvc_size | default(none) is not none else '' }}"
-es_ops_pvcs_cmap_param: "{{ '--from-literal es-ops-pvc-size=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_size | quote if openshift_hosted_logging_elasticsearch_ops_pvc_size | default(none) is not none else '' }}"
-es_pvcp_cmap_param: "{{ '--from-literal es-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_pvc_prefix | default(none) is not none else '' }}"
-es_ops_pvcp_cmap_param: "{{ '--from-literal es-ops-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default(none) is not none else '' }}"
-es_pvcd_cmap_param: "{{ '--from-literal es-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_pvc_dynamic | default(none) is not none else '' }}"
-es_ops_pvcd_cmap_param: "{{ '--from-literal es-ops-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(none) is not none else '' }}"
-es_sg_cmap_param: "{{ '--from-literal storage-group=' ~ openshift_hosted_logging_elasticsearch_storage_group | string | quote if openshift_hosted_logging_elasticsearch_storage_group | default(none) is not none else '' }}"
-es_ns_cmap_param: "{{ '--from-literal es-nodeselector=' ~ openshift_hosted_logging_elasticsearch_nodeselector | quote if openshift_hosted_logging_elasticsearch_nodeselector | default(none) is not none else '' }}"
-es_ops_ns_cmap_param: "{{ '--from-literal es-ops-nodeselector=' ~ openshift_hosted_logging_elasticsearch_ops_nodeselector | quote if openshift_hosted_logging_elasticsearch_ops_nodeselector | default(none) is not none else '' }}"
-fd_ns_cmap_param: "{{ '--from-literal fluentd-nodeselector=' ~ openshift_hosted_logging_fluentd_nodeselector_label | quote if openshift_hosted_logging_fluentd_nodeselector_label | default(none) is not none else 'logging-infra-fluentd=true' }}"
-kb_ns_cmap_param: "{{ '--from-literal kibana-nodeselector=' ~ openshift_hosted_logging_kibana_nodeselector | quote if openshift_hosted_logging_kibana_nodeselector | default(none) is not none else '' }}"
-kb_ops_ns_cmap_param: "{{ '--from-literal kibana-ops-nodeselector=' ~ openshift_hosted_logging_kibana_ops_nodeselector | quote if openshift_hosted_logging_kibana_ops_nodeselector | default(none) is not none else '' }}"
-cr_ns_cmap_param: "{{ '--from-literal curator-nodeselector=' ~ openshift_hosted_logging_curator_nodeselector | quote if openshift_hosted_logging_curator_nodeselector | default(none) is not none else '' }}"
-cr_ops_ns_cmap_param: "{{ '--from-literal curator-ops-nodeselector=' ~ openshift_hosted_logging_curator_ops_nodeselector | quote if openshift_hosted_logging_curator_ops_nodeselector | default(none) is not none else '' }}"
-ops_cmap_param: "{{ '--from-literal enable-ops-cluster=' ~ openshift_hosted_logging_enable_ops_cluster | string | lower | quote if openshift_hosted_logging_enable_ops_cluster | default(none) is not none else '' }}"
-journal_source_cmap_param: "{{ '--from-literal journal-source=' ~ openshift_hosted_logging_journal_source | quote if openshift_hosted_logging_journal_source | default(none) is not none else '' }}"
-journal_read_from_head_cmap_param: "{{ '--from-literal journal-read-from-head=' ~ openshift_hosted_logging_journal_read_from_head | string | lower | quote if openshift_hosted_logging_journal_read_from_head | default(none) is not none else '' }}"
-ips_cmap_param: "{{ '--from-literal image-pull-secret=' ~ openshift_hosted_logging_image_pull_secret | quote if openshift_hosted_logging_image_pull_secret | default(none) is not none else '' }}"
-deployer_cmap_params: "{{ kh_cmap_param }} {{ kh_ops_cmap_param }} {{ pmu_cmap_param }} {{ es_cs_cmap_param }} {{ es_ir_cmap_param }} {{ es_pvcs_cmap_param }} {{ es_pvcp_cmap_param }} {{ es_pvcd_cmap_param }} {{ es_ops_cs_cmap_param }} {{ es_ops_ir_cmap_param }} {{ es_ops_pvcs_cmap_param }} {{ es_ops_pvcp_cmap_param }} {{ es_ops_pvcd_cmap_param }} {{ es_sg_cmap_param }} {{ es_ns_cmap_param }} {{ es_ops_ns_cmap_param }} {{ fd_ns_cmap_param }} {{ kb_ns_cmap_param }} {{ kb_ops_ns_cmap_param }} {{ cr_ns_cmap_param }} {{ cr_ops_ns_cmap_param }} {{ ops_cmap_param }} {{ journal_source_cmap_param }} {{ journal_read_from_head_cmap_param }} {{ ips_cmap_param }}"
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index f283261c4..829c78728 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -12,13 +12,13 @@ generation for Elasticsearch (it uses JKS) as well as openssl to sign certificat
As part of the installation, it is recommended that you add the Fluentd node selector label
to the list of persisted [node labels](https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-node-host-labels).
-###Required vars:
+### Required vars:
- `openshift_logging_install_logging`: When `True` the `openshift_logging` role will install Aggregated Logging.
When `openshift_logging_install_logging` is set to `False` the `openshift_logging` role will uninstall Aggregated Logging.
-###Optional vars:
+### Optional vars:
- `openshift_logging_purge_logging`: When `openshift_logging_install_logging` is set to 'False' to trigger uninstallation and `openshift_logging_purge_logging` is set to 'True', it will completely and irreversibly remove all logging persistent data, including PVCs. Defaults to 'False'.
- `openshift_logging_image_prefix`: The prefix for the logging images to use. Defaults to 'docker.io/openshift/origin-'.
- `openshift_logging_curator_image_prefix`: Setting the image prefix for Curator image. Defaults to `openshift_logging_image_prefix`.
@@ -62,7 +62,6 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
- `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'.
- `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'.
- `openshift_logging_fluentd_memory_limit`: The memory limit for Fluentd pods. Defaults to '512Mi'.
-- `openshift_logging_fluentd_es_copy`: Whether or not to use the ES_COPY feature for Fluentd (DEPRECATED). Defaults to 'False'.
- `openshift_logging_fluentd_use_journal`: *DEPRECATED - DO NOT USE* Fluentd will automatically detect whether or not Docker is using the journald log driver.
- `openshift_logging_fluentd_journal_read_from_head`: If empty, Fluentd will use its internal default, which is false.
- `openshift_logging_fluentd_hosts`: List of nodes that should be labeled for Fluentd to be deployed to. Defaults to ['--all'].
@@ -91,6 +90,12 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
- `openshift_logging_es_number_of_shards`: The number of primary shards for every new index created in ES. Defaults to '1'.
- `openshift_logging_es_number_of_replicas`: The number of replica shards per primary shard for every new index. Defaults to '0'.
+- `openshift_logging_install_eventrouter`: Coupled with `openshift_logging_install_logging`. When both are 'True', eventrouter will be installed. When both are 'False', eventrouter will be uninstalled.
+Other combinations will keep the eventrouter untouched.
+
+Detailed eventrouter configuration can be found in
+- `roles/openshift_logging_eventrouter/README.md`
+
When `openshift_logging_use_ops` is `True`, there are some additional vars. These work the
same as above for their non-ops counterparts, but apply to the OPS cluster instance:
- `openshift_logging_es_ops_host`: logging-es-ops
@@ -164,7 +169,7 @@ Elasticsearch OPS too, if using an OPS cluster:
send the raw logs to mux for processing. We do not currently recommend using
this mode, and ansible will warn you about this.
- `openshift_logging_mux_hostname`: Default is "mux." +
- `openshift_master_default_subdomain`. This is the hostname *external*_
+ `openshift_master_default_subdomain`. This is the hostname *external*
clients will use to connect to mux, and will be used in the TLS server cert
subject.
- `openshift_logging_mux_port`: 24284
@@ -194,3 +199,26 @@ Elasticsearch OPS too, if using an OPS cluster:
Defaults to 'logging-mux'.
- `openshift_logging_mux_file_buffer_storage_group`: The storage group used for Mux.
Defaults to '65534'.
+
+### remote syslog forwarding
+- `openshift_logging_fluentd_remote_syslog`: Set `true` to enable remote syslog forwarding, defaults to `false`
+- `openshift_logging_fluentd_remote_syslog_host`: Required, hostname or IP of remote syslog server
+- `openshift_logging_fluentd_remote_syslog_port`: Port of remote syslog server, defaults to `514`
+- `openshift_logging_fluentd_remote_syslog_severity`: Syslog severity level, defaults to `debug`
+- `openshift_logging_fluentd_remote_syslog_facility`: Syslog facility, defaults to `local0`
+- `openshift_logging_fluentd_remote_syslog_remove_tag_prefix`: Remove the prefix from the tag, defaults to `''` (empty)
+- `openshift_logging_fluentd_remote_syslog_tag_key`: If a string is specified, use this field from the record to set the key field on the syslog message
+- `openshift_logging_fluentd_remote_syslog_use_record`: Set `true` to use the severity and facility from the record, defaults to `false`
+- `openshift_logging_fluentd_remote_syslog_payload_key`: If a string is specified, use this field from the record as the payload on the syslog message
+
+The corresponding openshift\_logging\_mux\_* parameters are below.
+
+- `openshift_logging_mux_remote_syslog`: Set `true` to enable remote syslog forwarding, defaults to `false`
+- `openshift_logging_mux_remote_syslog_host`: Required, hostname or IP of remote syslog server
+- `openshift_logging_mux_remote_syslog_port`: Port of remote syslog server, defaults to `514`
+- `openshift_logging_mux_remote_syslog_severity`: Syslog severity level, defaults to `debug`
+- `openshift_logging_mux_remote_syslog_facility`: Syslog facility, defaults to `local0`
+- `openshift_logging_mux_remote_syslog_remove_tag_prefix`: Remove the prefix from the tag, defaults to `''` (empty)
+- `openshift_logging_mux_remote_syslog_tag_key`: If a string is specified, use this field from the record to set the key field on the syslog message
+- `openshift_logging_mux_remote_syslog_use_record`: Set `true` to use the severity and facility from the record, defaults to `false`
+- `openshift_logging_mux_remote_syslog_payload_key`: If a string is specified, use this field from the record as the payload on the syslog message
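
The remote syslog variables documented above are consumed by the fluentd.j2 and mux.j2 template changes later in this diff, where they become `REMOTE_SYSLOG_*` environment variables. A minimal inventory sketch for turning forwarding on; the hostname and severity below are illustrative values, not role defaults:

```yaml
# Illustrative inventory/group_vars snippet. Only the *_remote_syslog toggle and
# *_remote_syslog_host are strictly needed; everything else falls back to the documented defaults.
openshift_logging_fluentd_remote_syslog: true
openshift_logging_fluentd_remote_syslog_host: syslog.example.com   # required when forwarding is enabled
openshift_logging_fluentd_remote_syslog_port: 514
openshift_logging_fluentd_remote_syslog_severity: info

# The mux counterparts work the same way when mux is in use.
openshift_logging_mux_remote_syslog: true
openshift_logging_mux_remote_syslog_host: syslog.example.com
```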
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 716f0e002..6e7e2557f 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -1,15 +1,16 @@
---
-openshift_logging_use_ops: "{{ openshift_hosted_logging_enable_ops_cluster | default('false') | bool }}"
+openshift_logging_use_ops: False
openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
-openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}"
+openshift_logging_master_public_url: "{{ 'https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true)) }}"
openshift_logging_namespace: logging
openshift_logging_nodeselector: null
openshift_logging_labels: {}
openshift_logging_label_key: ""
openshift_logging_label_value: ""
-openshift_logging_install_logging: True
+openshift_logging_install_logging: False
+
openshift_logging_purge_logging: False
-openshift_logging_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
+openshift_logging_image_pull_secret: ""
openshift_logging_curator_default_days: 30
openshift_logging_curator_run_hour: 0
@@ -19,23 +20,23 @@ openshift_logging_curator_script_log_level: INFO
openshift_logging_curator_log_level: ERROR
openshift_logging_curator_cpu_limit: 100m
openshift_logging_curator_memory_limit: null
-openshift_logging_curator_nodeselector: "{{ openshift_hosted_logging_curator_nodeselector | default('') | map_from_pairs }}"
+openshift_logging_curator_nodeselector: {}
openshift_logging_curator_ops_cpu_limit: 100m
openshift_logging_curator_ops_memory_limit: null
-openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator_ops_nodeselector | default('') | map_from_pairs }}"
+openshift_logging_curator_ops_nodeselector: {}
-openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+openshift_logging_kibana_hostname: "{{ 'kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_kibana_cpu_limit: null
openshift_logging_kibana_memory_limit: 736Mi
openshift_logging_kibana_proxy_debug: false
openshift_logging_kibana_proxy_cpu_limit: null
-openshift_logging_kibana_proxy_memory_limit: 96Mi
+openshift_logging_kibana_proxy_memory_limit: 256Mi
openshift_logging_kibana_replica_count: 1
openshift_logging_kibana_edge_term_policy: Redirect
-openshift_logging_kibana_nodeselector: "{{ openshift_hosted_logging_kibana_nodeselector | default('') | map_from_pairs }}"
-openshift_logging_kibana_ops_nodeselector: "{{ openshift_hosted_logging_kibana_ops_nodeselector | default('') | map_from_pairs }}"
+openshift_logging_kibana_nodeselector: {}
+openshift_logging_kibana_ops_nodeselector: {}
#The absolute path on the control node to the cert file to use
#for the public facing kibana certs
@@ -49,12 +50,12 @@ openshift_logging_kibana_key: ""
#for the public facing kibana certs
openshift_logging_kibana_ca: ""
-openshift_logging_kibana_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_kibana_ops_cpu_limit: null
openshift_logging_kibana_ops_memory_limit: 736Mi
openshift_logging_kibana_ops_proxy_debug: false
openshift_logging_kibana_ops_proxy_cpu_limit: null
-openshift_logging_kibana_ops_proxy_memory_limit: 96Mi
+openshift_logging_kibana_ops_proxy_memory_limit: 256Mi
openshift_logging_kibana_ops_replica_count: 1
#The absolute path on the control node to the cert file to use
@@ -69,12 +70,11 @@ openshift_logging_kibana_ops_key: ""
#for the public facing ops kibana certs
openshift_logging_kibana_ops_ca: ""
-openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
+openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'}
openshift_logging_fluentd_cpu_limit: 100m
openshift_logging_fluentd_memory_limit: 512Mi
-openshift_logging_fluentd_es_copy: false
-openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}"
-openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}"
+openshift_logging_fluentd_journal_source: ""
+openshift_logging_fluentd_journal_read_from_head: ""
openshift_logging_fluentd_hosts: ['--all']
openshift_logging_fluentd_buffer_queue_limit: 1024
openshift_logging_fluentd_buffer_size_limit: 1m
@@ -84,18 +84,18 @@ openshift_logging_es_port: 9200
openshift_logging_es_ca: /etc/fluent/keys/ca
openshift_logging_es_client_cert: /etc/fluent/keys/cert
openshift_logging_es_client_key: /etc/fluent/keys/key
-openshift_logging_es_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
+openshift_logging_es_cluster_size: 1
openshift_logging_es_cpu_limit: 1000m
# the logging appenders for the root loggers to write ES logs. Valid values: 'file', 'console'
openshift_logging_es_log_appenders: ['file']
-openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}"
-openshift_logging_es_pv_selector: "{{ openshift_hosted_logging_storage_labels | default('') }}"
-openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}"
-openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}"
-openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
+openshift_logging_es_memory_limit: "8Gi"
+openshift_logging_es_pv_selector: "{{ openshift_logging_storage_labels | default('') }}"
+openshift_logging_es_pvc_dynamic: "{{ openshift_logging_elasticsearch_pvc_dynamic | default(False) }}"
+openshift_logging_es_pvc_size: "{{ openshift_logging_elasticsearch_pvc_size | default('') }}"
+openshift_logging_es_pvc_prefix: "{{ openshift_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
openshift_logging_es_recover_after_time: 5m
-openshift_logging_es_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}"
-openshift_logging_es_nodeselector: "{{ openshift_hosted_logging_elasticsearch_nodeselector | default('') | map_from_pairs }}"
+openshift_logging_es_storage_group: "65534"
+openshift_logging_es_nodeselector: {}
# openshift_logging_es_config is a hash to be merged into the defaults for the elasticsearch.yaml
openshift_logging_es_config: {}
openshift_logging_es_number_of_shards: 1
@@ -125,16 +125,16 @@ openshift_logging_es_ops_port: 9200
openshift_logging_es_ops_ca: /etc/fluent/keys/ca
openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert
openshift_logging_es_ops_client_key: /etc/fluent/keys/key
-openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
+openshift_logging_es_ops_cluster_size: "{{ openshift_logging_elasticsearch_ops_cluster_size | default(1) }}"
openshift_logging_es_ops_cpu_limit: 1000m
-openshift_logging_es_ops_memory_limit: "{{ openshift_hosted_logging_elasticsearch_ops_instance_ram | default('8Gi') }}"
-openshift_logging_es_ops_pv_selector: "{{ openshift_hosted_loggingops_storage_labels | default('') }}"
-openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}"
-openshift_logging_es_ops_pvc_size: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_size | default('') }}"
-openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}"
+openshift_logging_es_ops_memory_limit: "8Gi"
+openshift_logging_es_ops_pv_selector: "{{ openshift_loggingops_storage_labels | default('') }}"
+openshift_logging_es_ops_pvc_dynamic: "{{ openshift_logging_elasticsearch_ops_pvc_dynamic | default(False) }}"
+openshift_logging_es_ops_pvc_size: "{{ openshift_logging_elasticsearch_ops_pvc_size | default('') }}"
+openshift_logging_es_ops_pvc_prefix: "{{ openshift_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}"
openshift_logging_es_ops_recover_after_time: 5m
-openshift_logging_es_ops_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}"
-openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}"
+openshift_logging_es_ops_storage_group: "65534"
+openshift_logging_es_ops_nodeselector: {}
# for exposing es-ops to external (outside of the cluster) clients
openshift_logging_es_ops_allow_external: False
@@ -153,7 +153,7 @@ openshift_logging_es_ops_key: ""
openshift_logging_es_ops_ca_ext: ""
# storage related defaults
-openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default(['ReadWriteOnce']) }}"
+openshift_logging_storage_access_modes: ['ReadWriteOnce']
# mux - secure_forward listener service
openshift_logging_mux_allow_external: False
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
index eac086e81..330e7e59a 100644
--- a/roles/openshift_logging/filter_plugins/openshift_logging.py
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -45,6 +45,21 @@ def map_from_pairs(source, delim="="):
return dict(item.split(delim) for item in source.split(","))
+def serviceaccount_name(qualified_sa):
+ ''' Returns the simple name from a fully qualified name '''
+ return qualified_sa.split(":")[-1]
+
+
+def serviceaccount_namespace(qualified_sa, default=None):
+ ''' Returns the namespace from a fully qualified name '''
+ seg = qualified_sa.split(":")
+ if len(seg) > 1:
+ return seg[-2]
+ if default:
+ return default
+ return seg[-1]
+
+
# pylint: disable=too-few-public-methods
class FilterModule(object):
''' OpenShift Logging Filters '''
@@ -56,5 +71,7 @@ class FilterModule(object):
'random_word': random_word,
'entry_from_named_pair': entry_from_named_pair,
'map_from_pairs': map_from_pairs,
- 'es_storage': es_storage
+ 'es_storage': es_storage,
+ 'serviceaccount_name': serviceaccount_name,
+ 'serviceaccount_namespace': serviceaccount_namespace
}
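
The two filters added above split a fully qualified service account reference of the form `system:serviceaccount:<namespace>:<name>`, which is how `openshift_logging_elasticsearch_prometheus_sa` is expressed later in this diff. A purely illustrative sketch of what they return (the `debug` task is not part of the role):

```yaml
# Hypothetical playbook snippet demonstrating the new filters.
- set_fact:
    qualified_sa: "system:serviceaccount:prometheus:prometheus"

- debug:
    msg: "name={{ qualified_sa | serviceaccount_name }}, namespace={{ qualified_sa | serviceaccount_namespace('logging') }}"
  # -> name=prometheus, namespace=prometheus
  # For a bare name such as "prometheus", serviceaccount_namespace falls back to the
  # supplied default ('logging' here), mirroring how the elasticsearch role calls it.
```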
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
index 35accfb78..f10df8da5 100644
--- a/roles/openshift_logging/library/openshift_logging_facts.py
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -171,22 +171,25 @@ class OpenshiftLoggingFacts(OCBaseCommand):
if comp is not None:
spec = dc_item["spec"]["template"]["spec"]
facts = dict(
+ name=name,
selector=dc_item["spec"]["selector"],
replicas=dc_item["spec"]["replicas"],
serviceAccount=spec["serviceAccount"],
containers=dict(),
volumes=dict()
)
+ if "nodeSelector" in spec:
+ facts["nodeSelector"] = spec["nodeSelector"]
+ if "supplementalGroups" in spec["securityContext"]:
+ facts["storageGroups"] = spec["securityContext"]["supplementalGroups"]
+ facts["spec"] = spec
if "volumes" in spec:
for vol in spec["volumes"]:
clone = copy.deepcopy(vol)
clone.pop("name", None)
facts["volumes"][vol["name"]] = clone
for container in spec["containers"]:
- facts["containers"][container["name"]] = dict(
- image=container["image"],
- resources=container["resources"],
- )
+ facts["containers"][container["name"]] = container
self.add_facts_for(comp, "deploymentconfigs", name, facts)
def facts_for_services(self, namespace):
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index 45298e345..ffed956a4 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -92,6 +92,7 @@
with_items:
- rolebinding-reader
- daemonset-admin
+ - prometheus-metrics-viewer
# delete our configmaps
- name: delete configmaps
@@ -105,3 +106,9 @@
- logging-elasticsearch
- logging-fluentd
- logging-mux
+
+## EventRouter
+- include_role:
+ name: openshift_logging_eventrouter
+ when:
+ not openshift_logging_install_eventrouter | default(false) | bool
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index 9c8f0986a..f526fd734 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -139,10 +139,10 @@
# TODO: make idempotent
- name: Generate proxy session
- set_fact: session_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(200)}}
+ set_fact: session_secret={{ 200 | oo_random_word}}
check_mode: no
# TODO: make idempotent
- name: Generate oauth client secret
- set_fact: oauth_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}}
+ set_fact: oauth_secret={{ 64 | oo_random_word}}
check_mode: no
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index a77df9986..21fd79c28 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -69,17 +69,18 @@
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
- openshift_logging_elasticsearch_deployment_name: "{{ item.0 }}"
+ openshift_logging_elasticsearch_deployment_name: "{{ item.0.name }}"
openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}"
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
- openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
- openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+ openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if item.0.nodeSelector | default(None) is none else item.0.nodeSelector }}"
+ openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if item.0.storageGroups | default([]) | length == 0 else item.0.storageGroups }}"
+ _es_containers: "{{item.0.containers}}"
with_together:
- - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs }}"
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"
- "{{ openshift_logging_facts.elasticsearch.pvcs }}"
- "{{ es_indices }}"
when:
@@ -95,8 +96,6 @@
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
- openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
- openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
@@ -123,7 +122,7 @@
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
- openshift_logging_elasticsearch_deployment_name: "{{ item.0 }}"
+ openshift_logging_elasticsearch_deployment_name: "{{ item.0.name }}"
openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}"
openshift_logging_elasticsearch_ops_deployment: true
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
@@ -134,15 +133,18 @@
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector if item.0.nodeSelector | default(None) is none else item.0.nodeSelector }}"
+ openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_ops_storage_group] if item.0.storageGroups | default([]) | length == 0 else item.0.storageGroups }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}"
openshift_logging_es_hostname: "{{ openshift_logging_es_ops_hostname }}"
openshift_logging_es_edge_term_policy: "{{ openshift_logging_es_ops_edge_term_policy | default('') }}"
openshift_logging_es_allow_external: "{{ openshift_logging_es_ops_allow_external }}"
+ _es_containers: "{{item.0.containers}}"
with_together:
- - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs }}"
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"
- "{{ openshift_logging_facts.elasticsearch_ops.pvcs }}"
- "{{ es_ops_indices }}"
when:
@@ -165,6 +167,7 @@
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}"
@@ -268,4 +271,12 @@
openshift_logging_fluentd_master_url: "{{ openshift_logging_master_url }}"
openshift_logging_fluentd_namespace: "{{ openshift_logging_namespace }}"
+
+## EventRouter
+- include_role:
+ name: openshift_logging_eventrouter
+ when:
+ openshift_logging_install_eventrouter | default(false) | bool
+
+
- include: update_master_config.yaml
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index f475024dd..15f6a23e6 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -30,10 +30,11 @@
check_mode: no
become: no
-- include: "{{ role_path }}/tasks/install_logging.yaml"
- when: openshift_logging_install_logging | default(false) | bool
+- include: install_logging.yaml
+ when:
+ - openshift_logging_install_logging | default(false) | bool
-- include: "{{ role_path }}/tasks/delete_logging.yaml"
+- include: delete_logging.yaml
when:
- not openshift_logging_install_logging | default(false) | bool
diff --git a/roles/openshift_logging/vars/openshift-enterprise.yml b/roles/openshift_logging/vars/openshift-enterprise.yml
index 49e8a18af..f60fa8d7d 100644
--- a/roles/openshift_logging/vars/openshift-enterprise.yml
+++ b/roles/openshift_logging/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
__openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default ('v3.6') }}"
+__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default ('v3.7') }}"
diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml
index 75bd479be..554aa5bb2 100644
--- a/roles/openshift_logging_elasticsearch/defaults/main.yml
+++ b/roles/openshift_logging_elasticsearch/defaults/main.yml
@@ -6,7 +6,7 @@ openshift_logging_elasticsearch_image_pull_secret: "{{ openshift_hosted_logging_
openshift_logging_elasticsearch_namespace: logging
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector | default('') }}"
-openshift_logging_elasticsearch_cpu_limit: 1000m
+openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_cpu_limit | default('1000m') }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_memory_limit | default('1Gi') }}"
openshift_logging_elasticsearch_recover_after_time: "{{ openshift_logging_es_recover_after_time | default('5m') }}"
@@ -33,13 +33,19 @@ openshift_logging_elasticsearch_pvc_size: ""
openshift_logging_elasticsearch_pvc_dynamic: false
openshift_logging_elasticsearch_pvc_pv_selector: {}
openshift_logging_elasticsearch_pvc_access_modes: ['ReadWriteOnce']
-openshift_logging_elasticsearch_storage_group: '65534'
+openshift_logging_elasticsearch_storage_group: ['65534']
openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
# config the es plugin to write kibana index based on the index mode
openshift_logging_elasticsearch_kibana_index_mode: 'unique'
+openshift_logging_elasticsearch_proxy_image_prefix: "openshift/oauth-proxy"
+openshift_logging_elasticsearch_proxy_image_version: "v1.0.0"
+openshift_logging_elasticsearch_proxy_cpu_limit: "100m"
+openshift_logging_elasticsearch_proxy_memory_limit: "64Mi"
+openshift_logging_elasticsearch_prometheus_sa: "system:serviceaccount:{{openshift_prometheus_namespace | default('prometheus')}}:prometheus"
+
# this is used to determine if this is an operations deployment or a non-ops deployment
# simply used for naming purposes
openshift_logging_elasticsearch_ops_deployment: false
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 1e800b1d6..df2c17aa0 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -37,6 +37,7 @@
# we want to make sure we have all the necessary components here
# service account
+
- name: Create ES service account
oc_serviceaccount:
state: present
@@ -77,6 +78,38 @@
resource_name: rolebinding-reader
user: "system:serviceaccount:{{ openshift_logging_elasticsearch_namespace }}:aggregated-logging-elasticsearch"
+- oc_adm_policy_user:
+ state: present
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ resource_kind: cluster-role
+ resource_name: system:auth-delegator
+ user: "system:serviceaccount:{{ openshift_logging_elasticsearch_namespace}}:aggregated-logging-elasticsearch"
+
+# logging-metrics-reader role
+- template:
+ src: logging-metrics-role.j2
+ dest: "{{mktemp.stdout}}/templates/logging-metrics-role.yml"
+ vars:
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ role_namespace: "{{ openshift_logging_elasticsearch_prometheus_sa | serviceaccount_namespace(openshift_logging_elasticsearch_namespace) }}"
+ role_user: "{{ openshift_logging_elasticsearch_prometheus_sa | serviceaccount_name }}"
+
+- name: Create logging-metrics-reader-role
+ command: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ -n "{{ openshift_logging_elasticsearch_namespace }}"
+ create -f "{{mktemp.stdout}}/templates/logging-metrics-role.yml"
+ register: prometheus_out
+ check_mode: no
+ ignore_errors: yes
+
+- fail:
+ msg: "There was an error creating the logging-metrics-role and binding: {{prometheus_out}}"
+ when:
+ - "prometheus_out.stderr | length > 0"
+ - "'already exists' not in prometheus_out.stderr"
+
# View role and binding
- name: Generate logging-elasticsearch-view-role
template:
@@ -206,6 +239,32 @@
- port: 9200
targetPort: "restapi"
+- name: Set logging-{{ es_component}}-prometheus service
+ oc_service:
+ state: present
+ name: "logging-{{es_component}}-prometheus"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ labels:
+ logging-infra: 'support'
+ ports:
+ - name: proxy
+ port: 443
+ targetPort: 4443
+ selector:
+ component: "{{ es_component }}-prometheus"
+ provider: openshift
+
+- oc_edit:
+ kind: service
+ name: "logging-{{es_component}}-prometheus"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ separator: '#'
+ content:
+ metadata#annotations#service.alpha.openshift.io/serving-cert-secret-name: "prometheus-tls"
+ metadata#annotations#prometheus.io/scrape: "true"
+ metadata#annotations#prometheus.io/scheme: "https"
+ metadata#annotations#prometheus.io/path: "_prometheus/metrics"
+
- name: Check to see if PVC already exists
oc_obj:
state: list
@@ -260,7 +319,7 @@
delete_after: true
- set_fact:
- es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 'abcdefghijklmnopqrstuvwxyz0123456789' | random_word(8) }}"
+ es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}"
when: openshift_logging_elasticsearch_deployment_name == ""
- set_fact:
@@ -281,6 +340,8 @@
es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}"
es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"
es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}"
+ es_storage_groups: "{{ openshift_logging_elasticsearch_storage_group | default([]) }}"
+ es_container_security_context: "{{ _es_containers.elasticsearch.securityContext if _es_containers is defined and 'elasticsearch' in _es_containers and 'securityContext' in _es_containers.elasticsearch else None }}"
deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}"
es_replicas: 1
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index 3c8f390c4..1ed886627 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -29,7 +29,9 @@ spec:
serviceAccountName: aggregated-logging-elasticsearch
securityContext:
supplementalGroups:
- - {{openshift_logging_elasticsearch_storage_group}}
+{% for group in es_storage_groups %}
+ - {{group}}
+{% endfor %}
{% if es_node_selector is iterable and es_node_selector | length > 0 %}
nodeSelector:
{% for key, value in es_node_selector.iteritems() %}
@@ -37,6 +39,40 @@ spec:
{% endfor %}
{% endif %}
containers:
+ - name: proxy
+ image: {{openshift_logging_elasticsearch_proxy_image_prefix}}:{{openshift_logging_elasticsearch_proxy_image_version}}
+ imagePullPolicy: Always
+ args:
+ - --upstream-ca=/etc/elasticsearch/secret/admin-ca
+ - --https-address=:4443
+ - -provider=openshift
+ - -client-id={{openshift_logging_elasticsearch_prometheus_sa}}
+ - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
+ - -cookie-secret={{ 16 | oo_random_word | b64encode }}
+ - -upstream=https://localhost:9200
+ - '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}'
+ - '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}'
+ - --tls-cert=/etc/tls/private/tls.crt
+ - --tls-key=/etc/tls/private/tls.key
+ - -pass-access-token
+ - -pass-user-headers
+ ports:
+ - containerPort: 4443
+ name: proxy
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /etc/tls/private
+ name: proxy-tls
+ readOnly: true
+ - mountPath: /etc/elasticsearch/secret
+ name: elasticsearch
+ readOnly: true
+ resources:
+ limits:
+ cpu: "{{openshift_logging_elasticsearch_proxy_cpu_limit }}"
+ memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}"
+ requests:
+ memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}"
-
name: "elasticsearch"
image: {{image}}
@@ -49,6 +85,9 @@ spec:
{% endif %}
requests:
memory: "{{es_memory_limit}}"
+{% if es_container_security_context %}
+ securityContext: {{ es_container_security_context | to_yaml }}
+{% endif %}
ports:
-
containerPort: 9200
@@ -94,7 +133,7 @@ spec:
value: "30"
-
name: "POD_LABEL"
- value: "component={{component}}"
+ value: "component={{component}}"
-
name: "IS_MASTER"
value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}"
@@ -102,6 +141,9 @@ spec:
-
name: "HAS_DATA"
value: "{% if deploy_type in ['data-master', 'data-client'] %}true{% else %}false{% endif %}"
+ -
+ name: "PROMETHEUS_USER"
+ value: "{{openshift_logging_elasticsearch_prometheus_sa}}"
volumeMounts:
- name: elasticsearch
@@ -120,6 +162,9 @@ spec:
timeoutSeconds: 30
periodSeconds: 5
volumes:
+ - name: proxy-tls
+ secret:
+ secretName: prometheus-tls
- name: elasticsearch
secret:
secretName: logging-elasticsearch
diff --git a/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2 b/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2
new file mode 100644
index 000000000..d9800e5a5
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2
@@ -0,0 +1,31 @@
+---
+apiVersion: v1
+kind: List
+items:
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: Role
+ metadata:
+ annotations:
+ rbac.authorization.kubernetes.io/autoupdate: "true"
+ name: prometheus-metrics-viewer
+ namespace: {{ namespace }}
+ rules:
+ - apiGroups:
+ - metrics.openshift.io
+ resources:
+ - prometheus
+ verbs:
+ - view
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: RoleBinding
+ metadata:
+ name: prometheus-metrics-viewer
+ namespace: {{ namespace }}
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: prometheus-metrics-viewer
+ subjects:
+ - kind: ServiceAccount
+ namespace: {{ role_namespace }}
+ name: {{ role_user }}
diff --git a/roles/openshift_logging_eventrouter/README.md b/roles/openshift_logging_eventrouter/README.md
new file mode 100644
index 000000000..da313d68b
--- /dev/null
+++ b/roles/openshift_logging_eventrouter/README.md
@@ -0,0 +1,20 @@
+Event router
+------------
+
+A pod that forwards Kubernetes events to the EFK aggregated logging stack.
+
+- **eventrouter** is deployed to the logging project; it has a service account and its own role that allows it to read events
+- **eventrouter** watches Kubernetes events, marshals them to JSON and writes them to its sink; currently the available sinks only differ in how they format output to STDOUT
+- **fluentd** picks the events up and inserts them into the Elasticsearch *.operations* index
+
+- `openshift_logging_install_eventrouter`: When 'True', eventrouter will be installed. When 'False', eventrouter will be uninstalled.
+
+Configuration variables:
+
+- `openshift_logging_eventrouter_image_prefix`: The prefix for the eventrouter logging image. Defaults to `openshift_logging_image_prefix`.
+- `openshift_logging_eventrouter_image_version`: The image version for the logging eventrouter. Defaults to 'latest'.
+- `openshift_logging_eventrouter_sink`: Select a sink for eventrouter; supported sinks are 'stdout' and 'glog'. Defaults to 'stdout'.
+- `openshift_logging_eventrouter_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the pod will land.
+- `openshift_logging_eventrouter_cpu_limit`: The amount of CPU to allocate to eventrouter. Defaults to '100m'.
+- `openshift_logging_eventrouter_memory_limit`: The memory limit for eventrouter pods. Defaults to '128Mi'.
+- `openshift_logging_eventrouter_namespace`: The namespace where eventrouter is deployed. Defaults to 'default'.
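
Tying the new role to the variables documented in this README and in the openshift_logging README above, a minimal sketch of inventory settings that would deploy the eventrouter alongside logging (the node selector value is an illustrative label, not a default):

```yaml
# Illustrative inventory/group_vars snippet for the eventrouter.
openshift_logging_install_logging: true
openshift_logging_install_eventrouter: true           # both must be true for the eventrouter to be installed
openshift_logging_eventrouter_sink: stdout             # 'stdout' or 'glog'
openshift_logging_eventrouter_nodeselector: {"region": "infra"}
openshift_logging_eventrouter_memory_limit: 128Mi
```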
diff --git a/roles/openshift_logging_eventrouter/defaults/main.yaml b/roles/openshift_logging_eventrouter/defaults/main.yaml
new file mode 100644
index 000000000..34e33f75f
--- /dev/null
+++ b/roles/openshift_logging_eventrouter/defaults/main.yaml
@@ -0,0 +1,9 @@
+---
+openshift_logging_eventrouter_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}"
+openshift_logging_eventrouter_image_version: "{{ openshift_logging_image_version | default('latest') }}"
+openshift_logging_eventrouter_replicas: 1
+openshift_logging_eventrouter_sink: stdout
+openshift_logging_eventrouter_nodeselector: ""
+openshift_logging_eventrouter_cpu_limit: 100m
+openshift_logging_eventrouter_memory_limit: 128Mi
+openshift_logging_eventrouter_namespace: default
diff --git a/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml b/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml
new file mode 100644
index 000000000..91708e54b
--- /dev/null
+++ b/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml
@@ -0,0 +1,103 @@
+# this openshift template should match (except nodeSelector) jinja2 template in
+# ../templates/eventrouter-template.j2
+kind: Template
+apiVersion: v1
+metadata:
+ name: eventrouter-template
+ annotations:
+ description: "A pod forwarding kubernetes events to EFK aggregated logging stack."
+ tags: "events,EFK,logging"
+objects:
+ - kind: ServiceAccount
+ apiVersion: v1
+ metadata:
+ name: aggregated-logging-eventrouter
+ - kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: event-reader
+ rules:
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["get", "watch", "list"]
+ - kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: logging-eventrouter
+ data:
+ config.json: |-
+ {
+ "sink": "${SINK}"
+ }
+ - kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: logging-eventrouter
+ labels:
+ component: eventrouter
+ logging-infra: eventrouter
+ provider: openshift
+ spec:
+ selector:
+ component: eventrouter
+ logging-infra: eventrouter
+ provider: openshift
+ replicas: ${REPLICAS}
+ template:
+ metadata:
+ labels:
+ component: eventrouter
+ logging-infra: eventrouter
+ provider: openshift
+ name: logging-eventrouter
+ spec:
+ serviceAccount: aggregated-logging-eventrouter
+ serviceAccountName: aggregated-logging-eventrouter
+ containers:
+ - name: kube-eventrouter
+ image: ${IMAGE}
+ imagePullPolicy: Always
+ resources:
+ limits:
+ memory: ${MEMORY}
+ cpu: ${CPU}
+            requests:
+ memory: ${MEMORY}
+ volumeMounts:
+ - name: config-volume
+ mountPath: /etc/eventrouter
+ volumes:
+ - name: config-volume
+ configMap:
+ name: logging-eventrouter
+ - kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: event-reader-binding
+ subjects:
+ - kind: ServiceAccount
+ name: aggregated-logging-eventrouter
+ namespace: ${NAMESPACE}
+ roleRef:
+ kind: ClusterRole
+ name: event-reader
+
+parameters:
+ - name: SINK
+ displayName: Sink
+ value: stdout
+ - name: REPLICAS
+ displayName: Replicas
+ value: "1"
+ - name: IMAGE
+ displayName: Image
+ value: "docker.io/openshift/origin-logging-eventrouter:latest"
+ - name: MEMORY
+ displayName: Memory
+ value: "128Mi"
+ - name: CPU
+ displayName: CPU
+ value: "100m"
+ - name: NAMESPACE
+ displayName: Namespace
+ value: default
diff --git a/roles/openshift_logging_eventrouter/tasks/delete_eventrouter.yaml b/roles/openshift_logging_eventrouter/tasks/delete_eventrouter.yaml
new file mode 100644
index 000000000..cf0abbde9
--- /dev/null
+++ b/roles/openshift_logging_eventrouter/tasks/delete_eventrouter.yaml
@@ -0,0 +1,40 @@
+---
+# delete eventrouter
+- name: Delete EventRouter service account
+ oc_serviceaccount:
+ state: absent
+ name: "aggregated-logging-eventrouter"
+ namespace: "{{ openshift_logging_eventrouter_namespace }}"
+
+- name: Delete event-reader cluster role
+ oc_clusterrole:
+ state: absent
+ name: event-reader
+
+- name: Unset privileged permissions for EventRouter
+ oc_adm_policy_user:
+ namespace: "{{ openshift_logging_eventrouter_namespace }}"
+ resource_kind: cluster-role
+ resource_name: event-reader
+ state: absent
+ user: "system:serviceaccount:{{ openshift_logging_eventrouter_namespace }}:aggregated-logging-eventrouter"
+
+- name: Delete EventRouter configmap
+ oc_configmap:
+ state: absent
+ name: logging-eventrouter
+ namespace: "{{ openshift_logging_eventrouter_namespace }}"
+
+- name: Delete EventRouter DC
+ oc_obj:
+ state: absent
+ name: logging-eventrouter
+ namespace: "{{ openshift_logging_eventrouter_namespace }}"
+ kind: dc
+
+- name: Delete EventRouter Template
+ oc_obj:
+ state: absent
+ name: eventrouter-template
+ namespace: "{{ openshift_logging_eventrouter_namespace }}"
+ kind: template
diff --git a/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
new file mode 100644
index 000000000..8df7435e2
--- /dev/null
+++ b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
@@ -0,0 +1,59 @@
+---
+# initial checks
+- assert:
+ msg: Invalid sink type "{{openshift_logging_eventrouter_sink}}", only one of "{{__eventrouter_sinks}}" allowed
+ that: openshift_logging_eventrouter_sink in __eventrouter_sinks
+
+# allow passing in a tempdir
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+# create EventRouter deployment config
+- name: Generate EventRouter template
+ template:
+ src: eventrouter-template.j2
+ dest: "{{ tempdir }}/templates/eventrouter-template.yaml"
+ vars:
+ node_selector: "{{ openshift_logging_eventrouter_nodeselector | default({}) }}"
+
+- name: Create EventRouter template
+ oc_obj:
+ namespace: "{{ openshift_logging_eventrouter_namespace }}"
+ kind: template
+ name: eventrouter-template
+ state: present
+ files:
+ - "{{ tempdir }}/templates/eventrouter-template.yaml"
+
+- name: Process EventRouter template
+ oc_process:
+ state: present
+ template_name: eventrouter-template
+ namespace: "{{ openshift_logging_eventrouter_namespace }}"
+ params:
+ IMAGE: "{{openshift_logging_eventrouter_image_prefix}}logging-eventrouter:{{openshift_logging_eventrouter_image_version}}"
+ REPLICAS: "{{ openshift_logging_eventrouter_replicas }}"
+ CPU: "{{ openshift_logging_eventrouter_cpu_limit }}"
+ MEMORY: "{{ openshift_logging_eventrouter_memory_limit }}"
+ NAMESPACE: "{{ openshift_logging_eventrouter_namespace }}"
+ SINK: "{{ openshift_logging_eventrouter_sink }}"
+
+## Placeholder for migration when necessary ##
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
diff --git a/roles/openshift_logging_eventrouter/tasks/main.yaml b/roles/openshift_logging_eventrouter/tasks/main.yaml
new file mode 100644
index 000000000..58e5a559f
--- /dev/null
+++ b/roles/openshift_logging_eventrouter/tasks/main.yaml
@@ -0,0 +1,6 @@
+---
+- include: "{{ role_path }}/tasks/install_eventrouter.yaml"
+ when: openshift_logging_install_eventrouter | default(false) | bool
+
+- include: "{{ role_path }}/tasks/delete_eventrouter.yaml"
+ when: not openshift_logging_install_eventrouter | default(false) | bool
diff --git a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
new file mode 100644
index 000000000..9ff4c7e80
--- /dev/null
+++ b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
@@ -0,0 +1,109 @@
+# this jinja2 template should always match (except nodeSelector) openshift template in
+# ../files/eventrouter-template.yaml
+kind: Template
+apiVersion: v1
+metadata:
+ name: eventrouter-template
+ annotations:
+ description: "A pod forwarding kubernetes events to EFK aggregated logging stack."
+ tags: "events,EFK,logging"
+objects:
+ - kind: ServiceAccount
+ apiVersion: v1
+ metadata:
+ name: aggregated-logging-eventrouter
+ - kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: event-reader
+ rules:
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["get", "watch", "list"]
+ - kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: logging-eventrouter
+ data:
+ config.json: |-
+ {
+ "sink": "${SINK}"
+ }
+ - kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: logging-eventrouter
+ labels:
+ component: eventrouter
+ logging-infra: eventrouter
+ provider: openshift
+ spec:
+ selector:
+ component: eventrouter
+ logging-infra: eventrouter
+ provider: openshift
+ replicas: ${REPLICAS}
+ template:
+ metadata:
+ labels:
+ component: eventrouter
+ logging-infra: eventrouter
+ provider: openshift
+ name: logging-eventrouter
+ spec:
+ serviceAccount: aggregated-logging-eventrouter
+ serviceAccountName: aggregated-logging-eventrouter
+{% if node_selector is iterable and node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in node_selector.iteritems() %}
+ {{ key }}: "{{ value }}"
+{% endfor %}
+{% endif %}
+ containers:
+ - name: kube-eventrouter
+ image: ${IMAGE}
+ imagePullPolicy: Always
+ resources:
+ limits:
+ memory: ${MEMORY}
+ cpu: ${CPU}
+            requests:
+ memory: ${MEMORY}
+ volumeMounts:
+ - name: config-volume
+ mountPath: /etc/eventrouter
+ volumes:
+ - name: config-volume
+ configMap:
+ name: logging-eventrouter
+ - kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: event-reader-binding
+ subjects:
+ - kind: ServiceAccount
+ name: aggregated-logging-eventrouter
+ namespace: ${NAMESPACE}
+ roleRef:
+ kind: ClusterRole
+ name: event-reader
+
+parameters:
+ - name: SINK
+ displayName: Sink
+ value: stdout
+ - name: REPLICAS
+ displayName: Replicas
+ value: "1"
+ - name: IMAGE
+ displayName: Image
+ value: "docker.io/openshift/origin-logging-eventrouter:latest"
+ - name: MEMORY
+ displayName: Memory
+ value: "128Mi"
+ - name: CPU
+ displayName: CPU
+ value: "100m"
+ - name: NAMESPACE
+ displayName: Namespace
+ value: default
diff --git a/roles/openshift_logging_eventrouter/vars/main.yaml b/roles/openshift_logging_eventrouter/vars/main.yaml
new file mode 100644
index 000000000..bdf561fe3
--- /dev/null
+++ b/roles/openshift_logging_eventrouter/vars/main.yaml
@@ -0,0 +1,2 @@
+---
+__eventrouter_sinks: ["glog", "stdout"]
diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml
index 30d3d854a..82326bdd1 100644
--- a/roles/openshift_logging_fluentd/defaults/main.yml
+++ b/roles/openshift_logging_fluentd/defaults/main.yml
@@ -50,8 +50,6 @@ openshift_logging_fluentd_aggregating_key_path: none
openshift_logging_fluentd_aggregating_passphrase: none
### Deprecating in 3.6
-openshift_logging_fluentd_es_copy: false
-
# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly
#fluentd_config_contents:
#fluentd_throttle_contents:
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index 74b4d7db4..37960afd1 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -1,5 +1,8 @@
---
- fail:
+ msg: The ES_COPY feature is no longer supported. Please remove the variable from your inventory
+ when: openshift_logging_fluentd_es_copy is defined
+- fail:
msg: Only one Fluentd nodeselector key pair should be provided
when: openshift_logging_fluentd_nodeselector.keys() | count > 1
diff --git a/roles/openshift_logging_fluentd/templates/fluent.conf.j2 b/roles/openshift_logging_fluentd/templates/fluent.conf.j2
index 46de94d60..6e07b403a 100644
--- a/roles/openshift_logging_fluentd/templates/fluent.conf.j2
+++ b/roles/openshift_logging_fluentd/templates/fluent.conf.j2
@@ -49,7 +49,9 @@
@include configs.d/openshift/filter-viaq-data-model.conf
@include configs.d/openshift/filter-post-*.conf
##
+</label>
+<label @OUTPUT>
## matches
@include configs.d/openshift/output-pre-*.conf
@include configs.d/openshift/output-operations.conf
diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2
index a4afb6618..f286b0656 100644
--- a/roles/openshift_logging_fluentd/templates/fluentd.j2
+++ b/roles/openshift_logging_fluentd/templates/fluentd.j2
@@ -66,7 +66,9 @@ spec:
readOnly: true
- name: filebufferstorage
mountPath: /var/lib/fluentd
-{% if openshift_logging_mux_client_mode is defined %}
+{% if openshift_logging_mux_client_mode is defined and
+ ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or
+ (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %}
- name: muxcerts
mountPath: /etc/fluent/muxkeys
readOnly: true
@@ -94,8 +96,6 @@ spec:
value: "{{ openshift_logging_fluentd_ops_client_key }}"
- name: "OPS_CA"
value: "{{ openshift_logging_fluentd_ops_ca }}"
- - name: "ES_COPY"
- value: "false"
- name: "JOURNAL_SOURCE"
value: "{{ openshift_logging_fluentd_journal_source | default('') }}"
- name: "JOURNAL_READ_FROM_HEAD"
@@ -116,10 +116,62 @@ spec:
resource: limits.memory
- name: "FILE_BUFFER_LIMIT"
value: "{{ openshift_logging_fluentd_file_buffer_limit | default('1Gi') }}"
-{% if openshift_logging_mux_client_mode is defined %}
+{% if openshift_logging_mux_client_mode is defined and
+ ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or
+ (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %}
- name: "MUX_CLIENT_MODE"
value: "{{ openshift_logging_mux_client_mode }}"
{% endif %}
+{% if openshift_logging_install_eventrouter is defined and openshift_logging_install_eventrouter %}
+ - name: "TRANSFORM_EVENTS"
+ value: "true"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog is defined and openshift_logging_fluentd_remote_syslog %}
+ - name: USE_REMOTE_SYSLOG
+ value: "true"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_host is defined %}
+ - name: REMOTE_SYSLOG_HOST
+ value: "{{ openshift_logging_fluentd_remote_syslog_host }}"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_port is defined %}
+ - name: REMOTE_SYSLOG_PORT
+ value: "{{ openshift_logging_fluentd_remote_syslog_port }}"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_severity is defined %}
+ - name: REMOTE_SYSLOG_SEVERITY
+ value: "{{ openshift_logging_fluentd_remote_syslog_severity }}"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_facility is defined %}
+ - name: REMOTE_SYSLOG_FACILITY
+ value: "{{ openshift_logging_fluentd_remote_syslog_facility }}"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_remove_tag_prefix is defined %}
+ - name: REMOTE_SYSLOG_REMOVE_TAG_PREFIX
+ value: "{{ openshift_logging_fluentd_remote_syslog_remove_tag_prefix }}"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_tag_key is defined %}
+ - name: REMOTE_SYSLOG_TAG_KEY
+ value: "{{ openshift_logging_fluentd_remote_syslog_tag_key }}"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_use_record is defined %}
+ - name: REMOTE_SYSLOG_USE_RECORD
+ value: "{{ openshift_logging_fluentd_remote_syslog_use_record }}"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_payload_key is defined %}
+ - name: REMOTE_SYSLOG_PAYLOAD_KEY
+ value: "{{ openshift_logging_fluentd_remote_syslog_payload_key }}"
+{% endif %}
+
volumes:
- name: runlogjournal
hostPath:
@@ -148,7 +200,9 @@ spec:
- name: dockerdaemoncfg
hostPath:
path: /etc/docker
-{% if openshift_logging_mux_client_mode is defined %}
+{% if openshift_logging_mux_client_mode is defined and
+ ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or
+ (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %}
- name: muxcerts
secret:
secretName: logging-mux
diff --git a/roles/openshift_logging_mux/files/fluent.conf b/roles/openshift_logging_mux/files/fluent.conf
index aeaa705ee..bf61c9811 100644
--- a/roles/openshift_logging_mux/files/fluent.conf
+++ b/roles/openshift_logging_mux/files/fluent.conf
@@ -25,7 +25,9 @@
@include configs.d/openshift/filter-viaq-data-model.conf
@include configs.d/openshift/filter-post-*.conf
##
+</label>
+<label @OUTPUT>
## matches
@include configs.d/openshift/output-pre-*.conf
@include configs.d/openshift/output-operations.conf
diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
index ff18d3270..4cc48139f 100644
--- a/roles/openshift_logging_mux/templates/mux.j2
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -119,6 +119,52 @@ spec:
resource: limits.memory
- name: "FILE_BUFFER_LIMIT"
value: "{{ openshift_logging_mux_file_buffer_limit | default('2Gi') }}"
+
+{% if openshift_logging_mux_remote_syslog is defined and openshift_logging_mux_remote_syslog %}
+ - name: USE_REMOTE_SYSLOG
+ value: "true"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_host is defined %}
+ - name: REMOTE_SYSLOG_HOST
+ value: "{{ openshift_logging_mux_remote_syslog_host }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_port is defined %}
+ - name: REMOTE_SYSLOG_PORT
+ value: "{{ openshift_logging_mux_remote_syslog_port }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_severity is defined %}
+ - name: REMOTE_SYSLOG_SEVERITY
+ value: "{{ openshift_logging_mux_remote_syslog_severity }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_facility is defined %}
+ - name: REMOTE_SYSLOG_FACILITY
+ value: "{{ openshift_logging_mux_remote_syslog_facility }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_remove_tag_prefix is defined %}
+ - name: REMOTE_SYSLOG_REMOVE_TAG_PREFIX
+ value: "{{ openshift_logging_mux_remote_syslog_remove_tag_prefix }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_tag_key is defined %}
+ - name: REMOTE_SYSLOG_TAG_KEY
+ value: "{{ openshift_logging_mux_remote_syslog_tag_key }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_use_record is defined %}
+ - name: REMOTE_SYSLOG_USE_RECORD
+ value: "{{ openshift_logging_mux_remote_syslog_use_record }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_payload_key is defined %}
+ - name: REMOTE_SYSLOG_PAYLOAD_KEY
+ value: "{{ openshift_logging_mux_remote_syslog_payload_key }}"
+{% endif %}
+
volumes:
- name: config
configMap:
diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml
index 7789d2232..088d0b171 100644
--- a/roles/openshift_manageiq/tasks/main.yaml
+++ b/roles/openshift_manageiq/tasks/main.yaml
@@ -1,8 +1,4 @@
---
-- fail:
- msg: "The openshift_manageiq role requires OpenShift Enterprise 3.1 or Origin 1.1."
- when: not openshift.common.version_gte_3_1_or_1_1 | bool
-
- name: Add Management Infrastructure project
oc_project:
name: management-infra
@@ -61,4 +57,3 @@
resource_kind: "{{ item.resource_kind }}"
user: "{{ item.user }}"
with_items: "{{manage_iq_openshift_3_2_tasks}}"
- when: openshift.common.version_gte_3_2_or_1_2 | bool
diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml
index 7ccc2fc3b..f142f89f0 100644
--- a/roles/openshift_manageiq/vars/main.yml
+++ b/roles/openshift_manageiq/vars/main.yml
@@ -3,6 +3,9 @@ manage_iq_tasks:
- resource_kind: role
resource_name: admin
user: management-admin
+- resource_kind: role
+ resource_name: admin
+ user: system:serviceaccount:management-infra:management-admin
- resource_kind: cluster-role
resource_name: management-infra-admin
user: system:serviceaccount:management-infra:management-admin
diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md
new file mode 100644
index 000000000..3a71d9211
--- /dev/null
+++ b/roles/openshift_management/README.md
@@ -0,0 +1,475 @@
+# CloudForms Availability
+
+As noted in [Limitations - Product Choice](#product-choice),
+[CloudForms](https://www.redhat.com/en/technologies/management/cloudforms)
+(CFME) 4.6 is not yet released. Until such time, this role is limited
+to installing [ManageIQ](http://manageiq.org) (MIQ), the open source
+project that CFME is based on.
+
+After CFME 4.6 is available to customers this role will enable
+(optional) logic which will install CFME or MIQ based on your
+deployment type (`openshift_deployment_type`):
+
+* `openshift-enterprise` → CloudForms
+* `origin` → ManageIQ
+
+
+# Table of Contents
+
+ * [Introduction](#introduction)
+ * [Important Notes](#important-notes)
+ * [Requirements](#requirements)
+ * [Role Variables](#role-variables)
+ * [Getting Started](#getting-started)
+ * [All Defaults](#all-defaults)
+ * [External NFS Storage](#external-nfs-storage)
+ * [Override PV sizes](#override-pv-sizes)
+ * [Override Memory Requirements](#override-memory-requirements)
+ * [External PostgreSQL Database](#external-postgresql-database)
+ * [Limitations](#limitations)
+ * [Product Choice](#product-choice)
+ * [Configuration](#configuration)
+ * [Database](#database)
+ * [Podified](#podified)
+ * [External](#external)
+ * [Storage Classes](#storage-classes)
+ * [NFS (Default)](#nfs-default)
+ * [NFS External](#nfs-external)
+ * [Cloud Provider](#cloud-provider)
+ * [Preconfigured (Expert Configuration Only)](#preconfigured-expert-configuration-only)
+ * [Customization](#customization)
+ * [Uninstall](#uninstall)
+ * [Additional Information](#additional-information)
+
+# Introduction
+
+This role allows a user to install CFME 4.6 or MIQ on an OCP 3.7
+cluster. The role provides customization options for overriding
+default deployment parameters and supports the following installation
+flavors:
+
+* **Fully Podified** - In this mode all application services are run
+  as pods in the container platform.
+* **External Database** - In this mode the application utilizes an
+  externally hosted database server. All other services are run in the
+  container platform.
+
+This role includes the following storage class options:
+
+* NFS - **Default** - local, on cluster
+* NFS External - NFS somewhere else, like a storage appliance
+* Cloud Provider - Use automatic storage provisioning from your cloud
+ provider (*gce* or *aws*)
+* Preconfigured - **expert only**, assumes you created everything ahead
+ of time
+
+You may skip ahead to the [Getting Started](#getting-started) section
+now for examples of how to set up your Ansible inventory for various
+deployment configurations. However, you are **strongly urged** to
+first read through the [Configuration](#configuration) and
+[Customization](#customization) sections as well as the following
+[Important Notes](#important-notes).
+
+## Important Notes
+
+Not all parameters are present in **both** template versions (podified
+db and external db). For example, while the podified database template
+has a `POSTGRESQL_MEM_REQ` parameter, no such parameter is present in
+the external db template, because an external database runs outside the
+cluster and needs no database pod.
+
+*Be extra careful* if you are overriding template
+parameters. Including parameters not defined in a template **will
+cause errors**.
+
+**Container Provider Integration** - If you want to add your container
+platform (OCP/Origin) as a *Container Provider* in CFME/MIQ then you
+must ensure that the infrastructure management hooks are installed.
+
+* During your OCP/Origin install, ensure that the
+  `openshift_use_manageiq` parameter is set to `true` in your
+  inventory. This will create a `management-infra` project and a
+  service account user.
+* After CFME/MIQ is installed, obtain the `management-admin` service
+ account token and copy it somewhere safe.
+
+```bash
+$ oc serviceaccounts get-token -n management-infra management-admin
+eyJhuGdiOiJSUzI1NiIsInR5dCI6IkpXVCJ9.eyJpd9MiOiJrbWJldm5lbGVzL9NldnZpY2VhY2NvbW50Iiwiy9ViZXJuZXRldy5puy9zZXJ2yWNlYWNju9VubC9uYW1ld9BhY2UiOiJtYW5hZ2VtZW50LWluZnJhIiwiy9ViZXJuZXRldy5puy9zZXJ2yWNlYWNju9VubC9zZWNyZXQuumFtZSI6Im1humFnZW1lunQtYWRtyW4tbG9rZW4tdDBnOTAiLCJrbWJldm5lbGVzLmlvL9NldnZpY2VhY2NvbW50L9NldnZpY2UtYWNju9VubC5uYW1lIjoiuWFuYWbluWVubC1hZG1puiIsImt1YmVyumV0ZXMuyW8vd2VybmljZWFjY291unQvd2VybmljZS1hY2NvbW50LnVpZCI6IjRiZDM2MWQ1LWE1NDAtMTFlNy04YzI5LTUyNTQwMDliMmNkZCIsInN1YiI6InN5d9RluTpzZXJ2yWNlYWNju9VubDptYW5hZ2VtZW50LWluZnJhOm1humFnZW1lunQtYWRtyW4ifQ.B6sZLGD9O4vBu9MHwiG-C_4iEwjBXb7Af8BPw-LNlujDmHhOnQ-Oo4QxQKyj9edynfmDy2yutUyJ2Mm9HfDGWg4C9xhWImHoq6Nl7T5_9djkeGKkK7Ejvg4fA-IkrzEsZeQuluBvXnE6wvP0LCjUo_dx4pPyZJyp46teV9NqKQeDzeysjlMCyqp6AK6-Lj8ILG8YA6d_97HlzL_EgFBLAu0lBSn-uC_9J0gLysqBtK6TI0nExfhv9Bm1_5bdHEbKHPW7xIlYlI9AgmyTyhsQ6SoQWtL2khBjkG9TlPBq9wYJj9bzqgVZlqEfICZxgtXO7sYyuoje4y8lo0YQ0kZmig
+```
+
+* In the CFME/MIQ web interface, navigate to `Compute` →
+ `Containers` → `Providers` and select `⚙ Configuration` → `⊕
+ Add a new Containers Provider`
+
+*See the [upstream documentation](http://manageiq.org/docs/reference/latest/doc-Managing_Providers/miq/index.html#containers-providers) for additional information.*
+
+
+
+# Requirements
+
+The **default** requirements are listed in the table below. These can
+be overridden through customization parameters (See
+[Customization](#customization), below).
+
+**Note** that if these requirements are not satisfied, application
+performance will suffer, or the deployment may fail entirely.
+
+
+| Item | Requirement | Description | Customization Parameter |
+|---------------------|---------------|----------------------------------------------|-------------------------------|
+| Application Memory | `≥ 4.0 Gi` | Minimum required memory for the application | `APPLICATION_MEM_REQ` |
+| Application Storage | `≥ 5.0 Gi` | Minimum PV size required for the application | `APPLICATION_VOLUME_CAPACITY` |
+| PostgreSQL Memory | `≥ 6.0 Gi` | Minimum required memory for the database | `POSTGRESQL_MEM_REQ` |
+| PostgreSQL Storage | `≥ 15.0 Gi` | Minimum PV size required for the database | `DATABASE_VOLUME_CAPACITY` |
+| Cluster Hosts | `≥ 3` | Number of hosts in your cluster | |
+
+The implications of this table are summarized below:
+
+* You need several cluster nodes
+* Your cluster nodes must have lots of memory available
+* You will need several GiB of storage available, either locally or
+ on your cloud provider
+* PV sizes can be changed by providing override values to template
+ parameters (see also: [Customization](#customization))
+
+# Role Variables
+
+The following is a table of the publicly exposed variables that may be
+used in your Ansible inventory to control the behavior of this
+installer.
+
+
+| Variable | Required | Default | Description |
+|------------------------------------------------|:--------:|:------------------------------:|-------------------------------------|
+| `openshift_management_project` | **No** | `openshift-management` | Namespace for the installation. |
+| `openshift_management_project_description` | **No** | *CloudForms Management Engine* | Namespace/project description. |
+| `openshift_management_install_management` | **No** | `false` | Boolean, set to `true` to install the application |
+| **PRODUCT CHOICE** | | | |
+| `openshift_management_app_template` | **No** | `miq-template` | The project flavor to install. Choices: <ul><li>`miq-template`: ManageIQ using a podified database</li> <li> `miq-template-ext-db`: ManageIQ using an external database</li> <li>`cfme-template`: CloudForms using a podified database<sup>[1]</sup></li> <li> `cfme-template-ext-db`: CloudForms using an external database.<sup>[1]</sup></li></ul> |
+| **STORAGE CLASSES** | | | |
+| `openshift_management_storage_class` | **No** | `nfs` | Storage type to use, choices: <ul><li>`nfs` - Best used for proof-of-concept installs. Will set up NFS on a cluster host (defaults to your first master in the inventory file) to back the required PVCs. The application requires a PVC and the database (which may be hosted externally) may require a second. PVC minimum required sizes are 5GiB for the MIQ application, and 15GiB for the PostgreSQL database (20GiB minimum available space on a volume/partition if used specifically for NFS purposes)</li> <li>`nfs_external` - You are using an external NFS server, such as a netapp appliance. See the [Configuration - Storage Classes](#storage-classes) section below for required information.</li> <li>`preconfigured` - This CFME role will do NOTHING to modify storage settings. This option assumes expert knowledge and that you have done everything required ahead of time.</li> <li>`cloudprovider` - You are using an OCP cloudprovider integration for your storage class. For this to work you must have already configured the required inventory parameters for your cloud provider. Ensure `openshift_cloudprovider_kind` is defined (aws or gce) and that the applicable cloudprovider parameters are provided.</li></ul> |
+| `openshift_management_storage_nfs_external_hostname` | **No** | `false` | If you are using an *external NFS server*, such as a netapp appliance, then you must set the hostname here. Leave the value as `false` if you are not using external NFS. <br /> *Additionally*: **External NFS REQUIRES** that you create the NFS exports that will back the application PV and optionally the database PV.
+| `openshift_management_storage_nfs_base_dir` | **No** | `/exports/` | If you are using **External NFS** then you may set the base path to the exports location here. <br />**Local NFS Note**: You *may* also change this value if you want to change the default path used for local NFS exports. |
+| `openshift_management_storage_nfs_local_hostname` | **No** | `false` | If you do not have an `[nfs]` group in your inventory, or want to simply manually define the local NFS host in your cluster, set this parameter to the hostname of the preferred NFS server. The server must be a part of your OCP/Origin cluster. |
+| **CUSTOMIZATION OPTIONS** | | | |
+| `openshift_management_template_parameters` | **No** | `{}` | A dictionary of any parameters you want to override in the application/pv templates.
+
+* <sup>[1]</sup> The `cfme-template`s will be available and
+  automatically detected once CFME 4.6 is released.
+
+
+# Getting Started
+
+Below are some inventory snippets that can help you get started right
+away.
+
+If you want to install CFME/MIQ at the same time you install your
+OCP/Origin cluster, ensure that `openshift_management_install_management` is set
+to `true` in your inventory. Call the standard
+`playbooks/byo/config.yml` playbook to begin the cluster and CFME/MIQ
+installation.
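+
+For example, a minimal inventory sketch enabling the combined install
+(all other cluster settings are assumed to be defined elsewhere in
+your inventory) might look like:
+
+```ini
+[OSEv3:vars]
+# Install CFME/MIQ as part of the cluster install
+openshift_management_install_management=true
+```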
+
+If you are installing CFME/MIQ on an *already provisioned cluster*
+then you can call the CFME/MIQ playbook directly:
+
+```
+$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/config.yml
+```
+
+*Note: Use `miq-template` in the following examples for ManageIQ installs*
+
+## All Defaults
+
+This example is the simplest. All of the default values and choices
+are used. This will result in a fully podified CFME installation. All
+application components, as well as the PostgreSQL database, will be
+created as pods in the container platform.
+
+```ini
+[OSEv3:vars]
+openshift_management_app_template=cfme-template
+```
+
+## External NFS Storage
+
+This is the same as the previous example, except that instead of using
+local NFS services in the cluster it will use an external NFS server
+(such as a storage appliance). Note the two new parameters:
+
+* `openshift_management_storage_class` - set to `nfs_external`
+* `openshift_management_storage_nfs_external_hostname` - set to the hostname
+ of the NFS server
+
+```ini
+[OSEv3:vars]
+openshift_management_app_template=cfme-template
+openshift_management_storage_class=nfs_external
+openshift_management_storage_nfs_external_hostname=nfs.example.com
+```
+
+If the external NFS host exports directories under a different parent
+directory, such as `/exports/hosted/prod` then we would add an
+additional parameter, `openshift_management_storage_nfs_base_dir`:
+
+```ini
+# ...
+openshift_management_storage_nfs_base_dir=/exports/hosted/prod
+```
+
+## Override PV sizes
+
+This example will override the PV sizes. Note that we set the PV sizes
+in the template parameters, `openshift_management_template_parameters`. This
+ensures that the application/db will be able to make claims on created
+PVs without clobbering each other.
+
+```ini
+[OSEv3:vars]
+openshift_management_app_template=cfme-template
+openshift_management_template_parameters={'APPLICATION_VOLUME_CAPACITY': '10Gi', 'DATABASE_VOLUME_CAPACITY': '25Gi'}
+```
+
+## Override Memory Requirements
+
+In a test or proof-of-concept installation you may need to reduce the
+application/database memory requirements to fit within your
+capacity. Note that reducing memory limits can result in reduced
+performance or a complete failure to initialize the application.
+
+```ini
+[OSEv3:vars]
+openshift_management_app_template=cfme-template
+openshift_management_template_parameters={'APPLICATION_MEM_REQ': '3000Mi', 'POSTGRESQL_MEM_REQ': '1Gi', 'ANSIBLE_MEM_REQ': '512Mi'}
+```
+
+Here we have instructed the installer to process the application
+template with the parameter `APPLICATION_MEM_REQ` set to `3000Mi`,
+`POSTGRESQL_MEM_REQ` set to `1Gi`, and `ANSIBLE_MEM_REQ` set to
+`512Mi`.
+
+These parameters can be combined with the PV size override parameters
+displayed in the previous example.
+
+## External PostgreSQL Database
+
+To use an external database you must change the
+`openshift_management_app_template` parameter value to `miq-template-ext-db`
+or `cfme-template-ext-db`.
+
+Additionally, database connection information **must** be supplied in
+the `openshift_management_template_parameters` customization parameter. See
+[Customization - Database - External](#external) for more
+information.
+
+```ini
+[OSEv3:vars]
+openshift_management_app_template=cfme-template-ext-db
+openshift_management_template_parameters={'DATABASE_USER': 'root', 'DATABASE_PASSWORD': 'r1ck&M0r7y', 'DATABASE_IP': '10.10.10.10', 'DATABASE_PORT': '5432', 'DATABASE_NAME': 'cfme'}
+```
+
+# Limitations
+
+This release is the first OpenShift CFME release in the OCP 3.7
+series. It is not complete yet.
+
+## Product Choice
+
+Due to staggered release dates, **CFME support is not
+integrated**. Presently this role will only deploy a ManageIQ
+installation. This role will be updated once CFME 4.6 is released and
+this limitation note will be removed.
+
+# Configuration
+
+Before you can deploy CFME you must decide *how* you want to deploy
+it. There are two major decisions to make:
+
+1. Do you want an external, or a podified database?
+1. Which storage class will back your PVs?
+
+## Database
+
+### Podified
+
+Any `POSTGRES_*` or `DATABASE_*` template parameters in
+[miq-template.yaml](files/templates/manageiq/miq-template.yaml) or
+[cfme-template.yaml](files/templates/cloudforms/cfme-template.yaml)
+may be customized through the `openshift_management_template_parameters`
+hash.
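+
+For example, a hypothetical inventory snippet overriding two of the
+podified database parameters (the values shown are illustrative only)
+might look like:
+
+```ini
+[OSEv3:vars]
+# Override the podified PostgreSQL memory request and volume size
+openshift_management_template_parameters={'POSTGRESQL_MEM_REQ': '6Gi', 'DATABASE_VOLUME_CAPACITY': '15Gi'}
+```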
+
+### External
+
+Any `POSTGRES_*` or `DATABASE_*` template parameters in
+[miq-template-ext-db.yaml](files/templates/manageiq/miq-template-ext-db.yaml)
+or
+[cfme-template-ext-db.yaml](files/templates/cloudforms/cfme-template-ext-db.yaml)
+may be customized through the `openshift_management_template_parameters`
+hash.
+
+External PostgreSQL databases require you to provide database
+connection parameters. You must set the required connection keys in
+the `openshift_management_template_parameters` parameter in your
+inventory. The following keys are required:
+
+* `DATABASE_USER`
+* `DATABASE_PASSWORD`
+* `DATABASE_IP`
+* `DATABASE_PORT` - *note: Most PostgreSQL servers run on port `5432`*
+* `DATABASE_NAME`
+
+Your inventory would contain a line similar to this:
+
+```ini
+[OSEv3:vars]
+openshift_management_app_template=cfme-template-ext-db
+openshift_management_template_parameters={'DATABASE_USER': 'root', 'DATABASE_PASSWORD': 'r1ck&M0r7y', 'DATABASE_IP': '10.10.10.10', 'DATABASE_PORT': '5432', 'DATABASE_NAME': 'cfme'}
+```
+
+**Note** the new value for the `openshift_management_app_template`
+parameter, `cfme-template-ext-db` (ManageIQ installations would use
+`miq-template-ext-db` instead).
+
+At run time you may run into errors similar to this:
+
+```
+TASK [openshift_management : Ensure the CFME App is created] ***********************************
+task path: /home/tbielawa/rhat/os/openshift-ansible/roles/openshift_management/tasks/main.yml:74
+Tuesday 03 October 2017 15:30:44 -0400 (0:00:00.056) 0:00:12.278 *******
+{"cmd": "/usr/bin/oc create -f /tmp/postgresql-ZPEWQS -n openshift-management", "kind": "Endpoints", "results": {}, "returncode": 1, "stderr": "Error from server (BadRequest): error when creating \"/tmp/postgresql-ZPEWQS\": Endpoints in version \"v1\" cannot be handled as a Endpoints: [pos 218]: json: decNum: got first char 'f'\n", "stdout": ""}
+```
+
+Or like this:
+
+```
+TASK [openshift_management : Ensure the CFME App is created] ***********************************
+task path: /home/tbielawa/rhat/os/openshift-ansible/roles/openshift_management/tasks/main.yml:74
+Tuesday 03 October 2017 16:05:36 -0400 (0:00:00.052) 0:00:18.948 *******
+fatal: [m01.example.com]: FAILED! => {"changed": true, "failed": true, "msg":
+{"cmd": "/usr/bin/oc create -f /tmp/postgresql-igS5sx -n openshift-management", "kind": "Endpoints", "results": {}, "returncode": 1, "stderr": "The Endpoints \"postgresql\" is invalid: subsets[0].addresses[0].ip: Invalid value: \"doo\": must be a valid IP address, (e.g. 10.9.8.7)\n", "stdout": ""},
+```
+
+While intimidating at first, these errors contain useful bits of
+information. Examine the error output closely and you can tell exactly
+what is wrong.
+
+In the first example we see `Endpoints in version \"v1\" cannot be
+handled as a Endpoints: [pos 218]: json: decNum: got first char
+...`. This is because in my example I used the value `foo` for the
+parameter `DATABASE_PORT`.
+
+In the second example we see `The Endpoints \"postgresql\" is invalid:
+subsets[0].addresses[0].ip: Invalid value: \"doo\": must be a valid IP
+address ...`. This is because in my example I used the value `doo` in
+the `DATABASE_IP` field.
+
+Fortunately, when the templates are processed behind the scenes they
+also run type-checking validation. So don't worry: just look closely
+at the errors and ensure you are providing the correct values for each
+parameter.
+
+## Storage Classes
+
+OpenShift CFME supports several storage class options.
+
+### NFS (Default)
+
+The NFS storage class is best suited for proof-of-concept and
+test/demo deployments. It is also the **default** storage class for
+deployments. No additional configuration is required for this
+choice.
+
+Customization is provided through the following role variables:
+
+* `openshift_management_storage_nfs_base_dir`
+* `openshift_management_storage_nfs_local_hostname`
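+
+For example, a hypothetical snippet setting both variables (the path
+and hostname shown are placeholders) might look like:
+
+```ini
+[OSEv3:vars]
+# Change the local NFS export base path and pin the local NFS host
+openshift_management_storage_nfs_base_dir=/exports/management
+openshift_management_storage_nfs_local_hostname=master01.example.com
+```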
+
+### NFS External
+
+External NFS relies on pre-configured NFS servers to provide exports
+for the required PVs. For external NFS you must have:
+
+* For CFME: a `cfme-app` export and, optionally, a `cfme-db` export (for a podified database)
+* For ManageIQ: an `miq-app` export and, optionally, an `miq-db` export (for a podified database)
+
+Configuration is provided through the following role variables:
+
+* `openshift_management_storage_nfs_external_hostname`
+* `openshift_management_storage_nfs_base_dir`
+
+The `openshift_management_storage_nfs_external_hostname` parameter must be
+set to the hostname or IP of your external NFS server.
+
+If `/exports` is not the parent directory to your exports then you
+must set the base directory via the
+`openshift_management_storage_nfs_base_dir` parameter.
+
+For example, if your server export is `/exports/hosted/prod/cfme-app`
+then you must set
+`openshift_management_storage_nfs_base_dir=/exports/hosted/prod`.
+
+### Cloud Provider
+
+CFME can also use cloud provider storage to back the required PVs. For
+this functionality to work you must have also configured the
+`openshift_cloudprovider_kind` variable and all associated parameters
+specific to your chosen cloud provider.
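+
+For example, a minimal sketch using AWS-backed storage (this assumes
+the rest of your AWS cloud provider parameters are already configured
+in the inventory) might look like:
+
+```ini
+[OSEv3:vars]
+# Cloud provider integration backs the PVs for the application
+openshift_cloudprovider_kind=aws
+openshift_management_storage_class=cloudprovider
+```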
+
+Using this storage class, when the application is created the required
+PVs will automatically be provisioned using the configured cloud
+provider storage integration.
+
+There are no additional variables to configure the behavior of this
+storage class.
+
+### Preconfigured (Expert Configuration Only)
+
+The *preconfigured* storage class implies that you know exactly what
+you're doing and that all storage requirements have been taken care of
+ahead of time. Typically this means that you've already created the
+correctly sized PVs.
+
+There are no additional variables to configure the behavior of this
+storage class.
+
+# Customization
+
+Application and database parameters may be customized by means of the
+`openshift_management_template_parameters` inventory parameter.
+
+**For example**, if you wanted to reduce the memory requirement of the
+PostgreSQL pod then you could configure the parameter like this:
+
+`openshift_management_template_parameters={'POSTGRESQL_MEM_REQ': '1Gi'}`
+
+When the CFME template is processed `1Gi` will be used for the value
+of the `POSTGRESQL_MEM_REQ` template parameter.
+
+Any parameter in the `parameters` section of the
+[miq-template.yaml](files/templates/manageiq/miq-template.yaml) or
+[miq-template-ext-db.yaml](files/templates/manageiq/miq-template-ext-db.yaml)
+may be overridden through the `openshift_management_template_parameters`
+hash. This applies to **CloudForms** installations as well:
+[cfme-template.yaml](files/templates/cloudforms/cfme-template.yaml),
+[cfme-template-ext-db.yaml](files/templates/cloudforms/cfme-template-ext-db.yaml).
+
+
+# Uninstall
+
+This role includes a playbook to uninstall and erase the CFME/MIQ
+installation:
+
+* `playbooks/byo/openshift-management/uninstall.yml`
+
+# Additional Information
+
+The upstream project,
+[@manageiq/manageiq-pods](https://github.com/ManageIQ/manageiq-pods),
+contains a wealth of additional information useful for managing and
+operating your CFME installation. Topics include:
+
+* [Verifying Successful Installation](https://github.com/ManageIQ/manageiq-pods#verifying-the-setup-was-successful)
+* [Disabling Image Change Triggers](https://github.com/ManageIQ/manageiq-pods#disable-image-change-triggers)
+* [Scaling CFME](https://github.com/ManageIQ/manageiq-pods#scale-miq)
+* [Backing up and Restoring the DB](https://github.com/ManageIQ/manageiq-pods#backup-and-restore-of-the-miq-database)
+* [Troubleshooting](https://github.com/ManageIQ/manageiq-pods#troubleshooting)
diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml
new file mode 100644
index 000000000..ebb56313f
--- /dev/null
+++ b/roles/openshift_management/defaults/main.yml
@@ -0,0 +1,90 @@
+---
+# Namespace for the CFME project
+openshift_management_project: openshift-management
+# Namespace/project description
+openshift_management_project_description: CloudForms Management Engine
+
+######################################################################
+# BASE TEMPLATE AND DATABASE OPTIONS
+######################################################################
+# Which flavor of CFME would you like? You may install CFME using a
+# podified PostgreSQL server, or you may choose to use an existing
+# PostgreSQL server.
+#
+# Choose 'miq-template' for a podified database install
+# Choose 'miq-template-ext-db' for an external database install
+openshift_management_app_template: miq-template
+# If you are using the miq-template-ext-db template then you must add
+# the required database parameters to the
+# openshift_management_template_parameters variable.
+
+######################################################################
+# STORAGE OPTIONS
+######################################################################
+# DEFAULT - 'nfs'
+# Allowed options: nfs, nfs_external, preconfigured, cloudprovider.
+openshift_management_storage_class: nfs
+# * nfs - Best used for proof-of-concept installs. Will set up NFS on a
+# cluster host (defaults to your first master in the inventory file)
+# to back the required PVCs. The application requires a PVC and the
+# database (which may be hosted externally) may require a
+# second. PVC minimum required sizes are: 5GiB for the MIQ
+# application, and 15GiB for the PostgreSQL database (20GiB minimum
+#   available space on a volume/partition if used specifically for
+# NFS purposes)
+#
+# * nfs_external - You are using an external NFS server, such as a
+# netapp appliance. See the STORAGE - NFS OPTIONS section below for
+# required information.
+#
+# * preconfigured - This CFME role will do NOTHING to modify storage
+# settings. This option assumes expert knowledge and that you have
+# done everything required ahead of time.
+#
+# * cloudprovider - You are using an OCP cloudprovider integration for
+# your storage class. For this to work you must have already
+# configured the required inventory parameters for your cloud
+# provider
+#
+# Ensure 'openshift_cloudprovider_kind' is defined (aws or gce) and
+# that the applicable cloudprovider parameters are provided.
+
+#---------------------------------------------------------------------
+# STORAGE - NFS OPTIONS
+#---------------------------------------------------------------------
+# [OPTIONAL] - If you are using an EXTERNAL NFS server, such as a
+# netapp appliance, then you must set the hostname here. Leave the
+# value as 'false' if you are not using external NFS.
+openshift_management_storage_nfs_external_hostname: false
+# [OPTIONAL] - If you are using external NFS then you must set the base
+# path to the exports location here.
+#
+# Additionally: EXTERNAL NFS REQUIRES that YOU CREATE the NFS exports
+# that will back the application PV and optionally the database
+# PV. Export paths are defined relative to
+# {{ openshift_management_storage_nfs_base_dir }}
+#
+# LOCAL NFS NOTE:
+#
+# You may also change this value if you want to change the default
+# path used for local NFS exports.
+openshift_management_storage_nfs_base_dir: /exports
+#
+# LOCAL NFS NOTE:
+#
+# You may override the automatically selected LOCAL NFS server by
+# setting this variable. Useful for testing specific task files.
+openshift_management_storage_nfs_local_hostname: false
+
+######################################################################
+# SCAFFOLDING - These are parameters we pre-seed that a user may or
+# may not set later
+######################################################################
+# A hash of parameters you want to override or set in the
+# miq-template.yaml or miq-template-ext-db.yaml templates. Set this in
+# your inventory file as a simple hash. Acceptable values are defined
+# under the .parameters list in files/miq-template{-ext-db}.yaml
+# Example:
+#
+# openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'}
+openshift_management_template_parameters: {}
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml
new file mode 100644
index 000000000..c3bc1d20c
--- /dev/null
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml
@@ -0,0 +1,28 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: cloudforms-backup
+spec:
+ template:
+ metadata:
+ name: cloudforms-backup
+ spec:
+ containers:
+ - name: postgresql
+ image: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-postgresql:latest
+ command:
+ - "/opt/rh/cfme-container-scripts/backup_db"
+ env:
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: cloudforms-secrets
+ key: database-url
+ volumeMounts:
+ - name: cfme-backup-vol
+ mountPath: "/backups"
+ volumes:
+ - name: cfme-backup-vol
+ persistentVolumeClaim:
+ claimName: cloudforms-backup
+ restartPolicy: Never
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-backup-pvc.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-backup-pvc.yaml
new file mode 100644
index 000000000..92598ce82
--- /dev/null
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-backup-pvc.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: cloudforms-backup
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 15Gi
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-pv-backup-example.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-pv-backup-example.yaml
new file mode 100644
index 000000000..4fe349897
--- /dev/null
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-pv-backup-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cfme-pv03
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: "/exports/cfme-pv03"
+ server: "<your-nfs-host-here>"
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-pv-db-example.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-pv-db-example.yaml
new file mode 100644
index 000000000..0cdd821b5
--- /dev/null
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-pv-db-example.yaml
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: cloudforms-db-pv
+metadata:
+ name: cloudforms-db-pv
+ annotations:
+ description: PV Template for CFME PostgreSQL DB
+ tags: PVS, CFME
+objects:
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: cfme-db
+ spec:
+ capacity:
+ storage: "${PV_SIZE}"
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: "${BASE_PATH}/cfme-db"
+ server: "${NFS_HOST}"
+ persistentVolumeReclaimPolicy: Retain
+parameters:
+- name: PV_SIZE
+ displayName: PV Size for DB
+ required: true
+ description: The size of the CFME DB PV given in Gi
+ value: 15Gi
+- name: BASE_PATH
+ displayName: Exports Directory Base Path
+ required: true
+ description: The parent directory of your NFS exports
+ value: "/exports"
+- name: NFS_HOST
+ displayName: NFS Server Hostname
+ required: true
+ description: The hostname or IP address of the NFS server
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-pv-server-example.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-pv-server-example.yaml
new file mode 100644
index 000000000..527090ae8
--- /dev/null
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-pv-server-example.yaml
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: cloudforms-app-pv
+metadata:
+ name: cloudforms-app-pv
+ annotations:
+ description: PV Template for CFME Server
+ tags: PVS, CFME
+objects:
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: cfme-app
+ spec:
+ capacity:
+ storage: "${PV_SIZE}"
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: "${BASE_PATH}/cfme-app"
+ server: "${NFS_HOST}"
+ persistentVolumeReclaimPolicy: Retain
+parameters:
+- name: PV_SIZE
+ displayName: PV Size for App
+ required: true
+ description: The size of the CFME APP PV given in Gi
+ value: 5Gi
+- name: BASE_PATH
+ displayName: Exports Directory Base Path
+ required: true
+ description: The parent directory of your NFS exports
+ value: "/exports"
+- name: NFS_HOST
+ displayName: NFS Server Hostname
+ required: true
+ description: The hostname or IP address of the NFS server
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml
new file mode 100644
index 000000000..8b23f8a33
--- /dev/null
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml
@@ -0,0 +1,35 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: cloudforms-restore
+spec:
+ template:
+ metadata:
+ name: cloudforms-restore
+ spec:
+ containers:
+ - name: postgresql
+ image: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-postgresql:latest
+ command:
+ - "/opt/rh/cfme-container-scripts/restore_db"
+ env:
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: cloudforms-secrets
+ key: database-url
+ - name: BACKUP_VERSION
+ value: latest
+ volumeMounts:
+ - name: cfme-backup-vol
+ mountPath: "/backups"
+ - name: cfme-prod-vol
+ mountPath: "/restore"
+ volumes:
+ - name: cfme-backup-vol
+ persistentVolumeClaim:
+ claimName: cloudforms-backup
+ - name: cfme-prod-vol
+ persistentVolumeClaim:
+ claimName: cloudforms-postgresql
+ restartPolicy: Never
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-scc-sysadmin.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-scc-sysadmin.yaml
new file mode 100644
index 000000000..d2ece9298
--- /dev/null
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-scc-sysadmin.yaml
@@ -0,0 +1,38 @@
+allowHostDirVolumePlugin: false
+allowHostIPC: false
+allowHostNetwork: false
+allowHostPID: false
+allowHostPorts: false
+allowPrivilegedContainer: false
+allowedCapabilities:
+apiVersion: v1
+defaultAddCapabilities:
+- SYS_ADMIN
+fsGroup:
+ type: RunAsAny
+groups:
+- system:cluster-admins
+kind: SecurityContextConstraints
+metadata:
+ annotations:
+ kubernetes.io/description: cfme-sysadmin provides all features of the anyuid SCC but allows users to have SYS_ADMIN capabilities. This is the required scc for Pods requiring to run with systemd and the message bus.
+ creationTimestamp:
+ name: cfme-sysadmin
+priority: 10
+readOnlyRootFilesystem: false
+requiredDropCapabilities:
+- MKNOD
+- SYS_CHROOT
+runAsUser:
+ type: RunAsAny
+seLinuxContext:
+ type: MustRunAs
+supplementalGroups:
+ type: RunAsAny
+users:
+volumes:
+- configMap
+- downwardAPI
+- emptyDir
+- persistentVolumeClaim
+- secret
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml
new file mode 100644
index 000000000..4a04f3372
--- /dev/null
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml
@@ -0,0 +1,763 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: cloudforms-ext-db
+metadata:
+ name: cloudforms-ext-db
+ annotations:
+    description: CloudForms appliance with persistent storage using an external DB host
+ tags: instant-app,cloudforms,cfme
+ iconClass: icon-rails
+objects:
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-orchestrator
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-anyuid
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-privileged
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-httpd
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${NAME}-secrets"
+ stringData:
+ pg-password: "${DATABASE_PASSWORD}"
+ database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5
+ v2-key: "${V2_KEY}"
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ stringData:
+ rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}"
+ secret-key: "${ANSIBLE_SECRET_KEY}"
+ admin-password: "${ANSIBLE_ADMIN_PASSWORD}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances CloudForms pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${NAME}"
+ spec:
+ clusterIP: None
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ selector:
+ name: "${NAME}"
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ host: "${APPLICATION_DOMAIN}"
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Redirect
+ to:
+ kind: Service
+ name: "${HTTPD_SERVICE_NAME}"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}"
+ annotations:
+ description: Defines how to deploy the CloudForms appliance
+ spec:
+ serviceName: "${NAME}"
+ replicas: "${APPLICATION_REPLICA_COUNT}"
+ template:
+ metadata:
+ labels:
+ name: "${NAME}"
+ name: "${NAME}"
+ spec:
+ containers:
+ - name: cloudforms
+ image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: "/"
+ port: 80
+ scheme: HTTP
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_REGION
+ value: "${DATABASE_REGION}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/rh/cfme-container-scripts/sync-pv-data"
+ serviceAccount: cfme-orchestrator
+ serviceAccountName: cfme-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Headless service for CloudForms backend pods
+ name: "${NAME}-backend"
+ spec:
+ clusterIP: None
+ selector:
+ name: "${NAME}-backend"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}-backend"
+ annotations:
+ description: Defines how to deploy the CloudForms appliance
+ spec:
+ serviceName: "${NAME}-backend"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${NAME}-backend"
+ name: "${NAME}-backend"
+ spec:
+ containers:
+ - name: cloudforms
+ image: "${BACKEND_APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - MIQ Server
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: MIQ_SERVER_DEFAULT_ROLES
+ value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate
+ - name: FRONTEND_SERVICE_NAME
+ value: "${NAME}"
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/rh/cfme-container-scripts/sync-pv-data"
+ serviceAccount: cfme-orchestrator
+ serviceAccountName: cfme-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: Exposes the memcached server
+ spec:
+ ports:
+ - name: memcached
+ port: 11211
+ targetPort: 11211
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy memcached
+ spec:
+ strategy:
+ type: Recreate
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ labels:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ spec:
+ volumes: []
+ containers:
+ - name: memcached
+ image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
+ ports:
+ - containerPort: 11211
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 5
+ tcpSocket:
+ port: 11211
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 30
+ tcpSocket:
+ port: 11211
+ volumeMounts: []
+ env:
+ - name: MEMCACHED_MAX_MEMORY
+ value: "${MEMCACHED_MAX_MEMORY}"
+ - name: MEMCACHED_MAX_CONNECTIONS
+ value: "${MEMCACHED_MAX_CONNECTIONS}"
+ - name: MEMCACHED_SLAB_PAGE_SIZE
+ value: "${MEMCACHED_SLAB_PAGE_SIZE}"
+ resources:
+ requests:
+ memory: "${MEMCACHED_MEM_REQ}"
+ cpu: "${MEMCACHED_CPU_REQ}"
+ limits:
+ memory: "${MEMCACHED_MEM_LIMIT}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: Remote database service
+ spec:
+ ports:
+ - name: postgresql
+ port: 5432
+ targetPort: "${{DATABASE_PORT}}"
+ selector: {}
+- apiVersion: v1
+ kind: Endpoints
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ subsets:
+ - addresses:
+ - ip: "${DATABASE_IP}"
+ ports:
+ - port: "${{DATABASE_PORT}}"
+ name: postgresql
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances Ansible pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ selector:
+ name: "${ANSIBLE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy the Ansible appliance
+ spec:
+ strategy:
+ type: Recreate
+ serviceName: "${ANSIBLE_SERVICE_NAME}"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ containers:
+ - name: ansible
+ image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 443
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: "/"
+ port: 443
+ scheme: HTTPS
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 443
+ protocol: TCP
+ securityContext:
+ privileged: true
+ env:
+ - name: ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ - name: RABBITMQ_USER_NAME
+ value: "${ANSIBLE_RABBITMQ_USER_NAME}"
+ - name: RABBITMQ_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: rabbit-password
+ - name: ANSIBLE_SECRET_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: secret-key
+ - name: DATABASE_SERVICE_NAME
+ value: "${DATABASE_SERVICE_NAME}"
+ - name: POSTGRESQL_USER
+ value: "${DATABASE_USER}"
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: pg-password
+ - name: POSTGRESQL_DATABASE
+ value: "${ANSIBLE_DATABASE_NAME}"
+ resources:
+ requests:
+ memory: "${ANSIBLE_MEM_REQ}"
+ cpu: "${ANSIBLE_CPU_REQ}"
+ limits:
+ memory: "${ANSIBLE_MEM_LIMIT}"
+ serviceAccount: cfme-privileged
+ serviceAccountName: cfme-privileged
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ data:
+ application.conf: |
+ # Timeout: The number of seconds before receives and sends time out.
+ Timeout 120
+
+ RewriteEngine On
+ Options SymLinksIfOwnerMatch
+
+ <VirtualHost *:80>
+ KeepAlive on
+ ProxyPreserveHost on
+ ProxyPass /ws/ ws://${NAME}/ws/
+ ProxyPassReverse /ws/ ws://${NAME}/ws/
+ ProxyPass / http://${NAME}/
+ ProxyPassReverse / http://${NAME}/
+ </VirtualHost>
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ data:
+ auth-type: internal
+ auth-configuration.conf: |
+ # External Authentication Configuration File
+ #
+ # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Exposes the httpd server
+ service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]'
+ spec:
+ ports:
+ - name: http
+ port: 80
+ targetPort: 80
+ selector:
+ name: httpd
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy httpd
+ spec:
+ strategy:
+ type: Recreate
+ recreateParams:
+ timeoutSeconds: 1200
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${HTTPD_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ labels:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ volumes:
+ - name: httpd-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ - name: httpd-auth-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ containers:
+ - name: httpd
+ image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}"
+ ports:
+ - containerPort: 80
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - httpd
+ initialDelaySeconds: 15
+ timeoutSeconds: 3
+ readinessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 10
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: httpd-config
+ mountPath: "${HTTPD_CONFIG_DIR}"
+ - name: httpd-auth-config
+ mountPath: "${HTTPD_AUTH_CONFIG_DIR}"
+ resources:
+ requests:
+ memory: "${HTTPD_MEM_REQ}"
+ cpu: "${HTTPD_CPU_REQ}"
+ limits:
+ memory: "${HTTPD_MEM_LIMIT}"
+ env:
+ - name: HTTPD_AUTH_TYPE
+ valueFrom:
+ configMapKeyRef:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ key: auth-type
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - "/usr/bin/save-container-environment"
+ serviceAccount: cfme-httpd
+ serviceAccountName: cfme-httpd
+parameters:
+- name: NAME
+ displayName: Name
+ required: true
+ description: The name assigned to all of the frontend objects defined in this template.
+ value: cloudforms
+- name: V2_KEY
+ displayName: CloudForms Encryption Key
+ required: true
+ description: Encryption Key for CloudForms Passwords
+ from: "[a-zA-Z0-9]{43}"
+ generate: expression
+- name: DATABASE_SERVICE_NAME
+ displayName: PostgreSQL Service Name
+ required: true
+ description: The name of the OpenShift Service exposed for the PostgreSQL container.
+ value: postgresql
+- name: DATABASE_USER
+ displayName: PostgreSQL User
+ required: true
+ description: PostgreSQL user that will access the database.
+ value: root
+- name: DATABASE_PASSWORD
+ displayName: PostgreSQL Password
+ required: true
+ description: Password for the PostgreSQL user.
+ from: "[a-zA-Z0-9]{8}"
+ generate: expression
+- name: DATABASE_IP
+ displayName: PostgreSQL Server IP
+ required: true
+ description: PostgreSQL external server IP used to configure service.
+ value: ''
+- name: DATABASE_PORT
+ displayName: PostgreSQL Server Port
+ required: true
+ description: PostgreSQL external server port used to configure service.
+ value: '5432'
+- name: DATABASE_NAME
+ required: true
+ displayName: PostgreSQL Database Name
+ description: Name of the PostgreSQL database accessed.
+ value: vmdb_production
+- name: DATABASE_REGION
+ required: true
+ displayName: Application Database Region
+ description: Database region that will be used for application.
+ value: '0'
+- name: ANSIBLE_DATABASE_NAME
+ displayName: Ansible PostgreSQL database name
+ required: true
+  description: The database to be used by the Ansible container
+ value: awx
+- name: MEMCACHED_SERVICE_NAME
+ required: true
+ displayName: Memcached Service Name
+ description: The name of the OpenShift Service exposed for the Memcached container.
+ value: memcached
+- name: MEMCACHED_MAX_MEMORY
+ displayName: Memcached Max Memory
+ description: Memcached maximum memory for memcached object storage in MB.
+ value: '64'
+- name: MEMCACHED_MAX_CONNECTIONS
+ displayName: Memcached Max Connections
+ description: Memcached maximum number of connections allowed.
+ value: '1024'
+- name: MEMCACHED_SLAB_PAGE_SIZE
+ displayName: Memcached Slab Page Size
+ description: Memcached size of each slab page.
+ value: 1m
+- name: ANSIBLE_SERVICE_NAME
+ displayName: Ansible Service Name
+ description: The name of the OpenShift Service exposed for the Ansible container.
+ value: ansible
+- name: ANSIBLE_ADMIN_PASSWORD
+ displayName: Ansible admin User password
+ required: true
+ description: The password for the Ansible container admin user
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: ANSIBLE_SECRET_KEY
+ displayName: Ansible Secret Key
+ required: true
+ description: Encryption key for the Ansible container
+ from: "[a-f0-9]{32}"
+ generate: expression
+- name: ANSIBLE_RABBITMQ_USER_NAME
+ displayName: RabbitMQ Username
+ required: true
+ description: Username for the Ansible RabbitMQ Server
+ value: ansible
+- name: ANSIBLE_RABBITMQ_PASSWORD
+ displayName: RabbitMQ Server Password
+ required: true
+ description: Password for the Ansible RabbitMQ Server
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: APPLICATION_CPU_REQ
+ displayName: Application Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Application container will need (expressed in millicores).
+ value: 1000m
+- name: MEMCACHED_CPU_REQ
+ displayName: Memcached Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Memcached container will need (expressed in millicores).
+ value: 200m
+- name: ANSIBLE_CPU_REQ
+ displayName: Ansible Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Ansible container will need (expressed in millicores).
+ value: 1000m
+- name: APPLICATION_MEM_REQ
+ displayName: Application Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Application container will need.
+ value: 6144Mi
+- name: MEMCACHED_MEM_REQ
+ displayName: Memcached Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Memcached container will need.
+ value: 64Mi
+- name: ANSIBLE_MEM_REQ
+ displayName: Ansible Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Ansible container will need.
+ value: 2048Mi
+- name: APPLICATION_MEM_LIMIT
+ displayName: Application Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Application container can consume.
+ value: 16384Mi
+- name: MEMCACHED_MEM_LIMIT
+ displayName: Memcached Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Memcached container can consume.
+ value: 256Mi
+- name: ANSIBLE_MEM_LIMIT
+ displayName: Ansible Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Ansible container can consume.
+ value: 8096Mi
+- name: MEMCACHED_IMG_NAME
+ displayName: Memcached Image Name
+ description: This is the Memcached image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-memcached
+- name: MEMCACHED_IMG_TAG
+ displayName: Memcached Image Tag
+ description: This is the Memcached image tag/version requested to deploy.
+ value: latest
+- name: FRONTEND_APPLICATION_IMG_NAME
+ displayName: Frontend Application Image Name
+ description: This is the Frontend Application image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app-ui
+- name: BACKEND_APPLICATION_IMG_NAME
+ displayName: Backend Application Image Name
+ description: This is the Backend Application image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app
+- name: FRONTEND_APPLICATION_IMG_TAG
+ displayName: Front end Application Image Tag
+ description: This is the CloudForms Frontend Application image tag/version requested to deploy.
+ value: latest
+- name: BACKEND_APPLICATION_IMG_TAG
+ displayName: Backend Application Image Tag
+ description: This is the CloudForms Backend Application image tag/version requested to deploy.
+ value: latest
+- name: ANSIBLE_IMG_NAME
+ displayName: Ansible Image Name
+ description: This is the Ansible image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-embedded-ansible
+- name: ANSIBLE_IMG_TAG
+ displayName: Ansible Image Tag
+ description: This is the Ansible image tag/version requested to deploy.
+ value: latest
+- name: APPLICATION_DOMAIN
+ displayName: Application Hostname
+ description: The exposed hostname that will route to the application service. If left blank, a value will be defaulted.
+ value: ''
+- name: APPLICATION_REPLICA_COUNT
+ displayName: Application Replica Count
+ description: This is the number of Application replicas requested to deploy.
+ value: '1'
+- name: APPLICATION_INIT_DELAY
+ displayName: Application Init Delay
+ required: true
+ description: Delay in seconds before we attempt to initialize the application.
+ value: '15'
+- name: APPLICATION_VOLUME_CAPACITY
+ displayName: Application Volume Capacity
+ required: true
+ description: Volume space available for application data.
+ value: 5Gi
+- name: HTTPD_SERVICE_NAME
+ required: true
+ displayName: Apache httpd Service Name
+ description: The name of the OpenShift Service exposed for the httpd container.
+ value: httpd
+- name: HTTPD_IMG_NAME
+ displayName: Apache httpd Image Name
+ description: This is the httpd image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-httpd
+- name: HTTPD_IMG_TAG
+ displayName: Apache httpd Image Tag
+ description: This is the httpd image tag/version requested to deploy.
+ value: latest
+- name: HTTPD_CONFIG_DIR
+ displayName: Apache httpd Configuration Directory
+ description: Directory used to store the Apache configuration files.
+ value: "/etc/httpd/conf.d"
+- name: HTTPD_AUTH_CONFIG_DIR
+ displayName: External Authentication Configuration Directory
+ description: Directory used to store the external authentication configuration files.
+ value: "/etc/httpd/auth-conf.d"
+- name: HTTPD_CPU_REQ
+ displayName: Apache httpd Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the httpd container will need (expressed in millicores).
+ value: 500m
+- name: HTTPD_MEM_REQ
+ displayName: Apache httpd Min RAM Requested
+ required: true
+ description: Minimum amount of memory the httpd container will need.
+ value: 512Mi
+- name: HTTPD_MEM_LIMIT
+ displayName: Apache httpd Max RAM Limit
+ required: true
+ description: Maximum amount of memory the httpd container can consume.
+ value: 8192Mi
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml
new file mode 100644
index 000000000..d7c9f5af7
--- /dev/null
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml
@@ -0,0 +1,940 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: cloudforms
+metadata:
+ name: cloudforms
+ annotations:
+ description: CloudForms appliance with persistent storage
+ tags: instant-app,cloudforms,cfme
+ iconClass: icon-rails
+objects:
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-orchestrator
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-anyuid
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-privileged
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-httpd
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${NAME}-secrets"
+ stringData:
+ pg-password: "${DATABASE_PASSWORD}"
+ database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5
+ v2-key: "${V2_KEY}"
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ stringData:
+ rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}"
+ secret-key: "${ANSIBLE_SECRET_KEY}"
+ admin-password: "${ANSIBLE_ADMIN_PASSWORD}"
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}-configs"
+ data:
+ 01_miq_overrides.conf: |
+ #------------------------------------------------------------------------------
+ # CONNECTIONS AND AUTHENTICATION
+ #------------------------------------------------------------------------------
+
+ tcp_keepalives_count = 9
+ tcp_keepalives_idle = 3
+ tcp_keepalives_interval = 75
+
+ #------------------------------------------------------------------------------
+ # RESOURCE USAGE (except WAL)
+ #------------------------------------------------------------------------------
+
+ shared_preload_libraries = 'pglogical,repmgr_funcs'
+ max_worker_processes = 10
+
+ #------------------------------------------------------------------------------
+ # WRITE AHEAD LOG
+ #------------------------------------------------------------------------------
+
+ wal_level = 'logical'
+ wal_log_hints = on
+ wal_buffers = 16MB
+ checkpoint_completion_target = 0.9
+
+ #------------------------------------------------------------------------------
+ # REPLICATION
+ #------------------------------------------------------------------------------
+
+ max_wal_senders = 10
+ wal_sender_timeout = 0
+ max_replication_slots = 10
+ hot_standby = on
+
+ #------------------------------------------------------------------------------
+ # ERROR REPORTING AND LOGGING
+ #------------------------------------------------------------------------------
+
+ log_filename = 'postgresql.log'
+ log_rotation_age = 0
+ log_min_duration_statement = 5000
+ log_connections = on
+ log_disconnections = on
+ log_line_prefix = '%t:%r:%c:%u@%d:[%p]:'
+ log_lock_waits = on
+
+ #------------------------------------------------------------------------------
+ # AUTOVACUUM PARAMETERS
+ #------------------------------------------------------------------------------
+
+ log_autovacuum_min_duration = 0
+ autovacuum_naptime = 5min
+ autovacuum_vacuum_threshold = 500
+ autovacuum_analyze_threshold = 500
+ autovacuum_vacuum_scale_factor = 0.05
+
+ #------------------------------------------------------------------------------
+ # LOCK MANAGEMENT
+ #------------------------------------------------------------------------------
+
+ deadlock_timeout = 5s
+
+ #------------------------------------------------------------------------------
+ # VERSION/PLATFORM COMPATIBILITY
+ #------------------------------------------------------------------------------
+
+ escape_string_warning = off
+ standard_conforming_strings = off
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ data:
+ application.conf: |
+ # Timeout: The number of seconds before receives and sends time out.
+ Timeout 120
+
+ RewriteEngine On
+ Options SymLinksIfOwnerMatch
+
+ <VirtualHost *:80>
+ KeepAlive on
+ ProxyPreserveHost on
+ ProxyPass /ws/ ws://${NAME}/ws/
+ ProxyPassReverse /ws/ ws://${NAME}/ws/
+ ProxyPass / http://${NAME}/
+ ProxyPassReverse / http://${NAME}/
+ </VirtualHost>
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ data:
+ auth-type: internal
+ auth-configuration.conf: |
+ # External Authentication Configuration File
+ #
+ # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances CloudForms pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${NAME}"
+ spec:
+ clusterIP: None
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ selector:
+ name: "${NAME}"
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ host: "${APPLICATION_DOMAIN}"
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Redirect
+ to:
+ kind: Service
+ name: "${HTTPD_SERVICE_NAME}"
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: "${NAME}-${DATABASE_SERVICE_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${DATABASE_VOLUME_CAPACITY}"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}"
+ annotations:
+ description: Defines how to deploy the CloudForms appliance
+ spec:
+ serviceName: "${NAME}"
+ replicas: "${APPLICATION_REPLICA_COUNT}"
+ template:
+ metadata:
+ labels:
+ name: "${NAME}"
+ name: "${NAME}"
+ spec:
+ containers:
+ - name: cloudforms
+ image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: "/"
+ port: 80
+ scheme: HTTP
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_REGION
+ value: "${DATABASE_REGION}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/rh/cfme-container-scripts/sync-pv-data"
+ serviceAccount: cfme-orchestrator
+ serviceAccountName: cfme-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Headless service for CloudForms backend pods
+ name: "${NAME}-backend"
+ spec:
+ clusterIP: None
+ selector:
+ name: "${NAME}-backend"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}-backend"
+ annotations:
+ description: Defines how to deploy the CloudForms appliance
+ spec:
+ serviceName: "${NAME}-backend"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${NAME}-backend"
+ name: "${NAME}-backend"
+ spec:
+ containers:
+ - name: cloudforms
+ image: "${BACKEND_APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - MIQ Server
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: MIQ_SERVER_DEFAULT_ROLES
+ value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate
+ - name: FRONTEND_SERVICE_NAME
+ value: "${NAME}"
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/rh/cfme-container-scripts/sync-pv-data"
+ serviceAccount: cfme-orchestrator
+ serviceAccountName: cfme-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: Exposes the memcached server
+ spec:
+ ports:
+ - name: memcached
+ port: 11211
+ targetPort: 11211
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy memcached
+ spec:
+ strategy:
+ type: Recreate
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ labels:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ spec:
+ volumes: []
+ containers:
+ - name: memcached
+ image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
+ ports:
+ - containerPort: 11211
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 5
+ tcpSocket:
+ port: 11211
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 30
+ tcpSocket:
+ port: 11211
+ volumeMounts: []
+ env:
+ - name: MEMCACHED_MAX_MEMORY
+ value: "${MEMCACHED_MAX_MEMORY}"
+ - name: MEMCACHED_MAX_CONNECTIONS
+ value: "${MEMCACHED_MAX_CONNECTIONS}"
+ - name: MEMCACHED_SLAB_PAGE_SIZE
+ value: "${MEMCACHED_SLAB_PAGE_SIZE}"
+ resources:
+ requests:
+ memory: "${MEMCACHED_MEM_REQ}"
+ cpu: "${MEMCACHED_CPU_REQ}"
+ limits:
+ memory: "${MEMCACHED_MEM_LIMIT}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: Exposes the database server
+ spec:
+ ports:
+ - name: postgresql
+ port: 5432
+ targetPort: 5432
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy the database
+ spec:
+ strategy:
+ type: Recreate
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ labels:
+ name: "${DATABASE_SERVICE_NAME}"
+ spec:
+ volumes:
+ - name: cfme-pgdb-volume
+ persistentVolumeClaim:
+ claimName: "${NAME}-${DATABASE_SERVICE_NAME}"
+ - name: cfme-pg-configs
+ configMap:
+ name: "${DATABASE_SERVICE_NAME}-configs"
+ containers:
+ - name: postgresql
+ image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}"
+ ports:
+ - containerPort: 5432
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 15
+ exec:
+ command:
+ - "/bin/sh"
+ - "-i"
+ - "-c"
+ - psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 60
+ tcpSocket:
+ port: 5432
+ volumeMounts:
+ - name: cfme-pgdb-volume
+ mountPath: "/var/lib/pgsql/data"
+ - name: cfme-pg-configs
+ mountPath: "${POSTGRESQL_CONFIG_DIR}"
+ env:
+ - name: POSTGRESQL_USER
+ value: "${DATABASE_USER}"
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: pg-password
+ - name: POSTGRESQL_DATABASE
+ value: "${DATABASE_NAME}"
+ - name: POSTGRESQL_MAX_CONNECTIONS
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ - name: POSTGRESQL_SHARED_BUFFERS
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ - name: POSTGRESQL_CONFIG_DIR
+ value: "${POSTGRESQL_CONFIG_DIR}"
+ resources:
+ requests:
+ memory: "${POSTGRESQL_MEM_REQ}"
+ cpu: "${POSTGRESQL_CPU_REQ}"
+ limits:
+ memory: "${POSTGRESQL_MEM_LIMIT}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances Ansible pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ selector:
+ name: "${ANSIBLE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy the Ansible appliance
+ spec:
+ strategy:
+ type: Recreate
+ serviceName: "${ANSIBLE_SERVICE_NAME}"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ containers:
+ - name: ansible
+ image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 443
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: "/"
+ port: 443
+ scheme: HTTPS
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 443
+ protocol: TCP
+ securityContext:
+ privileged: true
+ env:
+ - name: ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ - name: RABBITMQ_USER_NAME
+ value: "${ANSIBLE_RABBITMQ_USER_NAME}"
+ - name: RABBITMQ_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: rabbit-password
+ - name: ANSIBLE_SECRET_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: secret-key
+ - name: DATABASE_SERVICE_NAME
+ value: "${DATABASE_SERVICE_NAME}"
+ - name: POSTGRESQL_USER
+ value: "${DATABASE_USER}"
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: pg-password
+ - name: POSTGRESQL_DATABASE
+ value: "${ANSIBLE_DATABASE_NAME}"
+ resources:
+ requests:
+ memory: "${ANSIBLE_MEM_REQ}"
+ cpu: "${ANSIBLE_CPU_REQ}"
+ limits:
+ memory: "${ANSIBLE_MEM_LIMIT}"
+ serviceAccount: cfme-privileged
+ serviceAccountName: cfme-privileged
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Exposes the httpd server
+ service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]'
+ spec:
+ ports:
+ - name: http
+ port: 80
+ targetPort: 80
+ selector:
+ name: httpd
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy httpd
+ spec:
+ strategy:
+ type: Recreate
+ recreateParams:
+ timeoutSeconds: 1200
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${HTTPD_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ labels:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ volumes:
+ - name: httpd-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ - name: httpd-auth-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ containers:
+ - name: httpd
+ image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}"
+ ports:
+ - containerPort: 80
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - httpd
+ initialDelaySeconds: 15
+ timeoutSeconds: 3
+ readinessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 10
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: httpd-config
+ mountPath: "${HTTPD_CONFIG_DIR}"
+ - name: httpd-auth-config
+ mountPath: "${HTTPD_AUTH_CONFIG_DIR}"
+ resources:
+ requests:
+ memory: "${HTTPD_MEM_REQ}"
+ cpu: "${HTTPD_CPU_REQ}"
+ limits:
+ memory: "${HTTPD_MEM_LIMIT}"
+ env:
+ - name: HTTPD_AUTH_TYPE
+ valueFrom:
+ configMapKeyRef:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ key: auth-type
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - "/usr/bin/save-container-environment"
+ serviceAccount: cfme-httpd
+ serviceAccountName: cfme-httpd
+parameters:
+- name: NAME
+ displayName: Name
+ required: true
+ description: The name assigned to all of the frontend objects defined in this template.
+ value: cloudforms
+- name: V2_KEY
+ displayName: CloudForms Encryption Key
+ required: true
+ description: Encryption Key for CloudForms Passwords
+ from: "[a-zA-Z0-9]{43}"
+ generate: expression
+- name: DATABASE_SERVICE_NAME
+ displayName: PostgreSQL Service Name
+ required: true
+ description: The name of the OpenShift Service exposed for the PostgreSQL container.
+ value: postgresql
+- name: DATABASE_USER
+ displayName: PostgreSQL User
+ required: true
+ description: PostgreSQL user that will access the database.
+ value: root
+- name: DATABASE_PASSWORD
+ displayName: PostgreSQL Password
+ required: true
+ description: Password for the PostgreSQL user.
+ from: "[a-zA-Z0-9]{8}"
+ generate: expression
+- name: DATABASE_NAME
+ required: true
+ displayName: PostgreSQL Database Name
+ description: Name of the PostgreSQL database accessed.
+ value: vmdb_production
+- name: DATABASE_REGION
+ required: true
+ displayName: Application Database Region
+ description: Database region that will be used for application.
+ value: '0'
+- name: ANSIBLE_DATABASE_NAME
+ displayName: Ansible PostgreSQL database name
+ required: true
+ description: The database to be used by the Ansible container
+ value: awx
+- name: MEMCACHED_SERVICE_NAME
+ required: true
+ displayName: Memcached Service Name
+ description: The name of the OpenShift Service exposed for the Memcached container.
+ value: memcached
+- name: MEMCACHED_MAX_MEMORY
+ displayName: Memcached Max Memory
+ description: Memcached maximum memory for memcached object storage in MB.
+ value: '64'
+- name: MEMCACHED_MAX_CONNECTIONS
+ displayName: Memcached Max Connections
+ description: Memcached maximum number of connections allowed.
+ value: '1024'
+- name: MEMCACHED_SLAB_PAGE_SIZE
+ displayName: Memcached Slab Page Size
+ description: Memcached size of each slab page.
+ value: 1m
+- name: POSTGRESQL_CONFIG_DIR
+ displayName: PostgreSQL Configuration Overrides
+ description: Directory used to store PostgreSQL configuration overrides.
+ value: "/var/lib/pgsql/conf.d"
+- name: POSTGRESQL_MAX_CONNECTIONS
+ displayName: PostgreSQL Max Connections
+ description: PostgreSQL maximum number of database connections allowed.
+ value: '1000'
+- name: POSTGRESQL_SHARED_BUFFERS
+ displayName: PostgreSQL Shared Buffer Amount
+ description: Amount of memory dedicated for PostgreSQL shared memory buffers.
+ value: 1GB
+- name: ANSIBLE_SERVICE_NAME
+ displayName: Ansible Service Name
+ description: The name of the OpenShift Service exposed for the Ansible container.
+ value: ansible
+- name: ANSIBLE_ADMIN_PASSWORD
+ displayName: Ansible Admin User Password
+ required: true
+ description: The password for the Ansible container admin user
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: ANSIBLE_SECRET_KEY
+ displayName: Ansible Secret Key
+ required: true
+ description: Encryption key for the Ansible container
+ from: "[a-f0-9]{32}"
+ generate: expression
+- name: ANSIBLE_RABBITMQ_USER_NAME
+ displayName: RabbitMQ Username
+ required: true
+ description: Username for the Ansible RabbitMQ Server
+ value: ansible
+- name: ANSIBLE_RABBITMQ_PASSWORD
+ displayName: RabbitMQ Server Password
+ required: true
+ description: Password for the Ansible RabbitMQ Server
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: APPLICATION_CPU_REQ
+ displayName: Application Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Application container will need (expressed in millicores).
+ value: 1000m
+- name: POSTGRESQL_CPU_REQ
+ displayName: PostgreSQL Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores).
+ value: 500m
+- name: MEMCACHED_CPU_REQ
+ displayName: Memcached Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Memcached container will need (expressed in millicores).
+ value: 200m
+- name: ANSIBLE_CPU_REQ
+ displayName: Ansible Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Ansible container will need (expressed in millicores).
+ value: 1000m
+- name: APPLICATION_MEM_REQ
+ displayName: Application Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Application container will need.
+ value: 6144Mi
+- name: POSTGRESQL_MEM_REQ
+ displayName: PostgreSQL Min RAM Requested
+ required: true
+ description: Minimum amount of memory the PostgreSQL container will need.
+ value: 4Gi
+- name: MEMCACHED_MEM_REQ
+ displayName: Memcached Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Memcached container will need.
+ value: 64Mi
+- name: ANSIBLE_MEM_REQ
+ displayName: Ansible Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Ansible container will need.
+ value: 2048Mi
+- name: APPLICATION_MEM_LIMIT
+ displayName: Application Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Application container can consume.
+ value: 16384Mi
+- name: POSTGRESQL_MEM_LIMIT
+ displayName: PostgreSQL Max RAM Limit
+ required: true
+ description: Maximum amount of memory the PostgreSQL container can consume.
+ value: 8Gi
+- name: MEMCACHED_MEM_LIMIT
+ displayName: Memcached Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Memcached container can consume.
+ value: 256Mi
+- name: ANSIBLE_MEM_LIMIT
+ displayName: Ansible Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Ansible container can consume.
+ value: 8096Mi
+- name: POSTGRESQL_IMG_NAME
+ displayName: PostgreSQL Image Name
+ description: This is the PostgreSQL image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-postgresql
+- name: POSTGRESQL_IMG_TAG
+ displayName: PostgreSQL Image Tag
+ description: This is the PostgreSQL image tag/version requested to deploy.
+ value: latest
+- name: MEMCACHED_IMG_NAME
+ displayName: Memcached Image Name
+ description: This is the Memcached image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-memcached
+- name: MEMCACHED_IMG_TAG
+ displayName: Memcached Image Tag
+ description: This is the Memcached image tag/version requested to deploy.
+ value: latest
+- name: FRONTEND_APPLICATION_IMG_NAME
+ displayName: Frontend Application Image Name
+ description: This is the Frontend Application image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app-ui
+- name: BACKEND_APPLICATION_IMG_NAME
+ displayName: Backend Application Image Name
+ description: This is the Backend Application image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app
+- name: FRONTEND_APPLICATION_IMG_TAG
+ displayName: Frontend Application Image Tag
+ description: This is the CloudForms Frontend Application image tag/version requested to deploy.
+ value: latest
+- name: BACKEND_APPLICATION_IMG_TAG
+ displayName: Backend Application Image Tag
+ description: This is the CloudForms Backend Application image tag/version requested to deploy.
+ value: latest
+- name: ANSIBLE_IMG_NAME
+ displayName: Ansible Image Name
+ description: This is the Ansible image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-embedded-ansible
+- name: ANSIBLE_IMG_TAG
+ displayName: Ansible Image Tag
+ description: This is the Ansible image tag/version requested to deploy.
+ value: latest
+- name: APPLICATION_DOMAIN
+ displayName: Application Hostname
+ description: The exposed hostname that will route to the application service. If left blank, a value will be defaulted.
+ value: ''
+- name: APPLICATION_REPLICA_COUNT
+ displayName: Application Replica Count
+ description: This is the number of Application replicas requested to deploy.
+ value: '1'
+- name: APPLICATION_INIT_DELAY
+ displayName: Application Init Delay
+ required: true
+ description: Delay in seconds before we attempt to initialize the application.
+ value: '15'
+- name: APPLICATION_VOLUME_CAPACITY
+ displayName: Application Volume Capacity
+ required: true
+ description: Volume space available for application data.
+ value: 5Gi
+- name: DATABASE_VOLUME_CAPACITY
+ displayName: Database Volume Capacity
+ required: true
+ description: Volume space available for database.
+ value: 15Gi
+- name: HTTPD_SERVICE_NAME
+ required: true
+ displayName: Apache httpd Service Name
+ description: The name of the OpenShift Service exposed for the httpd container.
+ value: httpd
+- name: HTTPD_IMG_NAME
+ displayName: Apache httpd Image Name
+ description: This is the httpd image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-httpd
+- name: HTTPD_IMG_TAG
+ displayName: Apache httpd Image Tag
+ description: This is the httpd image tag/version requested to deploy.
+ value: latest
+- name: HTTPD_CONFIG_DIR
+ displayName: Apache httpd Configuration Directory
+ description: Directory used to store the Apache configuration files.
+ value: "/etc/httpd/conf.d"
+- name: HTTPD_AUTH_CONFIG_DIR
+ displayName: External Authentication Configuration Directory
+ description: Directory used to store the external authentication configuration files.
+ value: "/etc/httpd/auth-conf.d"
+- name: HTTPD_CPU_REQ
+ displayName: Apache httpd Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the httpd container will need (expressed in millicores).
+ value: 500m
+- name: HTTPD_MEM_REQ
+ displayName: Apache httpd Min RAM Requested
+ required: true
+ description: Minimum amount of memory the httpd container will need.
+ value: 512Mi
+- name: HTTPD_MEM_LIMIT
+ displayName: Apache httpd Max RAM Limit
+ required: true
+ description: Maximum amount of memory the httpd container can consume.
+ value: 8192Mi
diff --git a/roles/openshift_management/files/templates/manageiq/miq-backup-job.yaml b/roles/openshift_management/files/templates/manageiq/miq-backup-job.yaml
new file mode 100644
index 000000000..044cb73a5
--- /dev/null
+++ b/roles/openshift_management/files/templates/manageiq/miq-backup-job.yaml
@@ -0,0 +1,28 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: manageiq-backup
+spec:
+ template:
+ metadata:
+ name: manageiq-backup
+ spec:
+ containers:
+ - name: postgresql
+ image: docker.io/manageiq/postgresql:latest
+ command:
+ - "/opt/manageiq/container-scripts/backup_db"
+ env:
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: manageiq-secrets
+ key: database-url
+ volumeMounts:
+ - name: miq-backup-vol
+ mountPath: "/backups"
+ volumes:
+ - name: miq-backup-vol
+ persistentVolumeClaim:
+ claimName: manageiq-backup
+ restartPolicy: Never
diff --git a/roles/openshift_management/files/templates/manageiq/miq-backup-pvc.yaml b/roles/openshift_management/files/templates/manageiq/miq-backup-pvc.yaml
new file mode 100644
index 000000000..25696ef23
--- /dev/null
+++ b/roles/openshift_management/files/templates/manageiq/miq-backup-pvc.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: manageiq-backup
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 15Gi
diff --git a/roles/openshift_management/files/templates/manageiq/miq-pv-backup-example.yaml b/roles/openshift_management/files/templates/manageiq/miq-pv-backup-example.yaml
new file mode 100644
index 000000000..a5cf54d4e
--- /dev/null
+++ b/roles/openshift_management/files/templates/manageiq/miq-pv-backup-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: miq-pv03
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: "/exports/miq-pv03"
+ server: "<your-nfs-host-here>"
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_management/files/templates/manageiq/miq-pv-db-example.yaml b/roles/openshift_management/files/templates/manageiq/miq-pv-db-example.yaml
new file mode 100644
index 000000000..a803bebe2
--- /dev/null
+++ b/roles/openshift_management/files/templates/manageiq/miq-pv-db-example.yaml
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: manageiq-db-pv
+metadata:
+ name: manageiq-db-pv
+ annotations:
+ description: PV Template for MIQ PostgreSQL DB
+ tags: PVS, MIQ
+objects:
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: miq-db
+ spec:
+ capacity:
+ storage: "${PV_SIZE}"
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: "${BASE_PATH}/miq-db"
+ server: "${NFS_HOST}"
+ persistentVolumeReclaimPolicy: Retain
+parameters:
+- name: PV_SIZE
+ displayName: PV Size for DB
+ required: true
+ description: The size of the MIQ DB PV given in Gi
+ value: 15Gi
+- name: BASE_PATH
+ displayName: Exports Directory Base Path
+ required: true
+ description: The parent directory of your NFS exports
+ value: "/exports"
+- name: NFS_HOST
+ displayName: NFS Server Hostname
+ required: true
+ description: The hostname or IP address of the NFS server
diff --git a/roles/openshift_management/files/templates/manageiq/miq-pv-server-example.yaml b/roles/openshift_management/files/templates/manageiq/miq-pv-server-example.yaml
new file mode 100644
index 000000000..1288544d1
--- /dev/null
+++ b/roles/openshift_management/files/templates/manageiq/miq-pv-server-example.yaml
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: manageiq-app-pv
+metadata:
+ name: manageiq-app-pv
+ annotations:
+ description: PV Template for MIQ Server
+ tags: PVS, MIQ
+objects:
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: miq-app
+ spec:
+ capacity:
+ storage: "${PV_SIZE}"
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: "${BASE_PATH}/miq-app"
+ server: "${NFS_HOST}"
+ persistentVolumeReclaimPolicy: Retain
+parameters:
+- name: PV_SIZE
+ displayName: PV Size for App
+ required: true
+ description: The size of the MIQ APP PV given in Gi
+ value: 5Gi
+- name: BASE_PATH
+ displayName: Exports Directory Base Path
+ required: true
+ description: The parent directory of your NFS exports
+ value: "/exports"
+- name: NFS_HOST
+ displayName: NFS Server Hostname
+ required: true
+ description: The hostname or IP address of the NFS server
diff --git a/roles/openshift_management/files/templates/manageiq/miq-restore-job.yaml b/roles/openshift_management/files/templates/manageiq/miq-restore-job.yaml
new file mode 100644
index 000000000..eea284dd4
--- /dev/null
+++ b/roles/openshift_management/files/templates/manageiq/miq-restore-job.yaml
@@ -0,0 +1,35 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: manageiq-restore
+spec:
+ template:
+ metadata:
+ name: manageiq-restore
+ spec:
+ containers:
+ - name: postgresql
+ image: docker.io/manageiq/postgresql:latest
+ command:
+ - "/opt/manageiq/container-scripts/restore_db"
+ env:
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: manageiq-secrets
+ key: database-url
+ - name: BACKUP_VERSION
+ value: latest
+ volumeMounts:
+ - name: miq-backup-vol
+ mountPath: "/backups"
+ - name: miq-prod-vol
+ mountPath: "/restore"
+ volumes:
+ - name: miq-backup-vol
+ persistentVolumeClaim:
+ claimName: manageiq-backup
+ - name: miq-prod-vol
+ persistentVolumeClaim:
+ claimName: manageiq-postgresql
+ restartPolicy: Never
diff --git a/roles/openshift_management/files/templates/manageiq/miq-template-ext-db.yaml b/roles/openshift_management/files/templates/manageiq/miq-template-ext-db.yaml
new file mode 100644
index 000000000..82cd5d49e
--- /dev/null
+++ b/roles/openshift_management/files/templates/manageiq/miq-template-ext-db.yaml
@@ -0,0 +1,771 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: manageiq-ext-db
+metadata:
+ name: manageiq-ext-db
+ annotations:
+ description: ManageIQ appliance with persistent storage using an external DB host
+ tags: instant-app,manageiq,miq
+ iconClass: icon-rails
+objects:
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: miq-orchestrator
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: miq-anyuid
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: miq-privileged
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: miq-httpd
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${NAME}-secrets"
+ stringData:
+ pg-password: "${DATABASE_PASSWORD}"
+ database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5
+ v2-key: "${V2_KEY}"
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ stringData:
+ rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}"
+ secret-key: "${ANSIBLE_SECRET_KEY}"
+ admin-password: "${ANSIBLE_ADMIN_PASSWORD}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances ManageIQ pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${NAME}"
+ spec:
+ clusterIP: None
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ selector:
+ name: "${NAME}"
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ host: "${APPLICATION_DOMAIN}"
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Redirect
+ to:
+ kind: Service
+ name: "${HTTPD_SERVICE_NAME}"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}"
+ annotations:
+ description: Defines how to deploy the ManageIQ appliance
+ spec:
+ serviceName: "${NAME}"
+ replicas: "${APPLICATION_REPLICA_COUNT}"
+ template:
+ metadata:
+ labels:
+ name: "${NAME}"
+ name: "${NAME}"
+ spec:
+ containers:
+ - name: manageiq
+ image: "${APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: "/"
+ port: 80
+ scheme: HTTP
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_SERVICE_NAME
+ value: "${DATABASE_SERVICE_NAME}"
+ - name: DATABASE_REGION
+ value: "${DATABASE_REGION}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: MEMCACHED_SERVER
+ value: "${MEMCACHED_SERVICE_NAME}:11211"
+ - name: MEMCACHED_SERVICE_NAME
+ value: "${MEMCACHED_SERVICE_NAME}"
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: ANSIBLE_SERVICE_NAME
+ value: "${ANSIBLE_SERVICE_NAME}"
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/manageiq/container-scripts/sync-pv-data"
+ serviceAccount: miq-orchestrator
+ serviceAccountName: miq-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Headless service for ManageIQ backend pods
+ name: "${NAME}-backend"
+ spec:
+ clusterIP: None
+ selector:
+ name: "${NAME}-backend"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}-backend"
+ annotations:
+ description: Defines how to deploy the ManageIQ appliance
+ spec:
+ serviceName: "${NAME}-backend"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${NAME}-backend"
+ name: "${NAME}-backend"
+ spec:
+ containers:
+ - name: manageiq
+ image: "${APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - MIQ Server
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: MIQ_SERVER_DEFAULT_ROLES
+ value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate
+ - name: FRONTEND_SERVICE_NAME
+ value: "${NAME}"
+ - name: MEMCACHED_SERVER
+ value: "${MEMCACHED_SERVICE_NAME}:11211"
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: ANSIBLE_SERVICE_NAME
+ value: "${ANSIBLE_SERVICE_NAME}"
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/manageiq/container-scripts/sync-pv-data"
+ serviceAccount: miq-orchestrator
+ serviceAccountName: miq-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: Exposes the memcached server
+ spec:
+ ports:
+ - name: memcached
+ port: 11211
+ targetPort: 11211
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy memcached
+ spec:
+ strategy:
+ type: Recreate
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ labels:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ spec:
+ volumes: []
+ containers:
+ - name: memcached
+ image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
+ ports:
+ - containerPort: 11211
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 5
+ tcpSocket:
+ port: 11211
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 30
+ tcpSocket:
+ port: 11211
+ volumeMounts: []
+ env:
+ - name: MEMCACHED_MAX_MEMORY
+ value: "${MEMCACHED_MAX_MEMORY}"
+ - name: MEMCACHED_MAX_CONNECTIONS
+ value: "${MEMCACHED_MAX_CONNECTIONS}"
+ - name: MEMCACHED_SLAB_PAGE_SIZE
+ value: "${MEMCACHED_SLAB_PAGE_SIZE}"
+ resources:
+ requests:
+ memory: "${MEMCACHED_MEM_REQ}"
+ cpu: "${MEMCACHED_CPU_REQ}"
+ limits:
+ memory: "${MEMCACHED_MEM_LIMIT}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: Remote database service
+ spec:
+ ports:
+ - name: postgresql
+ port: 5432
+ targetPort: "${{DATABASE_PORT}}"
+ selector: {}
+- apiVersion: v1
+ kind: Endpoints
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ subsets:
+ - addresses:
+ - ip: "${DATABASE_IP}"
+ ports:
+ - port: "${{DATABASE_PORT}}"
+ name: postgresql
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances Ansible pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ selector:
+ name: "${ANSIBLE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy the Ansible appliance
+ spec:
+ strategy:
+ type: Recreate
+ serviceName: "${ANSIBLE_SERVICE_NAME}"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ containers:
+ - name: ansible
+ image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 443
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: "/"
+ port: 443
+ scheme: HTTPS
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 443
+ protocol: TCP
+ securityContext:
+ privileged: true
+ env:
+ - name: ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ - name: RABBITMQ_USER_NAME
+ value: "${ANSIBLE_RABBITMQ_USER_NAME}"
+ - name: RABBITMQ_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: rabbit-password
+ - name: ANSIBLE_SECRET_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: secret-key
+ - name: DATABASE_SERVICE_NAME
+ value: "${DATABASE_SERVICE_NAME}"
+ - name: POSTGRESQL_USER
+ value: "${DATABASE_USER}"
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: pg-password
+ - name: POSTGRESQL_DATABASE
+ value: "${ANSIBLE_DATABASE_NAME}"
+ resources:
+ requests:
+ memory: "${ANSIBLE_MEM_REQ}"
+ cpu: "${ANSIBLE_CPU_REQ}"
+ limits:
+ memory: "${ANSIBLE_MEM_LIMIT}"
+ serviceAccount: miq-privileged
+ serviceAccountName: miq-privileged
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ data:
+ application.conf: |
+ # Timeout: The number of seconds before receives and sends time out.
+ Timeout 120
+
+ RewriteEngine On
+ Options SymLinksIfOwnerMatch
+
+ <VirtualHost *:80>
+ KeepAlive on
+ ProxyPreserveHost on
+ ProxyPass /ws/ ws://${NAME}/ws/
+ ProxyPassReverse /ws/ ws://${NAME}/ws/
+ ProxyPass / http://${NAME}/
+ ProxyPassReverse / http://${NAME}/
+ </VirtualHost>
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ data:
+ auth-type: internal
+ auth-configuration.conf: |
+ # External Authentication Configuration File
+ #
+ # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Exposes the httpd server
+ service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]'
+ spec:
+ ports:
+ - name: http
+ port: 80
+ targetPort: 80
+ selector:
+ name: httpd
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy httpd
+ spec:
+ strategy:
+ type: Recreate
+ recreateParams:
+ timeoutSeconds: 1200
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${HTTPD_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ labels:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ volumes:
+ - name: httpd-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ - name: httpd-auth-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ containers:
+ - name: httpd
+ image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}"
+ ports:
+ - containerPort: 80
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - httpd
+ initialDelaySeconds: 15
+ timeoutSeconds: 3
+ readinessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 10
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: httpd-config
+ mountPath: "${HTTPD_CONFIG_DIR}"
+ - name: httpd-auth-config
+ mountPath: "${HTTPD_AUTH_CONFIG_DIR}"
+ resources:
+ requests:
+ memory: "${HTTPD_MEM_REQ}"
+ cpu: "${HTTPD_CPU_REQ}"
+ limits:
+ memory: "${HTTPD_MEM_LIMIT}"
+ env:
+ - name: HTTPD_AUTH_TYPE
+ valueFrom:
+ configMapKeyRef:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ key: auth-type
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - "/usr/bin/save-container-environment"
+ serviceAccount: miq-anyuid
+ serviceAccountName: miq-anyuid
+parameters:
+- name: NAME
+ displayName: Name
+ required: true
+ description: The name assigned to all of the frontend objects defined in this template.
+ value: manageiq
+- name: V2_KEY
+ displayName: ManageIQ Encryption Key
+ required: true
+ description: Encryption Key for ManageIQ Passwords
+ from: "[a-zA-Z0-9]{43}"
+ generate: expression
+- name: DATABASE_SERVICE_NAME
+ displayName: PostgreSQL Service Name
+ required: true
+ description: The name of the OpenShift Service exposed for the PostgreSQL container.
+ value: postgresql
+- name: DATABASE_USER
+ displayName: PostgreSQL User
+ required: true
+ description: PostgreSQL user that will access the database.
+ value: root
+- name: DATABASE_PASSWORD
+ displayName: PostgreSQL Password
+ required: true
+ description: Password for the PostgreSQL user.
+ from: "[a-zA-Z0-9]{8}"
+ generate: expression
+- name: DATABASE_IP
+ displayName: PostgreSQL Server IP
+ required: true
+ description: PostgreSQL external server IP used to configure service.
+ value: ''
+- name: DATABASE_PORT
+ displayName: PostgreSQL Server Port
+ required: true
+ description: PostgreSQL external server port used to configure service.
+ value: '5432'
+- name: DATABASE_NAME
+ required: true
+ displayName: PostgreSQL Database Name
+ description: Name of the PostgreSQL database accessed.
+ value: vmdb_production
+- name: DATABASE_REGION
+ required: true
+ displayName: Application Database Region
+ description: Database region that will be used for application.
+ value: '0'
+- name: ANSIBLE_DATABASE_NAME
+ displayName: Ansible PostgreSQL database name
+ required: true
+ description: The database to be used by the Ansible container
+ value: awx
+- name: MEMCACHED_SERVICE_NAME
+ required: true
+ displayName: Memcached Service Name
+ description: The name of the OpenShift Service exposed for the Memcached container.
+ value: memcached
+- name: MEMCACHED_MAX_MEMORY
+ displayName: Memcached Max Memory
+ description: Memcached maximum memory for memcached object storage in MB.
+ value: '64'
+- name: MEMCACHED_MAX_CONNECTIONS
+ displayName: Memcached Max Connections
+ description: Memcached maximum number of connections allowed.
+ value: '1024'
+- name: MEMCACHED_SLAB_PAGE_SIZE
+ displayName: Memcached Slab Page Size
+ description: Memcached size of each slab page.
+ value: 1m
+- name: ANSIBLE_SERVICE_NAME
+ displayName: Ansible Service Name
+ description: The name of the OpenShift Service exposed for the Ansible container.
+ value: ansible
+- name: ANSIBLE_ADMIN_PASSWORD
+ displayName: Ansible Admin User Password
+ required: true
+ description: The password for the Ansible container admin user
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: ANSIBLE_SECRET_KEY
+ displayName: Ansible Secret Key
+ required: true
+ description: Encryption key for the Ansible container
+ from: "[a-f0-9]{32}"
+ generate: expression
+- name: ANSIBLE_RABBITMQ_USER_NAME
+ displayName: RabbitMQ Username
+ required: true
+ description: Username for the Ansible RabbitMQ Server
+ value: ansible
+- name: ANSIBLE_RABBITMQ_PASSWORD
+ displayName: RabbitMQ Server Password
+ required: true
+ description: Password for the Ansible RabbitMQ Server
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: APPLICATION_CPU_REQ
+ displayName: Application Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Application container will need (expressed in millicores).
+ value: 1000m
+- name: MEMCACHED_CPU_REQ
+ displayName: Memcached Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Memcached container will need (expressed in millicores).
+ value: 200m
+- name: ANSIBLE_CPU_REQ
+ displayName: Ansible Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Ansible container will need (expressed in millicores).
+ value: 1000m
+- name: APPLICATION_MEM_REQ
+ displayName: Application Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Application container will need.
+ value: 6144Mi
+- name: MEMCACHED_MEM_REQ
+ displayName: Memcached Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Memcached container will need.
+ value: 64Mi
+- name: ANSIBLE_MEM_REQ
+ displayName: Ansible Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Ansible container will need.
+ value: 2048Mi
+- name: APPLICATION_MEM_LIMIT
+ displayName: Application Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Application container can consume.
+ value: 16384Mi
+- name: MEMCACHED_MEM_LIMIT
+ displayName: Memcached Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Memcached container can consume.
+ value: 256Mi
+- name: ANSIBLE_MEM_LIMIT
+ displayName: Ansible Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Ansible container can consume.
+ value: 8096Mi
+- name: MEMCACHED_IMG_NAME
+ displayName: Memcached Image Name
+ description: This is the Memcached image name requested to deploy.
+ value: docker.io/manageiq/memcached
+- name: MEMCACHED_IMG_TAG
+ displayName: Memcached Image Tag
+ description: This is the Memcached image tag/version requested to deploy.
+ value: latest
+- name: APPLICATION_IMG_NAME
+ displayName: Application Image Name
+ description: This is the Application image name requested to deploy.
+ value: docker.io/manageiq/manageiq-pods
+- name: FRONTEND_APPLICATION_IMG_TAG
+ displayName: Frontend Application Image Tag
+ description: This is the ManageIQ Frontend Application image tag/version requested to deploy.
+ value: frontend-latest
+- name: BACKEND_APPLICATION_IMG_TAG
+ displayName: Backend Application Image Tag
+ description: This is the ManageIQ Backend Application image tag/version requested to deploy.
+ value: backend-latest
+- name: ANSIBLE_IMG_NAME
+ displayName: Ansible Image Name
+ description: This is the Ansible image name requested to deploy.
+ value: docker.io/manageiq/embedded-ansible
+- name: ANSIBLE_IMG_TAG
+ displayName: Ansible Image Tag
+ description: This is the Ansible image tag/version requested to deploy.
+ value: latest
+- name: APPLICATION_DOMAIN
+ displayName: Application Hostname
+ description: The exposed hostname that will route to the application service. If left blank, a value will be defaulted.
+ value: ''
+- name: APPLICATION_REPLICA_COUNT
+ displayName: Application Replica Count
+ description: This is the number of Application replicas requested to deploy.
+ value: '1'
+- name: APPLICATION_INIT_DELAY
+ displayName: Application Init Delay
+ required: true
+ description: Delay in seconds before we attempt to initialize the application.
+ value: '15'
+- name: APPLICATION_VOLUME_CAPACITY
+ displayName: Application Volume Capacity
+ required: true
+ description: Volume space available for application data.
+ value: 5Gi
+- name: HTTPD_SERVICE_NAME
+ required: true
+ displayName: Apache httpd Service Name
+ description: The name of the OpenShift Service exposed for the httpd container.
+ value: httpd
+- name: HTTPD_IMG_NAME
+ displayName: Apache httpd Image Name
+ description: This is the httpd image name requested to deploy.
+ value: docker.io/manageiq/httpd
+- name: HTTPD_IMG_TAG
+ displayName: Apache httpd Image Tag
+ description: This is the httpd image tag/version requested to deploy.
+ value: latest
+- name: HTTPD_CONFIG_DIR
+ displayName: Apache httpd Configuration Directory
+ description: Directory used to store the Apache configuration files.
+ value: "/etc/httpd/conf.d"
+- name: HTTPD_AUTH_CONFIG_DIR
+ displayName: External Authentication Configuration Directory
+ description: Directory used to store the external authentication configuration files.
+ value: "/etc/httpd/auth-conf.d"
+- name: HTTPD_CPU_REQ
+ displayName: Apache httpd Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the httpd container will need (expressed in millicores).
+ value: 500m
+- name: HTTPD_MEM_REQ
+ displayName: Apache httpd Min RAM Requested
+ required: true
+ description: Minimum amount of memory the httpd container will need.
+ value: 512Mi
+- name: HTTPD_MEM_LIMIT
+ displayName: Apache httpd Max RAM Limit
+ required: true
+ description: Maximum amount of memory the httpd container can consume.
+ value: 8192Mi
diff --git a/roles/openshift_management/files/templates/manageiq/miq-template.yaml b/roles/openshift_management/files/templates/manageiq/miq-template.yaml
new file mode 100644
index 000000000..3f5a12205
--- /dev/null
+++ b/roles/openshift_management/files/templates/manageiq/miq-template.yaml
@@ -0,0 +1,948 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: manageiq
+metadata:
+ name: manageiq
+ annotations:
+ description: ManageIQ appliance with persistent storage
+ tags: instant-app,manageiq,miq
+ iconClass: icon-rails
+objects:
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: miq-orchestrator
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: miq-anyuid
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: miq-privileged
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: miq-httpd
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${NAME}-secrets"
+ stringData:
+ pg-password: "${DATABASE_PASSWORD}"
+ database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5
+ v2-key: "${V2_KEY}"
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ stringData:
+ rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}"
+ secret-key: "${ANSIBLE_SECRET_KEY}"
+ admin-password: "${ANSIBLE_ADMIN_PASSWORD}"
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}-configs"
+ data:
+ 01_miq_overrides.conf: |
+ #------------------------------------------------------------------------------
+ # CONNECTIONS AND AUTHENTICATION
+ #------------------------------------------------------------------------------
+
+ tcp_keepalives_count = 9
+ tcp_keepalives_idle = 3
+ tcp_keepalives_interval = 75
+
+ #------------------------------------------------------------------------------
+ # RESOURCE USAGE (except WAL)
+ #------------------------------------------------------------------------------
+
+ shared_preload_libraries = 'pglogical,repmgr_funcs'
+ max_worker_processes = 10
+
+ #------------------------------------------------------------------------------
+ # WRITE AHEAD LOG
+ #------------------------------------------------------------------------------
+
+ wal_level = 'logical'
+ wal_log_hints = on
+ wal_buffers = 16MB
+ checkpoint_completion_target = 0.9
+
+ #------------------------------------------------------------------------------
+ # REPLICATION
+ #------------------------------------------------------------------------------
+
+ max_wal_senders = 10
+ wal_sender_timeout = 0
+ max_replication_slots = 10
+ hot_standby = on
+
+ #------------------------------------------------------------------------------
+ # ERROR REPORTING AND LOGGING
+ #------------------------------------------------------------------------------
+
+ log_filename = 'postgresql.log'
+ log_rotation_age = 0
+ log_min_duration_statement = 5000
+ log_connections = on
+ log_disconnections = on
+ log_line_prefix = '%t:%r:%c:%u@%d:[%p]:'
+ log_lock_waits = on
+
+ #------------------------------------------------------------------------------
+ # AUTOVACUUM PARAMETERS
+ #------------------------------------------------------------------------------
+
+ log_autovacuum_min_duration = 0
+ autovacuum_naptime = 5min
+ autovacuum_vacuum_threshold = 500
+ autovacuum_analyze_threshold = 500
+ autovacuum_vacuum_scale_factor = 0.05
+
+ #------------------------------------------------------------------------------
+ # LOCK MANAGEMENT
+ #------------------------------------------------------------------------------
+
+ deadlock_timeout = 5s
+
+ #------------------------------------------------------------------------------
+ # VERSION/PLATFORM COMPATIBILITY
+ #------------------------------------------------------------------------------
+
+ escape_string_warning = off
+ standard_conforming_strings = off
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ data:
+ application.conf: |
+ # Timeout: The number of seconds before receives and sends time out.
+ Timeout 120
+
+ RewriteEngine On
+ Options SymLinksIfOwnerMatch
+
+ <VirtualHost *:80>
+ KeepAlive on
+ ProxyPreserveHost on
+ ProxyPass /ws/ ws://${NAME}/ws/
+ ProxyPassReverse /ws/ ws://${NAME}/ws/
+ ProxyPass / http://${NAME}/
+ ProxyPassReverse / http://${NAME}/
+ </VirtualHost>
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ data:
+ auth-type: internal
+ auth-configuration.conf: |
+ # External Authentication Configuration File
+ #
+ # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances ManageIQ pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${NAME}"
+ spec:
+ clusterIP: None
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ selector:
+ name: "${NAME}"
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ host: "${APPLICATION_DOMAIN}"
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Redirect
+ to:
+ kind: Service
+ name: "${HTTPD_SERVICE_NAME}"
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: "${NAME}-${DATABASE_SERVICE_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${DATABASE_VOLUME_CAPACITY}"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}"
+ annotations:
+ description: Defines how to deploy the ManageIQ appliance
+ spec:
+ serviceName: "${NAME}"
+ replicas: "${APPLICATION_REPLICA_COUNT}"
+ template:
+ metadata:
+ labels:
+ name: "${NAME}"
+ name: "${NAME}"
+ spec:
+ containers:
+ - name: manageiq
+ image: "${APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: "/"
+ port: 80
+ scheme: HTTP
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_SERVICE_NAME
+ value: "${DATABASE_SERVICE_NAME}"
+ - name: DATABASE_REGION
+ value: "${DATABASE_REGION}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: MEMCACHED_SERVER
+ value: "${MEMCACHED_SERVICE_NAME}:11211"
+ - name: MEMCACHED_SERVICE_NAME
+ value: "${MEMCACHED_SERVICE_NAME}"
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: ANSIBLE_SERVICE_NAME
+ value: "${ANSIBLE_SERVICE_NAME}"
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/manageiq/container-scripts/sync-pv-data"
+ serviceAccount: miq-orchestrator
+ serviceAccountName: miq-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Headless service for ManageIQ backend pods
+ name: "${NAME}-backend"
+ spec:
+ clusterIP: None
+ selector:
+ name: "${NAME}-backend"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}-backend"
+ annotations:
+ description: Defines how to deploy the ManageIQ appliance
+ spec:
+ serviceName: "${NAME}-backend"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${NAME}-backend"
+ name: "${NAME}-backend"
+ spec:
+ containers:
+ - name: manageiq
+ image: "${APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - MIQ Server
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: MIQ_SERVER_DEFAULT_ROLES
+ value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate
+ - name: FRONTEND_SERVICE_NAME
+ value: "${NAME}"
+ - name: MEMCACHED_SERVER
+ value: "${MEMCACHED_SERVICE_NAME}:11211"
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: ANSIBLE_SERVICE_NAME
+ value: "${ANSIBLE_SERVICE_NAME}"
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/manageiq/container-scripts/sync-pv-data"
+ serviceAccount: miq-orchestrator
+ serviceAccountName: miq-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: Exposes the memcached server
+ spec:
+ ports:
+ - name: memcached
+ port: 11211
+ targetPort: 11211
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy memcached
+ spec:
+ strategy:
+ type: Recreate
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ labels:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ spec:
+ volumes: []
+ containers:
+ - name: memcached
+ image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
+ ports:
+ - containerPort: 11211
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 5
+ tcpSocket:
+ port: 11211
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 30
+ tcpSocket:
+ port: 11211
+ volumeMounts: []
+ env:
+ - name: MEMCACHED_MAX_MEMORY
+ value: "${MEMCACHED_MAX_MEMORY}"
+ - name: MEMCACHED_MAX_CONNECTIONS
+ value: "${MEMCACHED_MAX_CONNECTIONS}"
+ - name: MEMCACHED_SLAB_PAGE_SIZE
+ value: "${MEMCACHED_SLAB_PAGE_SIZE}"
+ resources:
+ requests:
+ memory: "${MEMCACHED_MEM_REQ}"
+ cpu: "${MEMCACHED_CPU_REQ}"
+ limits:
+ memory: "${MEMCACHED_MEM_LIMIT}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: Exposes the database server
+ spec:
+ ports:
+ - name: postgresql
+ port: 5432
+ targetPort: 5432
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy the database
+ spec:
+ strategy:
+ type: Recreate
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ labels:
+ name: "${DATABASE_SERVICE_NAME}"
+ spec:
+ volumes:
+ - name: miq-pgdb-volume
+ persistentVolumeClaim:
+ claimName: "${NAME}-${DATABASE_SERVICE_NAME}"
+ - name: miq-pg-configs
+ configMap:
+ name: "${DATABASE_SERVICE_NAME}-configs"
+ containers:
+ - name: postgresql
+ image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}"
+ ports:
+ - containerPort: 5432
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 15
+ exec:
+ command:
+ - "/bin/sh"
+ - "-i"
+ - "-c"
+ - psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 60
+ tcpSocket:
+ port: 5432
+ volumeMounts:
+ - name: miq-pgdb-volume
+ mountPath: "/var/lib/pgsql/data"
+ - name: miq-pg-configs
+ mountPath: "${POSTGRESQL_CONFIG_DIR}"
+ env:
+ - name: POSTGRESQL_USER
+ value: "${DATABASE_USER}"
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: pg-password
+ - name: POSTGRESQL_DATABASE
+ value: "${DATABASE_NAME}"
+ - name: POSTGRESQL_MAX_CONNECTIONS
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ - name: POSTGRESQL_SHARED_BUFFERS
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ - name: POSTGRESQL_CONFIG_DIR
+ value: "${POSTGRESQL_CONFIG_DIR}"
+ resources:
+ requests:
+ memory: "${POSTGRESQL_MEM_REQ}"
+ cpu: "${POSTGRESQL_CPU_REQ}"
+ limits:
+ memory: "${POSTGRESQL_MEM_LIMIT}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances Ansible pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ selector:
+ name: "${ANSIBLE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy the Ansible appliance
+ spec:
+ strategy:
+ type: Recreate
+ serviceName: "${ANSIBLE_SERVICE_NAME}"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ containers:
+ - name: ansible
+ image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 443
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: "/"
+ port: 443
+ scheme: HTTPS
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 443
+ protocol: TCP
+ securityContext:
+ privileged: true
+ env:
+ - name: ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ - name: RABBITMQ_USER_NAME
+ value: "${ANSIBLE_RABBITMQ_USER_NAME}"
+ - name: RABBITMQ_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: rabbit-password
+ - name: ANSIBLE_SECRET_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: secret-key
+ - name: DATABASE_SERVICE_NAME
+ value: "${DATABASE_SERVICE_NAME}"
+ - name: POSTGRESQL_USER
+ value: "${DATABASE_USER}"
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: pg-password
+ - name: POSTGRESQL_DATABASE
+ value: "${ANSIBLE_DATABASE_NAME}"
+ resources:
+ requests:
+ memory: "${ANSIBLE_MEM_REQ}"
+ cpu: "${ANSIBLE_CPU_REQ}"
+ limits:
+ memory: "${ANSIBLE_MEM_LIMIT}"
+ serviceAccount: miq-privileged
+ serviceAccountName: miq-privileged
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Exposes the httpd server
+ service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]'
+ spec:
+ ports:
+ - name: http
+ port: 80
+ targetPort: 80
+ selector:
+ name: httpd
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy httpd
+ spec:
+ strategy:
+ type: Recreate
+ recreateParams:
+ timeoutSeconds: 1200
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${HTTPD_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ labels:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ volumes:
+ - name: httpd-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ - name: httpd-auth-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ containers:
+ - name: httpd
+ image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}"
+ ports:
+ - containerPort: 80
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - httpd
+ initialDelaySeconds: 15
+ timeoutSeconds: 3
+ readinessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 10
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: httpd-config
+ mountPath: "${HTTPD_CONFIG_DIR}"
+ - name: httpd-auth-config
+ mountPath: "${HTTPD_AUTH_CONFIG_DIR}"
+ resources:
+ requests:
+ memory: "${HTTPD_MEM_REQ}"
+ cpu: "${HTTPD_CPU_REQ}"
+ limits:
+ memory: "${HTTPD_MEM_LIMIT}"
+ env:
+ - name: HTTPD_AUTH_TYPE
+ valueFrom:
+ configMapKeyRef:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ key: auth-type
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - "/usr/bin/save-container-environment"
+ serviceAccount: miq-anyuid
+ serviceAccountName: miq-anyuid
+parameters:
+- name: NAME
+ displayName: Name
+ required: true
+ description: The name assigned to all of the frontend objects defined in this template.
+ value: manageiq
+- name: V2_KEY
+ displayName: ManageIQ Encryption Key
+ required: true
+ description: Encryption Key for ManageIQ Passwords
+ from: "[a-zA-Z0-9]{43}"
+ generate: expression
+- name: DATABASE_SERVICE_NAME
+ displayName: PostgreSQL Service Name
+ required: true
+ description: The name of the OpenShift Service exposed for the PostgreSQL container.
+ value: postgresql
+- name: DATABASE_USER
+ displayName: PostgreSQL User
+ required: true
+ description: PostgreSQL user that will access the database.
+ value: root
+- name: DATABASE_PASSWORD
+ displayName: PostgreSQL Password
+ required: true
+ description: Password for the PostgreSQL user.
+ from: "[a-zA-Z0-9]{8}"
+ generate: expression
+- name: DATABASE_NAME
+ required: true
+ displayName: PostgreSQL Database Name
+ description: Name of the PostgreSQL database accessed.
+ value: vmdb_production
+- name: DATABASE_REGION
+ required: true
+ displayName: Application Database Region
+ description: Database region that will be used for application.
+ value: '0'
+- name: ANSIBLE_DATABASE_NAME
+ displayName: Ansible PostgreSQL database name
+ required: true
+ description: The database to be used by the Ansible container
+ value: awx
+- name: MEMCACHED_SERVICE_NAME
+ required: true
+ displayName: Memcached Service Name
+ description: The name of the OpenShift Service exposed for the Memcached container.
+ value: memcached
+- name: MEMCACHED_MAX_MEMORY
+ displayName: Memcached Max Memory
+ description: Memcached maximum memory for memcached object storage in MB.
+ value: '64'
+- name: MEMCACHED_MAX_CONNECTIONS
+ displayName: Memcached Max Connections
+ description: Memcached maximum number of connections allowed.
+ value: '1024'
+- name: MEMCACHED_SLAB_PAGE_SIZE
+ displayName: Memcached Slab Page Size
+ description: Memcached size of each slab page.
+ value: 1m
+- name: POSTGRESQL_CONFIG_DIR
+ displayName: PostgreSQL Configuration Overrides
+ description: Directory used to store PostgreSQL configuration overrides.
+ value: "/var/lib/pgsql/conf.d"
+- name: POSTGRESQL_MAX_CONNECTIONS
+ displayName: PostgreSQL Max Connections
+ description: PostgreSQL maximum number of database connections allowed.
+ value: '1000'
+- name: POSTGRESQL_SHARED_BUFFERS
+ displayName: PostgreSQL Shared Buffer Amount
+ description: Amount of memory dedicated for PostgreSQL shared memory buffers.
+ value: 1GB
+- name: ANSIBLE_SERVICE_NAME
+ displayName: Ansible Service Name
+ description: The name of the OpenShift Service exposed for the Ansible container.
+ value: ansible
+- name: ANSIBLE_ADMIN_PASSWORD
+ displayName: Ansible admin User password
+ required: true
+ description: The password for the Ansible container admin user
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: ANSIBLE_SECRET_KEY
+ displayName: Ansible Secret Key
+ required: true
+ description: Encryption key for the Ansible container
+ from: "[a-f0-9]{32}"
+ generate: expression
+- name: ANSIBLE_RABBITMQ_USER_NAME
+ displayName: RabbitMQ Username
+ required: true
+ description: Username for the Ansible RabbitMQ Server
+ value: ansible
+- name: ANSIBLE_RABBITMQ_PASSWORD
+ displayName: RabbitMQ Server Password
+ required: true
+ description: Password for the Ansible RabbitMQ Server
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: APPLICATION_CPU_REQ
+ displayName: Application Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Application container will need (expressed in millicores).
+ value: 1000m
+- name: POSTGRESQL_CPU_REQ
+ displayName: PostgreSQL Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores).
+ value: 500m
+- name: MEMCACHED_CPU_REQ
+ displayName: Memcached Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Memcached container will need (expressed in millicores).
+ value: 200m
+- name: ANSIBLE_CPU_REQ
+ displayName: Ansible Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Ansible container will need (expressed in millicores).
+ value: 1000m
+- name: APPLICATION_MEM_REQ
+ displayName: Application Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Application container will need.
+ value: 6144Mi
+- name: POSTGRESQL_MEM_REQ
+ displayName: PostgreSQL Min RAM Requested
+ required: true
+ description: Minimum amount of memory the PostgreSQL container will need.
+ value: 4Gi
+- name: MEMCACHED_MEM_REQ
+ displayName: Memcached Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Memcached container will need.
+ value: 64Mi
+- name: ANSIBLE_MEM_REQ
+ displayName: Ansible Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Ansible container will need.
+ value: 2048Mi
+- name: APPLICATION_MEM_LIMIT
+ displayName: Application Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Application container can consume.
+ value: 16384Mi
+- name: POSTGRESQL_MEM_LIMIT
+ displayName: PostgreSQL Max RAM Limit
+ required: true
+ description: Maximum amount of memory the PostgreSQL container can consume.
+ value: 8Gi
+- name: MEMCACHED_MEM_LIMIT
+ displayName: Memcached Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Memcached container can consume.
+ value: 256Mi
+- name: ANSIBLE_MEM_LIMIT
+ displayName: Ansible Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Ansible container can consume.
+ value: 8096Mi
+- name: POSTGRESQL_IMG_NAME
+ displayName: PostgreSQL Image Name
+ description: This is the PostgreSQL image name requested to deploy.
+ value: docker.io/manageiq/postgresql
+- name: POSTGRESQL_IMG_TAG
+ displayName: PostgreSQL Image Tag
+ description: This is the PostgreSQL image tag/version requested to deploy.
+ value: latest
+- name: MEMCACHED_IMG_NAME
+ displayName: Memcached Image Name
+ description: This is the Memcached image name requested to deploy.
+ value: docker.io/manageiq/memcached
+- name: MEMCACHED_IMG_TAG
+ displayName: Memcached Image Tag
+ description: This is the Memcached image tag/version requested to deploy.
+ value: latest
+- name: APPLICATION_IMG_NAME
+ displayName: Application Image Name
+ description: This is the Application image name requested to deploy.
+ value: docker.io/manageiq/manageiq-pods
+- name: FRONTEND_APPLICATION_IMG_TAG
+ displayName: Front end Application Image Tag
+ description: This is the ManageIQ Frontend Application image tag/version requested to deploy.
+ value: frontend-latest
+- name: BACKEND_APPLICATION_IMG_TAG
+ displayName: Back end Application Image Tag
+ description: This is the ManageIQ Backend Application image tag/version requested to deploy.
+ value: backend-latest
+- name: ANSIBLE_IMG_NAME
+ displayName: Ansible Image Name
+ description: This is the Ansible image name requested to deploy.
+ value: docker.io/manageiq/embedded-ansible
+- name: ANSIBLE_IMG_TAG
+ displayName: Ansible Image Tag
+ description: This is the Ansible image tag/version requested to deploy.
+ value: latest
+- name: APPLICATION_DOMAIN
+ displayName: Application Hostname
+ description: The exposed hostname that will route to the application service. If left blank, a value will be defaulted.
+ value: ''
+- name: APPLICATION_REPLICA_COUNT
+ displayName: Application Replica Count
+ description: This is the number of Application replicas requested to deploy.
+ value: '1'
+- name: APPLICATION_INIT_DELAY
+ displayName: Application Init Delay
+ required: true
+ description: Delay in seconds before we attempt to initialize the application.
+ value: '15'
+- name: APPLICATION_VOLUME_CAPACITY
+ displayName: Application Volume Capacity
+ required: true
+ description: Volume space available for application data.
+ value: 5Gi
+- name: DATABASE_VOLUME_CAPACITY
+ displayName: Database Volume Capacity
+ required: true
+ description: Volume space available for database.
+ value: 15Gi
+- name: HTTPD_SERVICE_NAME
+ required: true
+ displayName: Apache httpd Service Name
+ description: The name of the OpenShift Service exposed for the httpd container.
+ value: httpd
+- name: HTTPD_IMG_NAME
+ displayName: Apache httpd Image Name
+ description: This is the httpd image name requested to deploy.
+ value: docker.io/manageiq/httpd
+- name: HTTPD_IMG_TAG
+ displayName: Apache httpd Image Tag
+ description: This is the httpd image tag/version requested to deploy.
+ value: latest
+- name: HTTPD_CONFIG_DIR
+ displayName: Apache httpd Configuration Directory
+ description: Directory used to store the Apache configuration files.
+ value: "/etc/httpd/conf.d"
+- name: HTTPD_AUTH_CONFIG_DIR
+ displayName: External Authentication Configuration Directory
+ description: Directory used to store the external authentication configuration files.
+ value: "/etc/httpd/auth-conf.d"
+- name: HTTPD_CPU_REQ
+ displayName: Apache httpd Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the httpd container will need (expressed in millicores).
+ value: 500m
+- name: HTTPD_MEM_REQ
+ displayName: Apache httpd Min RAM Requested
+ required: true
+ description: Minimum amount of memory the httpd container will need.
+ value: 512Mi
+- name: HTTPD_MEM_LIMIT
+ displayName: Apache httpd Max RAM Limit
+ required: true
+ description: Maximum amount of memory the httpd container can consume.
+ value: 8192Mi
diff --git a/roles/openshift_management/handlers/main.yml b/roles/openshift_management/handlers/main.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_management/handlers/main.yml
diff --git a/roles/openshift_cfme/meta/main.yml b/roles/openshift_management/meta/main.yml
index 162d817f0..07ad51126 100644
--- a/roles/openshift_cfme/meta/main.yml
+++ b/roles/openshift_management/meta/main.yml
@@ -16,4 +16,3 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_utils
-- role: openshift_master_facts
diff --git a/roles/openshift_management/tasks/accounts.yml b/roles/openshift_management/tasks/accounts.yml
new file mode 100644
index 000000000..e45ea8d43
--- /dev/null
+++ b/roles/openshift_management/tasks/accounts.yml
@@ -0,0 +1,28 @@
+---
+# This role task file is responsible for user/system account creation,
+# and ensuring correct access is provided as required.
+- name: Ensure the CFME system accounts exist
+ oc_serviceaccount:
+ namespace: "{{ openshift_management_project }}"
+ state: present
+ name: "{{ openshift_management_flavor_short }}{{ item.name }}"
+ with_items:
+ - "{{ __openshift_system_account_sccs }}"
+
+- name: Ensure the CFME system accounts have all the required SCCs
+ oc_adm_policy_user:
+ namespace: "{{ openshift_management_project }}"
+ user: "system:serviceaccount:{{ openshift_management_project }}:{{ openshift_management_flavor_short }}{{ item.name }}"
+ resource_kind: scc
+ resource_name: "{{ item.resource_name }}"
+ with_items:
+ - "{{ __openshift_system_account_sccs }}"
+
+- name: Ensure the CFME system accounts have the required roles
+ oc_adm_policy_user:
+ namespace: "{{ openshift_management_project }}"
+ user: "system:serviceaccount:{{ openshift_management_project }}:{{ openshift_management_flavor_short }}{{ item.name }}"
+ resource_kind: role
+ resource_name: "{{ item.resource_name }}"
+ with_items:
+ - "{{ __openshift_management_system_account_roles }}"
diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml
new file mode 100644
index 000000000..86c4d0010
--- /dev/null
+++ b/roles/openshift_management/tasks/main.yml
@@ -0,0 +1,79 @@
+---
+######################################################################
+# Users, projects, and privileges
+
+- name: Run pre-install CFME validation checks
+ include: validate.yml
+
+- name: "Ensure the CFME '{{ openshift_management_project }}' namespace exists"
+ oc_project:
+ state: present
+ name: "{{ openshift_management_project }}"
+ display_name: "{{ openshift_management_project_description }}"
+
+- name: Create and Authorize CFME Accounts
+ include: accounts.yml
+
+######################################################################
+# STORAGE - Initialize basic storage class
+#---------------------------------------------------------------------
+# * nfs - set up NFS shares on the first master for a proof of concept
+- name: Create required NFS exports for CFME app storage
+ include: storage/nfs.yml
+ when: openshift_management_storage_class == 'nfs'
+
+#---------------------------------------------------------------------
+# * external - NFS again, but pointing to a pre-configured NFS server
+- name: Note Storage Type - External NFS
+ debug:
+ msg: "Setting up external NFS storage, openshift_management_storage_class is {{ openshift_management_storage_class }}"
+ when: openshift_management_storage_class == 'nfs_external'
+
+#---------------------------------------------------------------------
+# * cloudprovider - use an existing cloudprovider based storage
+- name: Note Storage Type - Cloud Provider
+ debug:
+ msg: Validating cloud provider storage type, openshift_management_storage_class is 'cloudprovider'
+ when: openshift_management_storage_class == 'cloudprovider'
+
+#---------------------------------------------------------------------
+# * preconfigured - don't do anything, assume it's all there ready to go
+- name: Note Storage Type - Preconfigured
+ debug:
+ msg: Skipping storage configuration, openshift_management_storage_class is 'preconfigured'
+ when: openshift_management_storage_class == 'preconfigured'
+
+######################################################################
+# APPLICATION TEMPLATE
+- name: Install the CFME app and PV templates
+ include: template.yml
+
+######################################################################
+# APP & DB Storage
+
+# For local/external NFS backed installations
+- name: "Create the required App and DB PVs using {{ openshift_management_storage_class }}"
+ include: storage/create_nfs_pvs.yml
+ when:
+ - openshift_management_storage_class in ['nfs', 'nfs_external']
+
+######################################################################
+# CREATE APP
+- name: Note the correct ext-db template name
+ set_fact:
+ openshift_management_template_name: "{{ openshift_management_flavor }}-ext-db"
+ when:
+ - openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db']
+
+- name: Note the correct podified db template name
+ set_fact:
+ openshift_management_template_name: "{{ openshift_management_flavor }}"
+ when:
+ - openshift_management_app_template in ['miq-template', 'cfme-template']
+
+- name: Ensure the CFME App is created
+ oc_process:
+ namespace: "{{ openshift_management_project }}"
+ template_name: "{{ openshift_management_template_name }}"
+ create: True
+ params: "{{ openshift_management_template_parameters }}"
diff --git a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml
new file mode 100644
index 000000000..31c845725
--- /dev/null
+++ b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml
@@ -0,0 +1,69 @@
+---
+# Create the required PVs for the App and the DB
+- name: Note the App PV Size from Template Parameters
+ set_fact:
+ openshift_management_app_pv_size: "{{ openshift_management_template_parameters.APPLICATION_VOLUME_CAPACITY }}"
+ when:
+ - openshift_management_template_parameters.APPLICATION_VOLUME_CAPACITY is defined
+
+- name: Note the App PV Size from defaults
+ set_fact:
+ openshift_management_app_pv_size: "{{ __openshift_management_app_pv_size }}"
+ when:
+ - openshift_management_template_parameters.APPLICATION_VOLUME_CAPACITY is not defined
+
+- when: openshift_management_app_template in ['miq-template', 'cfme-template']
+ block:
+ - name: Note the DB PV Size from Template Parameters
+ set_fact:
+ openshift_management_db_pv_size: "{{ openshift_management_template_parameters.DATABASE_VOLUME_CAPACITY }}"
+ when:
+ - openshift_management_template_parameters.DATABASE_VOLUME_CAPACITY is defined
+
+ - name: Note the DB PV Size from defaults
+ set_fact:
+ openshift_management_db_pv_size: "{{ __openshift_management_db_pv_size }}"
+ when:
+ - openshift_management_template_parameters.DATABASE_VOLUME_CAPACITY is not defined
+
+- name: Check if the CFME App PV has been created
+ oc_obj:
+ namespace: "{{ openshift_management_project }}"
+ state: list
+ kind: pv
+ name: "{{ openshift_management_flavor_short }}-app"
+ register: miq_app_pv_check
+
+- name: Check if the CFME DB PV has been created
+ oc_obj:
+ namespace: "{{ openshift_management_project }}"
+ state: list
+ kind: pv
+ name: "{{ openshift_management_flavor_short }}-db"
+ register: miq_db_pv_check
+ when:
+ - openshift_management_app_template in ['miq-template', 'cfme-template']
+
+- name: Ensure the CFME App PV is created
+ oc_process:
+ namespace: "{{ openshift_management_project }}"
+ template_name: "{{ openshift_management_flavor }}-app-pv"
+ create: True
+ params:
+ PV_SIZE: "{{ openshift_management_app_pv_size }}"
+ BASE_PATH: "{{ openshift_management_storage_nfs_base_dir }}"
+ NFS_HOST: "{{ openshift_management_nfs_server }}"
+ when: miq_app_pv_check.results.results == [{}]
+
+- name: Ensure the CFME DB PV is created
+ oc_process:
+ namespace: "{{ openshift_management_project }}"
+ template_name: "{{ openshift_management_flavor }}-db-pv"
+ create: True
+ params:
+ PV_SIZE: "{{ openshift_management_db_pv_size }}"
+ BASE_PATH: "{{ openshift_management_storage_nfs_base_dir }}"
+ NFS_HOST: "{{ openshift_management_nfs_server }}"
+ when:
+ - openshift_management_app_template in ['miq-template', 'cfme-template']
+ - miq_db_pv_check.results.results == [{}]
diff --git a/roles/openshift_management/tasks/storage/nfs.yml b/roles/openshift_management/tasks/storage/nfs.yml
new file mode 100644
index 000000000..696808328
--- /dev/null
+++ b/roles/openshift_management/tasks/storage/nfs.yml
@@ -0,0 +1,67 @@
+---
+# Tasks to statically provision NFS volumes
+# Include if not using dynamic volume provisioning
+
+- name: Ensure we save the local NFS server if one is provided
+ set_fact:
+ openshift_management_nfs_server: "{{ openshift_management_storage_nfs_local_hostname }}"
+ when:
+ - openshift_management_storage_nfs_local_hostname is defined
+ - openshift_management_storage_nfs_local_hostname != False
+ - openshift_management_storage_class == "nfs"
+
+- name: Ensure we save the local NFS server
+ set_fact:
+ openshift_management_nfs_server: "{{ groups['oo_nfs_to_config'].0 }}"
+ when:
+ - openshift_management_nfs_server is not defined
+ - openshift_management_storage_class == "nfs"
+
+- name: Ensure we save the external NFS server
+ set_fact:
+ openshift_management_nfs_server: "{{ openshift_management_storage_nfs_external_hostname }}"
+ when:
+ - openshift_management_storage_class == "nfs_external"
+
+- name: Failed NFS server detection
+ assert:
+ that:
+ - openshift_management_nfs_server is defined
+ msg: |
+ "Unable to detect an NFS server. The 'nfs_external'
+ openshift_management_storage_class option requires that you set
+ openshift_management_storage_nfs_external_hostname. NFS hosts detected
+ for local nfs services: {{ groups['oo_nfs_to_config'] | join(', ') }}"
+
+- name: Setting up NFS storage
+ block:
+ - name: Include the NFS Setup role tasks
+ include_role:
+ role: openshift_nfs
+ tasks_from: setup
+ vars:
+ l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}"
+
+ - name: Create the App export
+ include_role:
+ role: openshift_nfs
+ tasks_from: create_export
+ vars:
+ l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}"
+ l_nfs_export_config: "{{ openshift_management_flavor_short }}"
+ l_nfs_export_name: "{{ openshift_management_flavor_short }}-app"
+ l_nfs_options: "*(rw,no_root_squash,no_wdelay)"
+
+ - name: Create the DB export
+ include_role:
+ role: openshift_nfs
+ tasks_from: create_export
+ vars:
+ l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}"
+ l_nfs_export_config: "{{ openshift_management_flavor_short }}"
+ l_nfs_export_name: "{{ openshift_management_flavor_short }}-db"
+ l_nfs_options: "*(rw,no_root_squash,no_wdelay)"
+ when:
+ - openshift_management_app_template in ['miq-template', 'cfme-template']
+
+ delegate_to: "{{ openshift_management_nfs_server }}"
diff --git a/roles/openshift_management/tasks/storage/storage.yml b/roles/openshift_management/tasks/storage/storage.yml
new file mode 100644
index 000000000..d8bf7aa3e
--- /dev/null
+++ b/roles/openshift_management/tasks/storage/storage.yml
@@ -0,0 +1,3 @@
+---
+- include: nfs.yml
+ when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
diff --git a/roles/openshift_management/tasks/template.yml b/roles/openshift_management/tasks/template.yml
new file mode 100644
index 000000000..299158ac4
--- /dev/null
+++ b/roles/openshift_management/tasks/template.yml
@@ -0,0 +1,128 @@
+---
+# Tasks for ensuring the correct CFME templates are landed on the remote system
+
+######################################################################
+# CFME App Template
+#
+# Note, this is different from the create_nfs_pvs.yml tasks in that
+# the application template does not require any jinja2 evaluation.
+#
+# TODO: Handle the case where the server or PV templates are updated
+# in openshift-ansible and the change needs to be landed on the
+# managed cluster.
+
+######################################################################
+# STANDARD PODIFIED DATABASE TEMPLATE
+- when: openshift_management_app_template in ['miq-template', 'cfme-template']
+ block:
+ - name: Check if the CFME Server template has been created already
+ oc_obj:
+ namespace: "{{ openshift_management_project }}"
+ state: list
+ kind: template
+ name: "{{ openshift_management_flavor }}"
+ register: miq_server_check
+
+ - when: miq_server_check.results.results == [{}]
+ block:
+ - name: Copy over CFME Server template
+ copy:
+ src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-template.yaml"
+ dest: "{{ template_dir }}/"
+
+ - name: Ensure CFME Server Template is created
+ oc_obj:
+ namespace: "{{ openshift_management_project }}"
+ name: "{{ openshift_management_flavor }}"
+ state: present
+ kind: template
+ files:
+ - "{{ template_dir }}/{{ openshift_management_flavor_short }}-template.yaml"
+
+######################################################################
+# EXTERNAL DATABASE TEMPLATE
+- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db']
+ block:
+ - name: Check if the CFME Ext-DB Server template has been created already
+ oc_obj:
+ namespace: "{{ openshift_management_project }}"
+ state: list
+ kind: template
+ name: "{{ openshift_management_flavor }}-ext-db"
+ register: miq_ext_db_server_check
+
+ - when: miq_ext_db_server_check.results.results == [{}]
+ block:
+ - name: Copy over CFME Ext-DB Server template
+ copy:
+ src: "templates/{{ openshift_management_flavor }}/{{openshift_management_flavor_short}}-template-ext-db.yaml"
+ dest: "{{ template_dir }}/"
+
+ - name: Ensure CFME Ext-DB Server Template is created
+ oc_obj:
+ namespace: "{{ openshift_management_project }}"
+ name: "{{ openshift_management_flavor }}-ext-db"
+ state: present
+ kind: template
+ files:
+ - "{{ template_dir }}/{{ openshift_management_flavor_short }}-template-ext-db.yaml"
+
+# End app template creation.
+######################################################################
+
+######################################################################
+# Begin conditional PV template creations
+
+# Required for the application server
+- name: Check if the CFME App PV template has been created already
+ oc_obj:
+ namespace: "{{ openshift_management_project }}"
+ state: list
+ kind: template
+ name: "{{ openshift_management_flavor }}-app-pv"
+ register: miq_app_pv_check
+
+- when: miq_app_pv_check.results.results == [{}]
+ block:
+ - name: Copy over CFME App PV template
+ copy:
+ src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml"
+ dest: "{{ template_dir }}/"
+
+ - name: Ensure CFME App PV Template is created
+ oc_obj:
+ namespace: "{{ openshift_management_project }}"
+ name: "{{ openshift_management_flavor }}-app-pv"
+ state: present
+ kind: template
+ files:
+ - "{{ template_dir }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml"
+
+#---------------------------------------------------------------------
+
+# Required for database if the installation is fully podified
+- when: openshift_management_app_template in ['miq-template', 'cfme-template']
+ block:
+ - name: Check if the CFME DB PV template has been created already
+ oc_obj:
+ namespace: "{{ openshift_management_project }}"
+ state: list
+ kind: template
+ name: "{{ openshift_management_flavor }}-db-pv"
+ register: miq_db_pv_check
+
+ - when: miq_db_pv_check.results.results == [{}]
+ block:
+ - name: Copy over CFME DB PV template
+ copy:
+ src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml"
+ dest: "{{ template_dir }}/"
+
+ - name: Ensure CFME DB PV Template is created
+ oc_obj:
+ namespace: "{{ openshift_management_project }}"
+ name: "{{ openshift_management_flavor }}-db-pv"
+ state: present
+ kind: template
+ files:
+ - "{{ template_dir }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml"
diff --git a/roles/openshift_management/tasks/uninstall.yml b/roles/openshift_management/tasks/uninstall.yml
new file mode 100644
index 000000000..09fbc609f
--- /dev/null
+++ b/roles/openshift_management/tasks/uninstall.yml
@@ -0,0 +1,23 @@
+---
+- name: Start removing all the objects
+ command: "oc delete -n {{ openshift_management_project }} {{ item }} --all"
+ with_items:
+ - rc
+ - dc
+ - po
+ - svc
+ - pv
+ - pvc
+ - statefulsets
+ - routes
+
+- name: Remove the project
+ command: "oc delete -n {{ openshift_management_project }} project {{ openshift_management_project }}"
+
+- name: Verify project has been destroyed
+ command: "oc get project {{ openshift_management_project }}"
+ ignore_errors: True
+ register: project_terminated
+ until: project_terminated.stderr.find("NotFound") != -1
+ delay: 5
+ retries: 30
diff --git a/roles/openshift_management/tasks/validate.yml b/roles/openshift_management/tasks/validate.yml
new file mode 100644
index 000000000..8b20bdc5e
--- /dev/null
+++ b/roles/openshift_management/tasks/validate.yml
@@ -0,0 +1,90 @@
+---
+# Validate configuration parameters passed to the openshift_management role
+
+######################################################################
+# CORE PARAMETERS
+- name: Ensure openshift_management_app_template is valid
+ assert:
+ that:
+ - openshift_management_app_template in __openshift_management_app_templates
+
+ msg: |
+ "openshift_management_app_template must be one of {{
+ __openshift_management_app_templates | join(', ') }}"
+
+- name: Ensure openshift_management_storage_class is a valid type
+ assert:
+ that:
+ - openshift_management_storage_class in __openshift_management_storage_classes
+ msg: |
+ "openshift_management_storage_class must be one of {{
+ __openshift_management_storage_classes | join(', ') }}"
+
+######################################################################
+# STORAGE PARAMS - NFS
+- name: Ensure external NFS storage has a valid NFS server hostname defined
+ assert:
+ that:
+ - openshift_management_storage_nfs_external_hostname | default(False)
+ msg: |
+ The selected storage class 'nfs_external' requires a valid
+ hostname for the openshift_management_storage_nfs_external_hostname parameter
+ when:
+ - openshift_management_storage_class == 'nfs_external'
+
+- name: Ensure local NFS storage has a valid NFS server to use
+ fail:
+ msg: |
+ No NFS hosts detected or defined but storage class is set to
+ 'nfs'. Add hosts to your [nfs] group or define one manually with
+ the 'openshift_management_storage_nfs_local_hostname' parameter
+ when:
+ - openshift_management_storage_class == 'nfs'
+ # You haven't created any NFS groups
+ - (groups.nfs is defined and groups.nfs | length == 0) or (groups.nfs is not defined)
+ # You did not manually specify a host to use
+ - (openshift_management_storage_nfs_local_hostname is not defined) or (openshift_management_storage_nfs_local_hostname == false)
+
+######################################################################
+# STORAGE PARAMS - CLOUD PROVIDER
+- name: Validate Cloud Provider storage class
+ assert:
+ that:
+ - openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'
+ msg: |
+ openshift_management_storage_class is 'cloudprovider' but you have an
+ invalid kind defined, '{{ openshift_cloudprovider_kind }}'. See
+ 'openshift_cloudprovider_kind' in the example inventories for
+ the required parameters for your selected cloud
+ provider. Working providers: 'aws' and 'gce'.
+ when:
+ - openshift_management_storage_class == 'cloudprovider'
+ - openshift_cloudprovider_kind is defined
+
+- name: Validate 'cloudprovider' Storage Class has required parameters defined
+ assert:
+ that:
+ - openshift_cloudprovider_kind is defined
+ msg: |
+ openshift_management_storage_class is 'cloudprovider' but you do not
+ have 'openshift_cloudprovider_kind' defined, this is
+ required. Search the example inventories for
+ 'openshift_cloudprovider_kind'. The required parameters for your
+ selected cloud provider must be defined in your inventory as
+ well. Working providers: 'aws' and 'gce'.
+ when:
+ - openshift_management_storage_class == 'cloudprovider'
+
+######################################################################
+# DATABASE CONNECTION VALIDATION
+- name: Validate all required database parameters were provided for ext-db template
+ assert:
+ that:
+ - item in openshift_management_template_parameters
+ msg: |
+ "You are using external database services but a required
+ database parameter {{ item }} was not found in
+ 'openshift_management_template_parameters'"
+ with_items: "{{ __openshift_management_required_db_conn_params }}"
+ when:
+ - openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db']
diff --git a/roles/openshift_management/vars/main.yml b/roles/openshift_management/vars/main.yml
new file mode 100644
index 000000000..da3ad0af7
--- /dev/null
+++ b/roles/openshift_management/vars/main.yml
@@ -0,0 +1,76 @@
+---
+# Misc enumerated values
+#---------------------------------------------------------------------
+# Allowed choices for the storage class parameter
+__openshift_management_storage_classes:
+ - nfs
+ - nfs_external
+ - preconfigured
+ - cloudprovider
+
+#---------------------------------------------------------------------
+# DEFAULT PV SIZES
+# How large to make the MIQ application PV
+__openshift_management_app_pv_size: 5Gi
+# How large to make the MIQ PostgreSQL PV
+__openshift_management_db_pv_size: 15Gi
+
+# Name of the application templates with object/parameter definitions
+__openshift_management_app_templates:
+ - miq-template-ext-db
+ - miq-template
+ - cfme-template-ext-db
+ - cfme-template
+
+# PostgreSQL database connection parameters
+__openshift_management_db_parameters:
+ - DATABASE_USER
+ - DATABASE_PASSWORD
+ - DATABASE_IP
+ - DATABASE_PORT
+ - DATABASE_NAME
+
+# # Commented out until we can support both CFME and MIQ
+# # openshift_management_flavor: "{{ 'cloudforms' if openshift_deployment_type == 'openshift-enterprise' else 'manageiq' }}"
+#openshift_management_flavor: cloudforms
+openshift_management_flavor: manageiq
+# TODO: Make this conditional as well based on the prior variable
+# # openshift_management_flavor_short: "{{ 'cfme' if openshift_deployment_type == 'openshift-enterprise' else 'miq' }}"
+# openshift_management_flavor_short: cfme
+openshift_management_flavor_short: miq
+
+######################################################################
+# ACCOUNTING
+######################################################################
+# Service Account SCCs
+__openshift_system_account_sccs:
+ - name: -anyuid
+ resource_name: anyuid
+ - name: -orchestrator
+ resource_name: anyuid
+ - name: -privileged
+ resource_name: privileged
+ - name: -httpd
+ resource_name: anyuid
+
+# Service Account Roles
+__openshift_management_system_account_roles:
+ - name: -orchestrator
+ resource_name: view
+ - name: -orchestrator
+ resource_name: edit
+
+######################################################################
+# DEFAULTS
+######################################################################
+# User only has to provide parameters they need to override, we will
+# do a hash update method with the provided user parameters to create
+# the final connection structure.
+#
+# TODO: Update user provided configs with this if they are missing fields
+__openshift_management_required_db_conn_params:
+ - DATABASE_USER
+ - DATABASE_PASSWORD
+ - DATABASE_IP
+ - DATABASE_PORT
+ - DATABASE_NAME
diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md
index 86fa57b50..2dcc56e3f 100644
--- a/roles/openshift_master/README.md
+++ b/roles/openshift_master/README.md
@@ -1,4 +1,4 @@
-OpenShift/Atomic Enterprise Master
+OpenShift Master
==================================
Master service installation
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 4c8d6fdad..7e62a8c6d 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -1,4 +1,9 @@
---
+# openshift_master_defaults_in_use is a workaround to detect if we are consuming
+# the plays from the role or outside of the role.
+openshift_master_defaults_in_use: True
+openshift_master_debug_level: "{{ debug_level | default(2) }}"
+
r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
@@ -20,12 +25,15 @@ r_openshift_master_os_firewall_allow:
port: 4001/tcp
cond: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
-oreg_url: ''
-oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
+# oreg_url is defined by user input
+oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
oreg_auth_credentials_replace: False
l_bind_docker_reg_auth: False
+containerized_svc_dir: "/usr/lib/systemd/system"
+ha_svc_template_path: "native-cluster"
+
# NOTE
# r_openshift_master_*_default may be defined external to this role.
# openshift_use_*, if defined, may affect other roles or play behavior.
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index b0237141b..a657668a9 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -14,19 +14,3 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_os_firewall
-- role: openshift_master_facts
-- role: openshift_hosted_facts
-- role: openshift_master_certificates
-- role: openshift_etcd_client_certificates
- etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
- etcd_cert_prefix: "master.etcd-"
- when: groups.oo_etcd_to_config | default([]) | length != 0
-- role: openshift_clock
-- role: openshift_cloud_provider
-- role: openshift_builddefaults
-- role: openshift_buildoverrides
-- role: nickhammond.logrotate
-- role: contiv
- contiv_role: netmaster
- when: openshift_use_contiv | default(False) | bool
diff --git a/roles/openshift_master/tasks/check_master_api_is_ready.yml b/roles/openshift_master/tasks/check_master_api_is_ready.yml
new file mode 100644
index 000000000..7e8a7a596
--- /dev/null
+++ b/roles/openshift_master/tasks/check_master_api_is_ready.yml
@@ -0,0 +1,14 @@
+---
+- name: Wait for API to become available
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {{ openshift.master.api_url }}/healthz/ready
+ register: l_api_available_output
+ until: l_api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ run_once: true
+ changed_when: false
diff --git a/roles/openshift_master/tasks/configure_external_etcd.yml b/roles/openshift_master/tasks/configure_external_etcd.yml
new file mode 100644
index 000000000..b0590ac84
--- /dev/null
+++ b/roles/openshift_master/tasks/configure_external_etcd.yml
@@ -0,0 +1,17 @@
+---
+- name: Remove etcdConfig section
+ yedit:
+ src: /etc/origin/master/master-config.yaml
+ key: "etcdConfig"
+ state: absent
+- name: Set etcdClientInfo.ca to master.etcd-ca.crt
+ yedit:
+ src: /etc/origin/master/master-config.yaml
+ key: etcdClientInfo.ca
+ value: master.etcd-ca.crt
+- name: Set etcdClientInfo.urls to the external etcd
+ yedit:
+ src: /etc/origin/master/master-config.yaml
+ key: etcdClientInfo.urls
+ value:
+ - "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 894fe8e2b..824a5886e 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -180,6 +180,28 @@
- name: Install the systemd units
include: systemd_units.yml
+- name: Checking for journald.conf
+ stat: path=/etc/systemd/journald.conf
+ register: journald_conf_file
+
+- name: Update journald setup
+ replace:
+ dest: /etc/systemd/journald.conf
+ regexp: '^(\#| )?{{ item.var }}=\s*.*?$'
+ replace: ' {{ item.var }}={{ item.val }}'
+ backup: yes
+ with_items: "{{ journald_vars_to_replace | default([]) }}"
+ when: journald_conf_file.stat.exists
+ register: journald_update
+
+# Restart journald immediately, otherwise it gets in the way during
+# further steps in Ansible
+- name: Restart journald
+ systemd:
+ name: systemd-journald
+ state: restarted
+ when: journald_update | changed
+
- name: Install Master system container
include: system_container.yml
when:
@@ -200,7 +222,7 @@
- restart master api
- set_fact:
- translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1', openshift.common.version, openshift.common.deployment_type) }}"
+ translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1') }}"
# TODO: add the validate parameter when there is a validation command to run
- name: Create master config
@@ -229,8 +251,6 @@
- restart master controllers
when: openshift_master_bootstrap_enabled | default(False)
-- include: registry_auth.yml
-
- include: set_loopback_context.yml
when:
- openshift.common.version_gte_3_2_or_1_2
@@ -291,23 +311,7 @@
# A separate wait is required here for native HA since notifies will
# be resolved after all tasks in the role.
-- name: Wait for API to become available
- # Using curl here since the uri module requires python-httplib2 and
- # wait_for port doesn't provide health information.
- command: >
- curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
- --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
- {{ openshift.master.api_url }}/healthz/ready
- register: l_api_available_output
- until: l_api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- run_once: true
- changed_when: false
+- include: check_master_api_is_ready.yml
when:
- openshift.master.cluster_method == 'native'
- master_api_service_status_changed | bool
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
index 96b6c614e..63d483760 100644
--- a/roles/openshift_master/tasks/registry_auth.yml
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -5,23 +5,21 @@
when: oreg_auth_user is defined
register: master_oreg_auth_credentials_stat
-# Container images may need the registry credentials
-- name: Setup ro mount of /root/.docker for containerized hosts
- set_fact:
- l_bind_docker_reg_auth: True
+- name: Create credentials for registry auth
+ command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
when:
- - openshift.common.is_containerized | bool
- oreg_auth_user is defined
- (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ register: master_oreg_auth_credentials_create
notify:
- restart master api
- restart master controllers
-- name: Create credentials for registry auth
- command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+# Container images may need the registry credentials
+- name: Setup ro mount of /root/.docker for containerized hosts
+ set_fact:
+ l_bind_docker_reg_auth: True
when:
+ - openshift.common.is_containerized | bool
- oreg_auth_user is defined
- - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- notify:
- - restart master api
- - restart master controllers
+ - (master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or master_oreg_auth_credentials_create.changed) | bool
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 7a918c57e..fcc66044b 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -1,29 +1,9 @@
---
-# This file is included both in the openshift_master role and in the upgrade
-# playbooks. For that reason the ha_svc variables are use set_fact instead of
-# the vars directory on the role.
+# systemd_units.yml is included both in the openshift_master role and in the upgrade
+# playbooks.
-# This play may be consumed outside the role, we need to ensure that
-# openshift_master_config_dir is set.
-- name: Set openshift_master_config_dir if unset
- set_fact:
- openshift_master_config_dir: '/etc/origin/master'
- when: openshift_master_config_dir is not defined
-
-# This play may be consumed outside the role, we need to ensure that
-# r_openshift_master_data_dir is set.
-- name: Set r_openshift_master_data_dir if unset
- set_fact:
- r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}"
- when: r_openshift_master_data_dir is not defined
-
-- name: Remove the legacy master service if it exists
- include: clean_systemd_units.yml
-
-- name: Init HA Service Info
- set_fact:
- containerized_svc_dir: "/usr/lib/systemd/system"
- ha_svc_template_path: "native-cluster"
+- include: upgrade_facts.yml
+ when: openshift_master_defaults_in_use is not defined
- name: Set HA Service Info for containerized installs
set_fact:
@@ -32,6 +12,11 @@
when:
- openshift.common.is_containerized | bool
+- include: registry_auth.yml
+
+- name: Remove the legacy master service if it exists
+ include: clean_systemd_units.yml
+
# This is the image used for both HA and non-HA clusters:
- name: Pre-pull master image
command: >
diff --git a/roles/openshift_master/tasks/update_etcd_client_urls.yml b/roles/openshift_master/tasks/update_etcd_client_urls.yml
new file mode 100644
index 000000000..1ab105808
--- /dev/null
+++ b/roles/openshift_master/tasks/update_etcd_client_urls.yml
@@ -0,0 +1,8 @@
+---
+- yedit:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ key: 'etcdClientInfo.urls'
+ value: "{{ openshift.master.etcd_urls }}"
+ notify:
+ - restart master api
+ - restart master controllers
diff --git a/roles/openshift_master/tasks/upgrade_facts.yml b/roles/openshift_master/tasks/upgrade_facts.yml
new file mode 100644
index 000000000..f6ad438aa
--- /dev/null
+++ b/roles/openshift_master/tasks/upgrade_facts.yml
@@ -0,0 +1,33 @@
+---
+# This file exists because we call systemd_units.yml from outside of the role
+# during upgrades. When we remove this pattern, we can probably
+# eliminate most of these set_fact items.
+
+- name: Set openshift_master_config_dir if unset
+ set_fact:
+ openshift_master_config_dir: '/etc/origin/master'
+ when: openshift_master_config_dir is not defined
+
+- name: Set r_openshift_master_data_dir if unset
+ set_fact:
+ r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+ when: r_openshift_master_data_dir is not defined
+
+- set_fact:
+ oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
+ when: oreg_auth_credentials_path is not defined
+
+- set_fact:
+ oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
+ when: oreg_host is not defined
+
+- name: Set openshift_master_debug_level
+ set_fact:
+ openshift_master_debug_level: "{{ debug_level | default(2) }}"
+ when:
+ - openshift_master_debug_level is not defined
+
+- name: Init HA Service Info
+ set_fact:
+ containerized_svc_dir: "{{ containerized_svc_dir | default('/usr/lib/systemd/system') }}"
+ ha_svc_template_path: "{{ ha_svc_template_path | default('native-cluster') }}"
diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2
index b931f1414..7ec26ceb7 100644
--- a/roles/openshift_master/templates/atomic-openshift-master.j2
+++ b/roles/openshift_master/templates/atomic-openshift-master.j2
@@ -1,4 +1,4 @@
-OPTIONS=--loglevel={{ openshift.master.debug_level | default(2) }}
+OPTIONS=--loglevel={{ openshift_master_debug_level }}
CONFIG_FILE={{ openshift_master_config_file }}
{# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}
{% if openshift_master_is_scaleup_host %}
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index a184a59f6..5d4a99c97 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -20,7 +20,7 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \
-v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
{% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
-v /etc/pki:/etc/pki:ro \
- {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+ {% if l_bind_docker_reg_auth | default(False) %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
{{ openshift.master.master_image }}:${IMAGE_VERSION} start master api \
--config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index 2ded05f53..f93f3b565 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -19,7 +19,7 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \
-v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
{% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
-v /etc/pki:/etc/pki:ro \
- {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+ {% if l_bind_docker_reg_auth | default(False) %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
{{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers \
--config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index d045b402b..9b3fbcf49 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -251,11 +251,7 @@ servingInfo:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.api_port }}
bindNetwork: tcp4
certFile: master.server.crt
-{% if openshift.common.version_gte_3_2_or_1_2 | bool %}
- clientCA: ca-bundle.crt
-{% else %}
clientCA: ca.crt
-{% endif %}
keyFile: master.server.key
maxRequestsInFlight: {{ openshift.master.max_requests_inflight }}
requestTimeoutSeconds: 3600
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
index 63eb3ea1b..cc21b37af 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
@@ -1,4 +1,4 @@
-OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }}
+OPTIONS=--loglevel={{ openshift_master_debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }}
CONFIG_FILE={{ openshift_master_config_file }}
{# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}
{% if openshift_master_is_scaleup_host %}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
index 0adfd05b6..493fc510e 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
@@ -1,4 +1,4 @@
-OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }}
+OPTIONS=--loglevel={{ openshift_master_debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }}
CONFIG_FILE={{ openshift_master_config_file }}
{# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}
{% if openshift_master_is_scaleup_host %}
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index cf39b73f6..0c681c764 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -20,3 +20,22 @@ openshift_master_valid_grant_methods:
- deny
openshift_master_is_scaleup_host: False
+
+# These defaults assume forcing journald persistence, fsync to disk once
+# a second, rate-limiting to 10,000 logs a second, no forwarding to
+# syslog or wall, using 8GB of disk space maximum, using 10MB journal
+# files, keeping only a day's worth of logs per journal file, and
+# retaining journal files no longer than a month.
+journald_vars_to_replace:
+- { var: Storage, val: persistent }
+- { var: Compress, val: yes }
+- { var: SyncIntervalSec, val: 1s }
+- { var: RateLimitInterval, val: 1s }
+- { var: RateLimitBurst, val: 10000 }
+- { var: SystemMaxUse, val: 8G }
+- { var: SystemKeepFree, val: 20% }
+- { var: SystemMaxFileSize, val: 10M }
+- { var: MaxRetentionSec, val: 1month }
+- { var: MaxFileSec, val: 1day }
+- { var: ForwardToSyslog, val: no }
+- { var: ForwardToWall, val: no }
diff --git a/roles/openshift_master_certificates/meta/main.yml b/roles/openshift_master_certificates/meta/main.yml
index 018186e86..300b2cbff 100644
--- a/roles/openshift_master_certificates/meta/main.yml
+++ b/roles/openshift_master_certificates/meta/main.yml
@@ -12,6 +12,4 @@ galaxy_info:
categories:
- cloud
- system
-dependencies:
-- role: openshift_master_facts
-- role: openshift_ca
+dependencies: []
diff --git a/roles/openshift_master_facts/defaults/main.yml b/roles/openshift_master_facts/defaults/main.yml
index a80313505..d0dcdae4b 100644
--- a/roles/openshift_master_facts/defaults/main.yml
+++ b/roles/openshift_master_facts/defaults/main.yml
@@ -1,5 +1,5 @@
---
-openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}"
+openshift_master_default_subdomain: "router.default.svc.cluster.local"
openshift_master_admission_plugin_config:
openshift.io/ImagePolicy:
configuration:
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index 5558f55cb..f7f3ac2b1 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -6,10 +6,6 @@ Custom filters for use in openshift-master
import copy
import sys
-# pylint import-error disabled because pylint cannot find the package
-# when installed in a virtualenv
-from distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error
-
from ansible import errors
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.filter.core import to_bool as ansible_bool
@@ -82,23 +78,8 @@ class IdentityProviderBase(object):
self._allow_additional = True
@staticmethod
- def validate_idp_list(idp_list, openshift_version, deployment_type):
+ def validate_idp_list(idp_list):
''' validates a list of idps '''
- login_providers = [x.name for x in idp_list if x.login]
-
- multiple_logins_unsupported = False
- if len(login_providers) > 1:
- if deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']:
- if LooseVersion(openshift_version) < LooseVersion('3.2'):
- multiple_logins_unsupported = True
- if deployment_type in ['origin']:
- if LooseVersion(openshift_version) < LooseVersion('1.2'):
- multiple_logins_unsupported = True
- if multiple_logins_unsupported:
- raise errors.AnsibleFilterError("|failed multiple providers are "
- "not allowed for login. login "
- "providers: {0}".format(', '.join(login_providers)))
-
names = [x.name for x in idp_list]
if len(set(names)) != len(names):
raise errors.AnsibleFilterError("|failed more than one provider configured with the same name")
@@ -380,11 +361,6 @@ class OpenIDIdentityProvider(IdentityProviderOauthBase):
if 'extra_authorize_parameters' in self._idp:
self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters')
- if 'extraAuthorizeParameters' in self._idp:
- if 'include_granted_scopes' in self._idp['extraAuthorizeParameters']:
- val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes'))
- self._idp['extraAuthorizeParameters']['include_granted_scopes'] = '"true"' if val else '"false"'
-
def validate(self):
''' validate this idp instance '''
IdentityProviderOauthBase.validate(self)
@@ -476,7 +452,7 @@ class FilterModule(object):
''' Custom ansible filters for use by the openshift_master role'''
@staticmethod
- def translate_idps(idps, api_version, openshift_version, deployment_type):
+ def translate_idps(idps, api_version):
''' Translates a list of dictionaries into a valid identityProviders config '''
idp_list = []
@@ -492,7 +468,7 @@ class FilterModule(object):
idp_inst.set_provider_items()
idp_list.append(idp_inst)
- IdentityProviderBase.validate_idp_list(idp_list, openshift_version, deployment_type)
+ IdentityProviderBase.validate_idp_list(idp_list)
return u(yaml.dump([idp.to_dict() for idp in idp_list],
allow_unicode=True,
default_flow_style=False,
diff --git a/roles/openshift_master_facts/lookup_plugins/oo_option.py b/roles/openshift_master_facts/lookup_plugins/oo_option.py
deleted file mode 120000
index 5ae43f8dd..000000000
--- a/roles/openshift_master_facts/lookup_plugins/oo_option.py
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins/oo_option.py \ No newline at end of file
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
index c45f255af..f27eb629d 100644
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
@@ -101,7 +101,7 @@ class LookupModule(LookupBase):
{'name': 'MatchInterPodAffinity'}
])
- if short_version in ['3.5', '3.6', '3.7']:
+ if short_version in ['3.5', '3.6']:
predicates.extend([
{'name': 'NoVolumeZoneConflict'},
{'name': 'MaxEBSVolumeCount'},
@@ -114,6 +114,21 @@ class LookupModule(LookupBase):
{'name': 'CheckNodeDiskPressure'},
])
+ if short_version in ['3.7']:
+ predicates.extend([
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'MaxAzureDiskVolumeCount'},
+ {'name': 'MatchInterPodAffinity'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'},
+ {'name': 'CheckNodeDiskPressure'},
+ {'name': 'NoVolumeNodeConflict'},
+ ])
+
if regions_enabled:
region_predicate = {
'name': 'Region',
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index fa228af2a..501be148e 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -1,5 +1,4 @@
---
-
# Ensure the default sub-domain is set:
- name: Migrate legacy osm_default_subdomain fact
set_fact:
@@ -35,7 +34,6 @@
cluster_method: "{{ openshift_master_cluster_method | default('native') }}"
cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
- debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}"
api_port: "{{ openshift_master_api_port | default(None) }}"
api_url: "{{ openshift_master_api_url | default(None) }}"
api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
index 4a28fb8f8..38a918803 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
@@ -57,6 +57,20 @@ DEFAULT_PREDICATES_1_5 = [
DEFAULT_PREDICATES_3_6 = DEFAULT_PREDICATES_1_5
+DEFAULT_PREDICATES_3_7 = [
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'MaxAzureDiskVolumeCount'},
+ {'name': 'MatchInterPodAffinity'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'},
+ {'name': 'CheckNodeDiskPressure'},
+ {'name': 'NoVolumeNodeConflict'},
+]
+
REGION_PREDICATE = {
'name': 'Region',
'argument': {
@@ -79,6 +93,8 @@ TEST_VARS = [
('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
('3.6', 'origin', DEFAULT_PREDICATES_3_6),
('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_3_6),
+ ('3.7', 'origin', DEFAULT_PREDICATES_3_7),
+ ('3.7', 'openshift-enterprise', DEFAULT_PREDICATES_3_7),
]
diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md
index 1f10de4a2..ed698daca 100644
--- a/roles/openshift_metrics/README.md
+++ b/roles/openshift_metrics/README.md
@@ -39,6 +39,8 @@ For default values, see [`defaults/main.yaml`](defaults/main.yaml).
- `openshift_metrics_hawkular_replicas:` The number of replicas for Hawkular metrics.
+- `openshift_metrics_hawkular_route_annotations`: Dictionary with annotations for the Hawkular route.
+
- `openshift_metrics_cassandra_replicas`: The number of Cassandra nodes to deploy for the
initial cluster.
diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml
index d9a17ae7f..8da74430f 100644
--- a/roles/openshift_metrics/defaults/main.yaml
+++ b/roles/openshift_metrics/defaults/main.yaml
@@ -1,6 +1,6 @@
---
openshift_metrics_start_cluster: True
-openshift_metrics_install_metrics: True
+openshift_metrics_install_metrics: False
openshift_metrics_startup_timeout: 500
openshift_metrics_hawkular_replicas: 1
@@ -12,11 +12,12 @@ openshift_metrics_hawkular_cert: ""
openshift_metrics_hawkular_key: ""
openshift_metrics_hawkular_ca: ""
openshift_metrics_hawkular_nodeselector: ""
+openshift_metrics_hawkular_route_annotations: {}
openshift_metrics_cassandra_replicas: 1
-openshift_metrics_cassandra_storage_type: "{{ openshift_hosted_metrics_storage_kind | default('emptydir') }}"
-openshift_metrics_cassandra_pvc_size: "{{ openshift_hosted_metrics_storage_volume_size | default('10Gi') }}"
-openshift_metrics_cassandra_pv_selector: "{{ openshift_hosted_metrics_storage_labels | default('') }}"
+openshift_metrics_cassandra_storage_type: "{{ openshift_metrics_storage_kind | default('emptydir') }}"
+openshift_metrics_cassandra_pvc_size: "{{ openshift_metrics_storage_volume_size | default('10Gi') }}"
+openshift_metrics_cassandra_pv_selector: "{{ openshift_metrics_storage_labels | default('') }}"
openshift_metrics_cassandra_limits_memory: 2G
openshift_metrics_cassandra_limits_cpu: null
openshift_metrics_cassandra_requests_memory: 1G
@@ -53,9 +54,12 @@ openshift_metrics_master_url: https://kubernetes.default.svc
openshift_metrics_node_id: nodename
openshift_metrics_project: openshift-infra
-openshift_metrics_cassandra_pvc_prefix: "{{ openshift_hosted_metrics_storage_volume_name | default('metrics-cassandra') }}"
-openshift_metrics_cassandra_pvc_access: "{{ openshift_hosted_metrics_storage_access_modes | default(['ReadWriteOnce']) }}"
+openshift_metrics_cassandra_pvc_prefix: "{{ openshift_metrics_storage_volume_name | default('metrics-cassandra') }}"
+openshift_metrics_cassandra_pvc_access: "{{ openshift_metrics_storage_access_modes | default(['ReadWriteOnce']) }}"
openshift_metrics_hawkular_user_write_access: False
openshift_metrics_heapster_allowed_users: system:master-proxy
+
+openshift_metrics_cassandra_enable_prometheus_endpoint: True
+openshift_metrics_hawkular_enable_prometheus_endpoint: True
diff --git a/roles/openshift_metrics/tasks/install_hawkular.yaml b/roles/openshift_metrics/tasks/install_hawkular.yaml
index 6b37f85ab..b63f5ca8c 100644
--- a/roles/openshift_metrics/tasks/install_hawkular.yaml
+++ b/roles/openshift_metrics/tasks/install_hawkular.yaml
@@ -40,6 +40,7 @@
dest: "{{ mktemp.stdout }}/templates/hawkular-metrics-route.yaml"
vars:
name: hawkular-metrics
+ annotations: "{{ openshift_metrics_hawkular_route_annotations }}"
labels:
metrics-infra: hawkular-metrics
host: "{{ openshift_metrics_hawkular_hostname }}"
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index eaabdd20f..10509fc1e 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -43,7 +43,13 @@
check_mode: no
tags: metrics_init
-- include: "{{ (openshift_metrics_install_metrics | bool) | ternary('install_metrics.yaml','uninstall_metrics.yaml') }}"
+- include: install_metrics.yaml
+ when:
+ - openshift_metrics_install_metrics | bool
+
+- include: uninstall_metrics.yaml
+ when:
+ - not openshift_metrics_install_metrics | bool
- include: uninstall_hosa.yaml
when: not openshift_metrics_install_hawkular_agent | bool
diff --git a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
index fc82f49b1..6f341bcfb 100644
--- a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
@@ -56,6 +56,8 @@ spec:
value: "/cassandra_data"
- name: JVM_OPTS
value: "-Dcassandra.commitlog.ignorereplayerrors=true"
+ - name: ENABLE_PROMETHEUS_ENDPOINT
+ value: "{{ openshift_metrics_cassandra_enable_prometheus_endpoint }}"
- name: TRUSTSTORE_NODES_AUTHORITIES
value: "/hawkular-cassandra-certs/tls.peer.truststore.crt"
- name: TRUSTSTORE_CLIENT_AUTHORITIES
diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
index 9a9363075..59f7fb44a 100644
--- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
@@ -55,6 +55,7 @@ spec:
- "-Dcom.datastax.driver.FORCE_NIO=true"
- "-DKUBERNETES_MASTER_URL={{openshift_metrics_master_url}}"
- "-DUSER_WRITE_ACCESS={{openshift_metrics_hawkular_user_write_access}}"
+ - "-Dhawkular.metrics.jmx-reporting-enabled"
env:
- name: POD_NAMESPACE
valueFrom:
@@ -66,6 +67,8 @@ spec:
value: "{{ 17 | oo_random_word }}"
- name: TRUSTSTORE_AUTHORITIES
value: "/hawkular-metrics-certs/tls.truststore.crt"
+ - name: ENABLE_PROMETHEUS_ENDPOINT
+ value: "{{ openshift_metrics_hawkular_enable_prometheus_endpoint }}"
- name: OPENSHIFT_KUBE_PING_NAMESPACE
valueFrom:
fieldRef:
diff --git a/roles/openshift_metrics/templates/route.j2 b/roles/openshift_metrics/templates/route.j2
index 423ab54a3..253d6ecf5 100644
--- a/roles/openshift_metrics/templates/route.j2
+++ b/roles/openshift_metrics/templates/route.j2
@@ -2,6 +2,9 @@ apiVersion: v1
kind: Route
metadata:
name: {{ name }}
+{% if annotations is defined %}
+ annotations: {{ annotations | to_yaml }}
+{% endif %}
{% if labels is defined and labels %}
labels:
{% for k, v in labels.iteritems() %}
diff --git a/roles/openshift_metrics/vars/default_images.yml b/roles/openshift_metrics/vars/default_images.yml
index 678c4104c..8704ddfa0 100644
--- a/roles/openshift_metrics/vars/default_images.yml
+++ b/roles/openshift_metrics/vars/default_images.yml
@@ -1,3 +1,3 @@
---
-__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('docker.io/openshift/origin-') }}"
-__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default('latest') }}"
+__openshift_metrics_image_prefix: "docker.io/openshift/origin-"
+__openshift_metrics_image_version: "latest"
diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml
index f0bdac7d2..5a1728de5 100644
--- a/roles/openshift_metrics/vars/openshift-enterprise.yml
+++ b/roles/openshift_metrics/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
-__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default ('v3.6') }}"
+__openshift_metrics_image_prefix: "registry.access.redhat.com/openshift3/"
+__openshift_metrics_image_version: "v3.7"
diff --git a/roles/openshift_named_certificates/defaults/main.yml b/roles/openshift_named_certificates/defaults/main.yml
new file mode 100644
index 000000000..a32e385ec
--- /dev/null
+++ b/roles/openshift_named_certificates/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+openshift_ca_config_dir: "{{ openshift.common.config_base }}/master"
+openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt"
+openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key"
+openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt"
+openshift_version: "{{ openshift_pkg_version | default('') }}"
diff --git a/roles/openshift_named_certificates/tasks/named_certificates.yml b/roles/openshift_named_certificates/tasks/named_certificates.yml
deleted file mode 100644
index 7b097b443..000000000
--- a/roles/openshift_named_certificates/tasks/named_certificates.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-- name: Clear named certificates
- file:
- path: "{{ named_certs_dir }}"
- state: absent
- when: overwrite_named_certs | bool
-
-- name: Ensure named certificate directory exists
- file:
- path: "{{ named_certs_dir }}"
- state: directory
- mode: 0700
-
-- name: Land named certificates
- copy:
- src: "{{ item.certfile }}"
- dest: "{{ named_certs_dir }}"
- with_items: "{{ openshift_master_named_certificates | default([]) }}"
-
-- name: Land named certificate keys
- copy:
- src: "{{ item.keyfile }}"
- dest: "{{ named_certs_dir }}"
- mode: 0600
- with_items: "{{ openshift_master_named_certificates | default([]) }}"
-
-- name: Land named CA certificates
- copy:
- src: "{{ item }}"
- dest: "{{ named_certs_dir }}"
- mode: 0600
- with_items: "{{ openshift_master_named_certificates | default([]) | oo_collect('cafile') }}"
diff --git a/roles/openshift_named_certificates/vars/main.yml b/roles/openshift_named_certificates/vars/main.yml
index 368e9bdac..7f891441d 100644
--- a/roles/openshift_named_certificates/vars/main.yml
+++ b/roles/openshift_named_certificates/vars/main.yml
@@ -1,10 +1,4 @@
---
-openshift_ca_config_dir: "{{ openshift.common.config_base }}/master"
-openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt"
-openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key"
-openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt"
-openshift_version: "{{ openshift_pkg_version | default('') }}"
-
overwrite_named_certs: "{{ openshift_master_overwrite_named_certificates | default(false) }}"
named_certs_dir: "{{ openshift.common.config_base }}/master/named_certificates/"
internal_hostnames: "{{ openshift.common.internal_hostnames }}"
diff --git a/roles/openshift_nfs/README.md b/roles/openshift_nfs/README.md
new file mode 100644
index 000000000..36ea36385
--- /dev/null
+++ b/roles/openshift_nfs/README.md
@@ -0,0 +1,17 @@
+OpenShift NFS
+=============
+
+Sets up basic NFS services on a cluster host.
+
+See [tasks/create_export.yml](tasks/create_export.yml) for
+instructions on using the export creation tasks file.
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Tim Bielawa (tbielawa@redhat.com)
diff --git a/roles/openshift_nfs/defaults/main.yml b/roles/openshift_nfs/defaults/main.yml
new file mode 100644
index 000000000..ee94c7c57
--- /dev/null
+++ b/roles/openshift_nfs/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+r_openshift_nfs_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_openshift_nfs_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+
+r_openshift_nfs_os_firewall_deny: []
+r_openshift_nfs_firewall_allow:
+- service: nfs
+ port: "2049/tcp"
diff --git a/roles/openshift_etcd_ca/meta/main.yml b/roles/openshift_nfs/meta/main.yml
index f1d669d6b..d7b5910f2 100644
--- a/roles/openshift_etcd_ca/meta/main.yml
+++ b/roles/openshift_nfs/meta/main.yml
@@ -1,7 +1,7 @@
---
galaxy_info:
author: Tim Bielawa
- description: Meta role around the etcd_ca role
+ description: OpenShift Basic NFS Configuration
company: Red Hat, Inc.
license: Apache License, Version 2.0
min_ansible_version: 2.2
@@ -11,8 +11,6 @@ galaxy_info:
- 7
categories:
- cloud
- - system
dependencies:
-- role: openshift_etcd_facts
-- role: etcd_ca
- when: (etcd_ca_setup | default(True) | bool)
+- role: lib_utils
+- role: lib_os_firewall
diff --git a/roles/openshift_nfs/tasks/create_export.yml b/roles/openshift_nfs/tasks/create_export.yml
new file mode 100644
index 000000000..39323904f
--- /dev/null
+++ b/roles/openshift_nfs/tasks/create_export.yml
@@ -0,0 +1,34 @@
+---
+# Makes a new NFS export
+#
+# Include signature
+#
+# include_role:
+# role: openshift_nfs
+# tasks_from: create_export
+# vars:
+# l_nfs_base_dir: Base directory for the exports
+# l_nfs_export_config: Name to prefix the .exports file with
+# l_nfs_export_name: Name of the export sub-directory
+# l_nfs_options: Mount options
+
+- name: Ensure CFME App NFS export directory exists
+ file:
+ path: "{{ l_nfs_base_dir }}/{{ l_nfs_export_name }}"
+ state: directory
+ mode: 0777
+ owner: nfsnobody
+ group: nfsnobody
+
+- name: "Create {{ l_nfs_export_name }} NFS export"
+ lineinfile:
+ path: "/etc/exports.d/{{ l_nfs_export_config }}.exports"
+ create: true
+ state: present
+ line: "{{ l_nfs_base_dir }}/{{ l_nfs_export_name }} {{ l_nfs_options }}"
+ register: created_export
+
+- name: Re-export NFS filesystems
+ command: exportfs -ar
+ when:
+ - created_export | changed
diff --git a/roles/openshift_hosted/tasks/registry/firewall.yml b/roles/openshift_nfs/tasks/firewall.yml
index 775b7d6d7..0898b2b5c 100644
--- a/roles/openshift_hosted/tasks/registry/firewall.yml
+++ b/roles/openshift_nfs/tasks/firewall.yml
@@ -1,5 +1,5 @@
---
-- when: r_openshift_hosted_registry_firewall_enabled | bool and not r_openshift_hosted_registry_use_firewalld | bool
+- when: r_openshift_nfs_firewall_enabled | bool and not r_openshift_nfs_use_firewalld | bool
block:
- name: Add iptables allow rules
os_firewall_manage_iptables:
@@ -8,7 +8,7 @@
protocol: "{{ item.port.split('/')[1] }}"
port: "{{ item.port.split('/')[0] }}"
when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_registry_os_firewall_allow }}"
+ with_items: "{{ r_openshift_nfs_firewall_allow }}"
- name: Remove iptables rules
os_firewall_manage_iptables:
@@ -17,9 +17,9 @@
protocol: "{{ item.port.split('/')[1] }}"
port: "{{ item.port.split('/')[0] }}"
when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_registry_os_firewall_deny }}"
+ with_items: "{{ r_openshift_nfs_os_firewall_deny }}"
-- when: r_openshift_hosted_registry_firewall_enabled | bool and r_openshift_hosted_registry_use_firewalld | bool
+- when: r_openshift_nfs_firewall_enabled | bool and r_openshift_nfs_use_firewalld | bool
block:
- name: Add firewalld allow rules
firewalld:
@@ -28,7 +28,7 @@
immediate: true
state: enabled
when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_registry_os_firewall_allow }}"
+ with_items: "{{ r_openshift_nfs_firewall_allow }}"
- name: Remove firewalld allow rules
firewalld:
@@ -37,4 +37,4 @@
immediate: true
state: disabled
when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_registry_os_firewall_deny }}"
+ with_items: "{{ r_openshift_nfs_os_firewall_deny }}"
diff --git a/roles/openshift_nfs/tasks/setup.yml b/roles/openshift_nfs/tasks/setup.yml
new file mode 100644
index 000000000..3070de495
--- /dev/null
+++ b/roles/openshift_nfs/tasks/setup.yml
@@ -0,0 +1,29 @@
+---
+- name: setup firewall
+ include: firewall.yml
+ static: yes
+
+- name: Install nfs-utils
+ package: name=nfs-utils state=present
+
+- name: Configure NFS
+ lineinfile:
+ dest: /etc/sysconfig/nfs
+ regexp: '^RPCNFSDARGS=.*$'
+ line: 'RPCNFSDARGS="-N 2 -N 3"'
+ register: nfs_config
+
+- name: Restart nfs-config
+ systemd: name=nfs-config state=restarted
+ when: nfs_config | changed
+
+- name: Ensure exports directory exists
+ file:
+ path: "{{ l_nfs_base_dir }}"
+ state: directory
+
+- name: Enable and start NFS services
+ systemd:
+ name: nfs-server
+ state: started
+ enabled: yes
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index 32670b18e..67f697924 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -1,4 +1,4 @@
-OpenShift/Atomic Enterprise Node
+OpenShift Node
================================
Node service installation
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 5424a64d2..298d1013f 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -1,4 +1,6 @@
---
+openshift_node_debug_level: "{{ debug_level | default(2) }}"
+
r_openshift_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
@@ -31,12 +33,9 @@ openshift_node_ami_prep_packages:
- python-dbus
- PyYAML
- yum-utils
-- python2-boto
-- python2-boto3
- cloud-utils-growpart
# gluster
- glusterfs-fuse
-- heketi-client
# nfs
- nfs-utils
- flannel
@@ -60,7 +59,7 @@ openshift_deployment_type: origin
openshift_node_bootstrap: False
r_openshift_node_os_firewall_deny: []
-r_openshift_node_os_firewall_allow:
+default_r_openshift_node_os_firewall_allow:
- service: Kubernetes kubelet
port: 10250/tcp
- service: http
@@ -79,9 +78,11 @@ r_openshift_node_os_firewall_allow:
- service: Kubernetes service NodePort UDP
port: "{{ openshift_node_port_range | default('') }}/udp"
cond: "{{ openshift_node_port_range is defined }}"
+# Allow multiple port ranges to be added to the role
+r_openshift_node_os_firewall_allow: "{{ default_r_openshift_node_os_firewall_allow | union(openshift_node_open_ports | default([])) }}"
-oreg_url: ''
-oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
+# oreg_url is defined by user input
+oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
oreg_auth_credentials_replace: False
l_bind_docker_reg_auth: False
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 855b0a8d8..25a6fc721 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -29,8 +29,5 @@
- not (node_service_status_changed | default(false) | bool)
- not openshift_node_bootstrap
-- name: reload sysctl.conf
- command: /sbin/sysctl -p
-
- name: reload systemd units
command: systemctl daemon-reload
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index ce5ecb9d0..5bc7b9869 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -17,7 +17,5 @@ dependencies:
- role: lib_os_firewall
- role: openshift_clock
- role: openshift_docker
-- role: openshift_node_certificates
- when: not openshift_node_bootstrap
- role: openshift_cloud_provider
- role: openshift_node_dnsmasq
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
index b83b2c452..6bd2df362 100644
--- a/roles/openshift_node/tasks/bootstrap.yml
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -29,7 +29,7 @@
line: "{% raw %}ExecStart=/usr/bin/openshift start node --bootstrap --kubeconfig=${KUBECONFIG} $OPTIONS{% endraw %}"
regexp: "^ExecStart=.*"
-- name: "systemctl enable {{ openshift_service_type }}-node"
+- name: "disable {{ openshift_service_type }}-node and {{ openshift_service_type }}-master services"
systemd:
name: "{{ item }}"
enabled: no
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index 1504d01af..e5fcaf9af 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -2,10 +2,6 @@
- name: Install the systemd units
include: systemd_units.yml
-- name: Setup tuned
- include: tuned.yml
- static: yes
-
- name: Start and enable openvswitch service
systemd:
name: openvswitch.service
@@ -50,6 +46,22 @@
notify:
- restart node
+- name: Configure AWS Cloud Provider Settings
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ create: true
+ with_items:
+ - regex: '^AWS_ACCESS_KEY_ID='
+ line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}"
+ - regex: '^AWS_SECRET_ACCESS_KEY='
+ line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}"
+ no_log: True
+ when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined
+ notify:
+ - restart node
+
# Necessary because when you're on a node that's also a master the master will be
# restarted after the node restarts docker and it will take up to 60 seconds for
# systemd to start the master again
diff --git a/roles/openshift_node/tasks/config/configure-node-settings.yml b/roles/openshift_node/tasks/config/configure-node-settings.yml
index 1186062eb..527580481 100644
--- a/roles/openshift_node/tasks/config/configure-node-settings.yml
+++ b/roles/openshift_node/tasks/config/configure-node-settings.yml
@@ -7,7 +7,7 @@
create: true
with_items:
- regex: '^OPTIONS='
- line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
+ line: "OPTIONS=--loglevel={{ openshift_node_debug_level }}"
- regex: '^CONFIG_FILE='
line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
- regex: '^IMAGE_VERSION='
diff --git a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
deleted file mode 100644
index f92ff79b5..000000000
--- a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Install Node docker service file
- template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
- src: openshift.docker.node.service
- notify:
- - reload systemd units
- - restart node
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 265bf2c46..1539d6e3b 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -27,5 +27,3 @@
docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
-
- - include: config/install-node-docker-service-file.yml
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index ff8d1942c..59b8bb76e 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -2,7 +2,8 @@
- fail:
msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
when:
- - (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']
+ - (not ansible_selinux or ansible_selinux.status != 'enabled')
+ - deployment_type == 'openshift-enterprise'
- not openshift_use_crio | default(false)
- name: setup firewall
@@ -59,41 +60,22 @@
# The atomic-openshift-node service will set this parameter on
# startup, but if the network service is restarted this setting is
# lost. Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1372388
-#
-# Use lineinfile w/ a handler for this task until
-# https://github.com/ansible/ansible/pull/24277 is included in an
-# ansible release and we can use the sysctl module.
-- name: Persist net.ipv4.ip_forward sysctl entry
- lineinfile: dest=/etc/sysctl.conf regexp='^net.ipv4.ip_forward' line='net.ipv4.ip_forward=1'
- notify:
- - reload sysctl.conf
+- sysctl:
+ name: net.ipv4.ip_forward
+ value: 1
+ sysctl_file: "/etc/sysctl.d/99-openshift.conf"
+ reload: yes
- name: include bootstrap node config
include: bootstrap.yml
when: openshift_node_bootstrap
+- include: registry_auth.yml
+
- name: include standard node config
include: config.yml
when: not openshift_node_bootstrap
-- include: registry_auth.yml
-
-- name: Configure AWS Cloud Provider Settings
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- create: true
- with_items:
- - regex: '^AWS_ACCESS_KEY_ID='
- line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}"
- - regex: '^AWS_SECRET_ACCESS_KEY='
- line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}"
- no_log: True
- when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined
- notify:
- - restart node
-
#### Storage class plugins here ####
- name: NFS storage plugin configuration
include: storage_plugins/nfs.yml
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index b2dceedbe..20d7a9539 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -9,4 +9,8 @@
oc_atomic_container:
name: "{{ openshift.common.service_type }}-node"
image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}"
+ values:
+ - "DNS_DOMAIN={{ openshift.common.dns_domain }}"
+ - "DOCKER_SERVICE={{ openshift.docker.service_name }}.service"
+ - "MASTER_SERVICE={{ openshift.common.service_type }}.service"
state: latest
diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml
index f370bb260..de396fb4b 100644
--- a/roles/openshift_node/tasks/registry_auth.yml
+++ b/roles/openshift_node/tasks/registry_auth.yml
@@ -5,21 +5,20 @@
when: oreg_auth_user is defined
register: node_oreg_auth_credentials_stat
-# Container images may need the registry credentials
-- name: Setup ro mount of /root/.docker for containerized hosts
- set_fact:
- l_bind_docker_reg_auth: True
+- name: Create credentials for registry auth
+ command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
when:
- - openshift.common.is_containerized | bool
- oreg_auth_user is defined
- (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ register: node_oreg_auth_credentials_create
notify:
- restart node
-- name: Create credentials for registry auth
- command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+# Container images may need the registry credentials
+- name: Setup ro mount of /root/.docker for containerized hosts
+ set_fact:
+ l_bind_docker_reg_auth: True
when:
+ - openshift.common.is_containerized | bool
- oreg_auth_user is defined
- - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- notify:
- - restart node
+ - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 6b4490f61..9c182ade6 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -1,11 +1,9 @@
---
-# This file is included both in the openshift_master role and in the upgrade
-# playbooks.
- name: Install Node service file
template:
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
- src: "node.service.j2"
- when: not openshift.common.is_containerized | bool
+ src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}"
+ when: not openshift.common.is_node_system_container | bool
notify:
- reload systemd units
- restart node
diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service
index 8734e7443..fa7238849 100644
--- a/roles/openshift_node/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node/templates/openshift.docker.node.dep.service
@@ -6,6 +6,6 @@ Before={{ openshift.common.service_type }}-node.service
{% if openshift_use_crio|default(false) %}Wants=cri-o.service{% endif %}
[Service]
-ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
+ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
ExecStop=
SyslogIdentifier={{ openshift.common.service_type }}-node-dep
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 4ab10b95f..561aa01f4 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -6,6 +6,7 @@ PartOf={{ openshift.docker.service_name }}.service
Requires={{ openshift.docker.service_name }}.service
{% if openshift_node_use_openshift_sdn %}
Wants=openvswitch.service
+PartOf=openvswitch.service
After=ovsdb-server.service
After=ovs-vswitchd.service
{% endif %}
@@ -34,6 +35,7 @@ ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
-v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch \
-v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni \
-v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log \
+ {% if openshift_use_nuage | default(false) -%} $NUAGE_ADDTL_BIND_MOUNTS {% endif -%} \
-v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \
{% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
{{ openshift.node.node_image }}:${IMAGE_VERSION}
diff --git a/roles/openshift_node_certificates/meta/main.yml b/roles/openshift_node_certificates/meta/main.yml
index 93216c1d2..0440bf11a 100644
--- a/roles/openshift_node_certificates/meta/main.yml
+++ b/roles/openshift_node_certificates/meta/main.yml
@@ -12,5 +12,4 @@ galaxy_info:
categories:
- cloud
- system
-dependencies:
-- role: openshift_facts
+dependencies: []
diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
index 61d2a5b51..230f0a28c 100755
--- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
+++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
@@ -112,7 +112,9 @@ EOF
fi
sed -e '/^nameserver.*$/d' /etc/resolv.conf >> ${NEW_RESOLV_CONF}
echo "nameserver "${def_route_ip}"" >> ${NEW_RESOLV_CONF}
- if ! grep -q 'search.*cluster.local' ${NEW_RESOLV_CONF}; then
+ if ! grep -qw search ${NEW_RESOLV_CONF}; then
+ echo 'search cluster.local' >> ${NEW_RESOLV_CONF}
+ elif ! grep -q 'search.*cluster.local' ${NEW_RESOLV_CONF}; then
sed -i '/^search/ s/$/ cluster.local/' ${NEW_RESOLV_CONF}
fi
cp -Z ${NEW_RESOLV_CONF} /etc/resolv.conf
diff --git a/roles/openshift_node_dnsmasq/handlers/main.yml b/roles/openshift_node_dnsmasq/handlers/main.yml
index b4a0c3583..9f98126a0 100644
--- a/roles/openshift_node_dnsmasq/handlers/main.yml
+++ b/roles/openshift_node_dnsmasq/handlers/main.yml
@@ -3,6 +3,7 @@
systemd:
name: NetworkManager
state: restarted
+ enabled: True
- name: restart dnsmasq
systemd:
diff --git a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
index d5fda7bd0..8a7da66c2 100644
--- a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
+++ b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
@@ -1,2 +1,11 @@
---
- fail: msg="Currently, NetworkManager must be installed and enabled prior to installation."
+ when: not openshift_node_bootstrap | bool
+
+- name: Install NetworkManager during node_bootstrap provisioning
+ package:
+ name: NetworkManager
+ state: present
+ notify: restart NetworkManager
+
+- include: ./network-manager.yml
diff --git a/filter_plugins/openshift_node.py b/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py
index 50c360e97..69069f2dc 100644
--- a/filter_plugins/openshift_node.py
+++ b/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py
@@ -7,10 +7,10 @@ from ansible import errors
class FilterModule(object):
- ''' Custom ansible filters for use by openshift_node role'''
+ ''' Custom ansible filters for use by openshift_node_facts role'''
@staticmethod
- def get_dns_ip(openshift_dns_ip, hostvars):
+ def node_get_dns_ip(openshift_dns_ip, hostvars):
''' Navigates the complicated logic of when to set dnsIP
In all situations if they've set openshift_dns_ip use that
@@ -29,4 +29,4 @@ class FilterModule(object):
def filters(self):
''' returns a mapping of filters to methods '''
- return {'get_dns_ip': self.get_dns_ip}
+ return {'node_get_dns_ip': self.node_get_dns_ip}
diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml
index c268c945e..b45130400 100644
--- a/roles/openshift_node_facts/tasks/main.yml
+++ b/roles/openshift_node_facts/tasks/main.yml
@@ -1,10 +1,4 @@
---
-- set_fact:
- openshift_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') }}"
- when:
- - openshift_node_debug_level is not defined
- - lookup('oo_option', 'openshift_node_debug_level') != ""
-
- name: Set node facts
openshift_facts:
role: "{{ item.role }}"
@@ -17,10 +11,9 @@
- role: node
local_facts:
annotations: "{{ openshift_node_annotations | default(none) }}"
- debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
- labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
+ labels: "{{ openshift_node_labels | default(None) }}"
registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}"
schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
@@ -30,5 +23,5 @@
ovs_image: "{{ osn_ovs_image | default(None) }}"
proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}"
local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}"
- dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}"
+ dns_ip: "{{ openshift_dns_ip | default(none) | node_get_dns_ip(hostvars[inventory_hostname])}}"
env_vars: "{{ openshift_node_env_vars | default(None) }}"
diff --git a/roles/openshift_node_upgrade/README.md b/roles/openshift_node_upgrade/README.md
index 5ad994df9..73b98ad90 100644
--- a/roles/openshift_node_upgrade/README.md
+++ b/roles/openshift_node_upgrade/README.md
@@ -1,4 +1,4 @@
-OpenShift/Atomic Enterprise Node upgrade
+OpenShift Node upgrade
=========
Role responsible for a single node upgrade.
@@ -49,7 +49,6 @@ From openshift.node:
| Name | Default Value | |
|------------------------------------|---------------------|---------------------|
-| openshift.node.debug_level |---------------------|---------------------|
| openshift.node.node_image |---------------------|---------------------|
| openshift.node.ovs_image |---------------------|---------------------|
diff --git a/roles/openshift_node_upgrade/defaults/main.yml b/roles/openshift_node_upgrade/defaults/main.yml
index 3d8704308..10b4c6977 100644
--- a/roles/openshift_node_upgrade/defaults/main.yml
+++ b/roles/openshift_node_upgrade/defaults/main.yml
@@ -1,6 +1,14 @@
---
+openshift_node_debug_level: "{{ debug_level | default(2) }}"
+
openshift_use_openshift_sdn: True
os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
+
+# oreg_url is defined by user input
+oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
+oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
+oreg_auth_credentials_replace: False
+l_bind_docker_reg_auth: False
diff --git a/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml b/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
index 1186062eb..527580481 100644
--- a/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
+++ b/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
@@ -7,7 +7,7 @@
create: true
with_items:
- regex: '^OPTIONS='
- line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
+ line: "OPTIONS=--loglevel={{ openshift_node_debug_level }}"
- regex: '^CONFIG_FILE='
line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
- regex: '^IMAGE_VERSION='
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index e34319186..6bcf3072d 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -10,6 +10,8 @@
# tasks file for openshift_node_upgrade
+- include: registry_auth.yml
+
- name: Stop node and openvswitch services
service:
name: "{{ item }}"
diff --git a/roles/openshift_node_upgrade/tasks/registry_auth.yml b/roles/openshift_node_upgrade/tasks/registry_auth.yml
new file mode 100644
index 000000000..de396fb4b
--- /dev/null
+++ b/roles/openshift_node_upgrade/tasks/registry_auth.yml
@@ -0,0 +1,24 @@
+---
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{ oreg_auth_credentials_path }}"
+ when: oreg_auth_user is defined
+ register: node_oreg_auth_credentials_stat
+
+- name: Create credentials for registry auth
+ command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ when:
+ - oreg_auth_user is defined
+ - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ register: node_oreg_auth_credentials_create
+ notify:
+ - restart node
+
+# Container images may need the registry credentials
+- name: Setup ro mount of /root/.docker for containerized hosts
+ set_fact:
+ l_bind_docker_reg_auth: True
+ when:
+ - openshift.common.is_containerized | bool
+ - oreg_auth_user is defined
+ - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool
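+
+# As a rough illustration, the decision logic of the two conditional tasks above
+# (when to run docker login and when to bind-mount the resulting credentials into
+# the containerized node) can be sketched in Python; the argument names mirror the
+# Ansible variables and the example values are assumptions.
+#
+#     def should_create_credentials(oreg_auth_user, credentials_exist, replace):
+#         # 'Create credentials for registry auth' when: clause.
+#         return oreg_auth_user is not None and (not credentials_exist or replace)
+#
+#     def should_bind_mount(is_containerized, oreg_auth_user, credentials_exist, replace, created):
+#         # 'Setup ro mount of /root/.docker for containerized hosts' when: clause.
+#         return (is_containerized and oreg_auth_user is not None
+#                 and (credentials_exist or replace or created))
+#
+#     created = should_create_credentials('registry-user', credentials_exist=False, replace=False)
+#     print(created)                                                         # True: no credentials yet
+#     print(should_bind_mount(True, 'registry-user', False, False, created))  # True: mount new credentials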
diff --git a/roles/openshift_node_upgrade/tasks/systemd_units.yml b/roles/openshift_node_upgrade/tasks/systemd_units.yml
index afff2f8ba..226f5290c 100644
--- a/roles/openshift_node_upgrade/tasks/systemd_units.yml
+++ b/roles/openshift_node_upgrade/tasks/systemd_units.yml
@@ -6,7 +6,7 @@
# - openshift.node.ovs_image
# - openshift_use_openshift_sdn
# - openshift.common.service_type
-# - openshift.node.debug_level
+# - openshift_node_debug_level
# - openshift.common.config_base
# - openshift.common.http_proxy
# - openshift.common.portal_net
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
index 4c47f8c0d..aae35719c 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
@@ -6,6 +6,6 @@ Before={{ openshift.common.service_type }}-node.service
[Service]
-ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
+ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
ExecStop=
SyslogIdentifier={{ openshift.common.service_type }}-node-dep
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
index 451412ab0..07d1ebc3c 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
@@ -6,6 +6,7 @@ PartOf={{ openshift.docker.service_name }}.service
Requires={{ openshift.docker.service_name }}.service
{% if openshift_use_openshift_sdn %}
Wants=openvswitch.service
+PartOf=openvswitch.service
After=ovsdb-server.service
After=ovs-vswitchd.service
{% endif %}
@@ -21,7 +22,22 @@ EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
+ --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \
+ -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \
+ -e HOST=/rootfs -e HOST_ETC=/host-etc \
+ -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} \
+ -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node \
+  {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base }}/cloudprovider {% endif -%} \
+ -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro \
+ -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw \
+ -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker \
+ -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch \
+ -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni \
+ -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log \
+ -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \
+ {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+ {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml
index 8d3d010e4..19e9a56b7 100644
--- a/roles/openshift_persistent_volumes/meta/main.yml
+++ b/roles/openshift_persistent_volumes/meta/main.yml
@@ -9,5 +9,4 @@ galaxy_info:
- name: EL
versions:
- 7
-dependencies:
-- role: openshift_hosted_facts
+dependencies: {}
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
index 18d6a1645..5aa8aecec 100644
--- a/roles/openshift_prometheus/defaults/main.yaml
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -11,7 +11,7 @@ openshift_prometheus_node_selector: {"region":"infra"}
openshift_prometheus_image_proxy: "openshift/oauth-proxy:v1.0.0"
openshift_prometheus_image_prometheus: "openshift/prometheus:v2.0.0-dev"
openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:dev"
-openshift_prometheus_image_alertbuffer: "ilackarms/message-buffer"
+openshift_prometheus_image_alertbuffer: "openshift/prometheus-alert-buffer:v0.0.1"
# additional prometheus rules file
openshift_prometheus_additional_rules_file: null
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index 93bdda3e8..a9bce2fb1 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -107,7 +107,10 @@
- name: annotate prometheus service
command: >
{{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
- service prometheus 'service.alpha.openshift.io/serving-cert-secret-name=prometheus-tls'
+ service prometheus
+ prometheus.io/scrape='true'
+ prometheus.io/scheme=https
+ service.alpha.openshift.io/serving-cert-secret-name=prometheus-tls
- name: annotate alerts service
command: >
diff --git a/roles/openshift_repos/README.md b/roles/openshift_repos/README.md
index abd1997dd..ce3b51454 100644
--- a/roles/openshift_repos/README.md
+++ b/roles/openshift_repos/README.md
@@ -1,4 +1,4 @@
-OpenShift Repos
+OpenShift Repos
================
Configures repositories for an OpenShift installation
@@ -12,10 +12,10 @@ rhel-7-server-extra-rpms, and rhel-7-server-ose-3.0-rpms repos.
Role Variables
--------------
-| Name | Default value | |
-|-------------------------------|---------------|------------------------------------|
-| openshift_deployment_type | None | Possible values enterprise, origin |
-| openshift_additional_repos | {} | TODO |
+| Name | Default value | |
+|-------------------------------|---------------|----------------------------------------------|
+| openshift_deployment_type | None | Possible values openshift-enterprise, origin |
+| openshift_additional_repos | {} | TODO |
Dependencies
------------
diff --git a/roles/openshift_repos/tasks/centos_repos.yml b/roles/openshift_repos/tasks/centos_repos.yml
new file mode 100644
index 000000000..7dc15af2a
--- /dev/null
+++ b/roles/openshift_repos/tasks/centos_repos.yml
@@ -0,0 +1,25 @@
+---
+# Note: OpenShift repositories under CentOS may be shipped through the
+# "centos-release-openshift-origin" package which configures the repository.
+# This task matches the file names provided by the package so that they are
+# not installed twice in different files and the task remains idempotent.
+
+- name: Configure origin gpg keys
+ copy:
+ src: "origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS"
+ dest: "/etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS"
+ notify: refresh cache
+
+# openshift_release is formatted to a standard string in the openshift_version role.
+# openshift_release is expected to be in the format 'x.y.z...' here.
+# Here, we drop the '.' characters and try to match the correct repo template
+# for our corresponding openshift_release.
+- name: Configure correct origin release repository
+ template:
+ src: "{{ item }}"
+ dest: "/etc/yum.repos.d/{{ (item | basename | splitext)[0] }}"
+ with_first_found:
+ - "CentOS-OpenShift-Origin{{ (openshift_release | default('')).split('.') | join('') }}.repo.j2"
+ - "CentOS-OpenShift-Origin{{ ((openshift_release | default('')).split('.') | join(''))[0:2] }}.repo.j2"
+ - "CentOS-OpenShift-Origin.repo.j2"
+ notify: refresh cache
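+
+# The with_first_found lookup above effectively turns an openshift_release such as
+# '3.6.0' into an ordered list of candidate repo templates, most specific first; a
+# small Python sketch of that name derivation (file-existence checking is left to Ansible):
+#
+#     def repo_template_candidates(openshift_release=''):
+#         digits = ''.join(openshift_release.split('.'))  # '3.6.0' -> '360'
+#         return [
+#             "CentOS-OpenShift-Origin{}.repo.j2".format(digits),
+#             "CentOS-OpenShift-Origin{}.repo.j2".format(digits[0:2]),  # '36'
+#             "CentOS-OpenShift-Origin.repo.j2",
+#         ]
+#
+#     print(repo_template_candidates('3.6.0'))
+#     print(repo_template_candidates(''))  # only the generic template remains meaningful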
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index f972c0fd9..d41245093 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -30,30 +30,13 @@
- when: r_openshift_repos_has_run is not defined
block:
- # Note: OpenShift repositories under CentOS may be shipped through the
- # "centos-release-openshift-origin" package which configures the repository.
- # This task matches the file names provided by the package so that they are
- # not installed twice in different files and remains idempotent.
- - name: Configure origin repositories and gpg keys if needed
- copy:
- src: "{{ item.src }}"
- dest: "{{ item.dest }}"
- with_items:
- - src: origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS
- dest: /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
- - src: origin/repos/openshift-ansible-centos-paas-sig.repo
- dest: /etc/yum.repos.d/CentOS-OpenShift-Origin.repo
- notify: refresh cache
+ - include: centos_repos.yml
when:
- ansible_os_family == "RedHat"
- ansible_distribution != "Fedora"
- openshift_deployment_type == 'origin'
- openshift_enable_origin_repo | default(true) | bool
- - name: Enable centos-openshift-origin-testing repository
- command: yum-config-manager --enable centos-openshift-origin-testing
- when: openshift_repos_enable_testing | bool
-
- name: Ensure clean repo cache in the event repos have been changed manually
debug:
msg: "First run of openshift_repos"
diff --git a/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo b/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2
index 09364c26f..b0c036e7c 100644
--- a/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo
+++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2
@@ -8,7 +8,7 @@ gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
[centos-openshift-origin-testing]
name=CentOS OpenShift Origin Testing
baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin/
-enabled=0
+enabled={{ 1 if openshift_repos_enable_testing else 0 }}
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/openshift-ansible-CentOS-SIG-PaaS
diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin14.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin14.repo.j2
new file mode 100644
index 000000000..97e855d58
--- /dev/null
+++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin14.repo.j2
@@ -0,0 +1,27 @@
+[centos-openshift-origin14]
+name=CentOS OpenShift Origin
+baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin14/
+enabled=1
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin14-testing]
+name=CentOS OpenShift Origin Testing
+baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin14/
+enabled={{ 1 if openshift_repos_enable_testing else 0 }}
+gpgcheck=0
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin14-debuginfo]
+name=CentOS OpenShift Origin DebugInfo
+baseurl=http://debuginfo.centos.org/centos/7/paas/x86_64/
+enabled=0
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin14-source]
+name=CentOS OpenShift Origin Source
+baseurl=http://vault.centos.org/centos/7/paas/Source/openshift-origin14/
+enabled=0
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin15.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin15.repo.j2
new file mode 100644
index 000000000..5e756e680
--- /dev/null
+++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin15.repo.j2
@@ -0,0 +1,27 @@
+[centos-openshift-origin15]
+name=CentOS OpenShift Origin
+baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin15/
+enabled=1
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin15-testing]
+name=CentOS OpenShift Origin Testing
+baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin15/
+enabled={{ 1 if openshift_repos_enable_testing else 0 }}
+gpgcheck=0
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin15-debuginfo]
+name=CentOS OpenShift Origin DebugInfo
+baseurl=http://debuginfo.centos.org/centos/7/paas/x86_64/
+enabled=0
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin15-source]
+name=CentOS OpenShift Origin Source
+baseurl=http://vault.centos.org/centos/7/paas/Source/openshift-origin15/
+enabled=0
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin36.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin36.repo.j2
new file mode 100644
index 000000000..7050c95f5
--- /dev/null
+++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin36.repo.j2
@@ -0,0 +1,27 @@
+[centos-openshift-origin36]
+name=CentOS OpenShift Origin
+baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin36/
+enabled=1
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin36-testing]
+name=CentOS OpenShift Origin Testing
+baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin36/
+enabled={{ 1 if openshift_repos_enable_testing else 0 }}
+gpgcheck=0
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin36-debuginfo]
+name=CentOS OpenShift Origin DebugInfo
+baseurl=http://debuginfo.centos.org/centos/7/paas/x86_64/
+enabled=0
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin36-source]
+name=CentOS OpenShift Origin Source
+baseurl=http://vault.centos.org/centos/7/paas/Source/openshift-origin36/
+enabled=0
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
diff --git a/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py
new file mode 100644
index 000000000..72c47b8ee
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py
@@ -0,0 +1,44 @@
+'''
+ Filter plugin for the openshift_sanitize_inventory role, providing filters used while sanitizing inventory variables.
+'''
+
+
+import re
+
+
+# This should be removed after map_from_pairs is no longer used in __deprecations_logging.yml
+def map_from_pairs(source, delim="="):
+ ''' Returns a dict given the source and delim delimited '''
+ if source == '':
+ return dict()
+
+ return dict(item.split(delim) for item in source.split(","))
+
+
+def vars_with_pattern(source, pattern=""):
+ ''' Returns a list of variables whose name matches the given pattern '''
+ if source == '':
+ return list()
+
+ var_list = list()
+
+ var_pattern = re.compile(pattern)
+
+ for item in source:
+ if var_pattern.match(item):
+ var_list.append(item)
+
+ return var_list
+
+
+# pylint: disable=too-few-public-methods
+class FilterModule(object):
+    ''' OpenShift Sanitize Inventory Filters '''
+
+ # pylint: disable=no-self-use, too-few-public-methods
+ def filters(self):
+ ''' Returns the names of the filters provided by this class '''
+ return {
+ 'map_from_pairs': map_from_pairs,
+ 'vars_with_pattern': vars_with_pattern
+ }
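+
+# Assuming the two functions above are importable (for example copied into a local
+# module), a quick usage example of what they return:
+#
+#     # map_from_pairs turns 'k=v' pairs into a dict, as used for the *_nodeselector defaults.
+#     print(map_from_pairs('region=infra,zone=default'))
+#     # {'region': 'infra', 'zone': 'default'}
+#
+#     # vars_with_pattern selects variable names matching a regex, e.g. all *_storage_kind vars.
+#     inventory_vars = ['openshift_logging_storage_kind',
+#                       'openshift_metrics_storage_kind',
+#                       'openshift_release']
+#     print(vars_with_pattern(inventory_vars, pattern='openshift_.*_storage_kind'))
+#     # ['openshift_logging_storage_kind', 'openshift_metrics_storage_kind']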
diff --git a/roles/openshift_sanitize_inventory/library/conditional_set_fact.py b/roles/openshift_sanitize_inventory/library/conditional_set_fact.py
new file mode 100644
index 000000000..f61801714
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/library/conditional_set_fact.py
@@ -0,0 +1,68 @@
+#!/usr/bin/python
+
+""" Ansible module to help with setting facts conditionally based on other facts """
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+DOCUMENTATION = '''
+---
+module: conditional_set_fact
+
+short_description: This will set a fact if the value is defined
+
+description:
+ - "To avoid constant set_fact & when conditions for each var we can use this"
+
+author:
+ - Eric Wolinetz ewolinet@redhat.com
+'''
+
+
+EXAMPLES = '''
+- name: Conditionally set fact
+ conditional_set_fact:
+ fact1: not_defined_variable
+
+- name: Conditionally set fact
+ conditional_set_fact:
+ fact1: not_defined_variable
+ fact2: defined_variable
+
+'''
+
+
+def run_module():
+ """ The body of the module, we check if the variable name specified as the value
+ for the key is defined. If it is then we use that value as for the original key """
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ facts=dict(type='dict', required=True),
+            vars=dict(required=False, type='dict', default=dict())
+ ),
+ supports_check_mode=True
+ )
+
+ local_facts = dict()
+ is_changed = False
+
+ for param in module.params['vars']:
+ other_var = module.params['vars'][param]
+
+ if other_var in module.params['facts']:
+ local_facts[param] = module.params['facts'][other_var]
+ if not is_changed:
+ is_changed = True
+
+ return module.exit_json(changed=is_changed, # noqa: F405
+ ansible_facts=local_facts)
+
+
+def main():
+ """ main """
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
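+
+# Outside of Ansible, the mapping performed by run_module() boils down to the loop
+# below; the facts and mapping dicts are illustrative stand-ins for the module parameters.
+#
+#     facts = {'openshift_hosted_logging_hostname': 'kibana.example.com'}
+#     mapping = {
+#         'openshift_logging_kibana_hostname': 'openshift_hosted_logging_hostname',
+#         'openshift_logging_image_version': 'openshift_hosted_logging_deployer_version',
+#     }
+#
+#     local_facts = {}
+#     for new_name, old_name in mapping.items():
+#         if old_name in facts:          # only map facts whose deprecated source is defined
+#             local_facts[new_name] = facts[old_name]
+#
+#     print(local_facts)  # {'openshift_logging_kibana_hostname': 'kibana.example.com'}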
diff --git a/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml
new file mode 100644
index 000000000..e534e0cca
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml
@@ -0,0 +1,48 @@
+---
+# This is used to set the logging variables from their deprecated names to the current variable names.
+# This file should be deleted once the deprecated variables are no longer honored.
+
+- conditional_set_fact:
+ facts: "{{ hostvars[inventory_hostname] }}"
+ vars:
+ logging_hostname: openshift_hosted_logging_hostname
+ logging_ops_hostname: openshift_hosted_logging_ops_hostname
+ logging_elasticsearch_cluster_size: openshift_hosted_logging_elasticsearch_cluster_size
+ logging_elasticsearch_ops_cluster_size: openshift_hosted_logging_elasticsearch_ops_cluster_size
+ openshift_logging_storage_kind: openshift_hosted_logging_storage_kind
+ openshift_logging_storage_host: openshift_hosted_logging_storage_host
+ openshift_logging_storage_labels: openshift_hosted_logging_storage_labels
+ openshift_logging_storage_volume_size: openshift_hosted_logging_storage_volume_size
+ openshift_loggingops_storage_kind: openshift_hosted_loggingops_storage_kind
+ openshift_loggingops_storage_host: openshift_hosted_loggingops_storage_host
+ openshift_loggingops_storage_labels: openshift_hosted_loggingops_storage_labels
+ openshift_loggingops_storage_volume_size: openshift_hosted_loggingops_storage_volume_size
+ openshift_logging_use_ops: openshift_hosted_logging_enable_ops_cluster
+ openshift_logging_image_pull_secret: openshift_hosted_logging_image_pull_secret
+ openshift_logging_kibana_hostname: openshift_hosted_logging_hostname
+ openshift_logging_kibana_ops_hostname: openshift_hosted_logging_ops_hostname
+ openshift_logging_fluentd_journal_source: openshift_hosted_logging_journal_source
+ openshift_logging_fluentd_journal_read_from_head: openshift_hosted_logging_journal_read_from_head
+ openshift_logging_es_memory_limit: openshift_hosted_logging_elasticsearch_instance_ram
+ openshift_logging_es_nodeselector: openshift_hosted_logging_elasticsearch_nodeselector
+ openshift_logging_es_ops_memory_limit: openshift_hosted_logging_elasticsearch_ops_instance_ram
+ openshift_logging_storage_access_modes: openshift_hosted_logging_storage_access_modes
+ openshift_logging_master_public_url: openshift_hosted_logging_master_public_url
+ openshift_logging_image_prefix: openshift_hosted_logging_deployer_prefix
+ openshift_logging_image_version: openshift_hosted_logging_deployer_version
+ openshift_logging_install_logging: openshift_hosted_logging_deploy
+
+
+- set_fact:
+ openshift_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_logging_storage_kind | default(none) == 'dynamic' else '' }}"
+ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_storage_volume_size | default('10Gi') if openshift_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}"
+ openshift_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_logging_storage_kind | default(none) == 'dynamic' else '' }}"
+ openshift_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}"
+ openshift_logging_elasticsearch_ops_pvc_size: "{{ openshift_loggingops_storage_volume_size | default('10Gi') if openshift_loggingops_storage_kind | default(none) in ['dynamic','nfs'] else '' }}"
+ openshift_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es-ops' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}"
+ openshift_logging_curator_nodeselector: "{{ openshift_hosted_logging_curator_nodeselector | default('') | map_from_pairs }}"
+ openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator_ops_nodeselector | default('') | map_from_pairs }}"
+ openshift_logging_kibana_nodeselector: "{{ openshift_hosted_logging_kibana_nodeselector | default('') | map_from_pairs }}"
+ openshift_logging_kibana_ops_nodeselector: "{{ openshift_hosted_logging_kibana_ops_nodeselector | default('') | map_from_pairs }}"
+ openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
+ openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}"
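+
+# The PVC-related set_fact expressions above follow one pattern: a storage kind of
+# 'dynamic' (or 'nfs' for the size) fills in a value, anything else leaves the fact
+# empty. A hedged Python sketch of that pattern for the non-ops Elasticsearch facts:
+#
+#     def es_pvc_facts(storage_kind=None, volume_size=None):
+#         # Mirrors the openshift_logging_elasticsearch_pvc_* expressions above.
+#         return {
+#             'pvc_dynamic': 'true' if storage_kind == 'dynamic' else '',
+#             'pvc_size': (volume_size or '10Gi') if storage_kind in ('dynamic', 'nfs') else '',
+#             'pvc_prefix': 'logging-es' if storage_kind == 'dynamic' else '',
+#         }
+#
+#     print(es_pvc_facts('dynamic'))        # dynamic provisioning, size defaults to 10Gi
+#     print(es_pvc_facts('nfs', '50Gi'))    # NFS-backed, requested size kept
+#     print(es_pvc_facts(None))             # no storage kind: every fact stays empty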
diff --git a/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml b/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml
new file mode 100644
index 000000000..279646981
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml
@@ -0,0 +1,17 @@
+---
+# This is used to set the metrics variables from their deprecated names to the current variable names.
+# This file should be deleted once the deprecated variables are no longer honored.
+
+- conditional_set_fact:
+ facts: "{{ hostvars[inventory_hostname] }}"
+ vars:
+ openshift_metrics_storage_access_modes: openshift_hosted_metrics_storage_access_modes
+ openshift_metrics_storage_host: openshift_hosted_metrics_storage_host
+ openshift_metrics_storage_nfs_directory: openshift_hosted_metrics_storage_nfs_directory
+ openshift_metrics_storage_volume_name: openshift_hosted_metrics_storage_volume_name
+ openshift_metrics_storage_volume_size: openshift_hosted_metrics_storage_volume_size
+ openshift_metrics_storage_labels: openshift_hosted_metrics_storage_labels
+ openshift_metrics_image_prefix: openshift_hosted_metrics_deployer_prefix
+ openshift_metrics_image_version: openshift_hosted_metrics_deployer_version
+ openshift_metrics_install_metrics: openshift_hosted_metrics_deploy
+ openshift_metrics_storage_kind: openshift_hosted_metrics_storage_kind
diff --git a/roles/openshift_sanitize_inventory/tasks/deprecations.yml b/roles/openshift_sanitize_inventory/tasks/deprecations.yml
new file mode 100644
index 000000000..94d3acffc
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/tasks/deprecations.yml
@@ -0,0 +1,21 @@
+---
+
+- name: Check for usage of deprecated variables
+ set_fact:
+    __deprecation_message: "{{ __deprecation_message | default([]) }} + ['{{ __deprecation_header }} {{ item }} is a deprecated variable and will no longer be used in the next minor release. Please update your inventory accordingly.']"
+ when:
+ - hostvars[inventory_hostname][item] is defined
+ with_items: "{{ __warn_deprecated_vars }}"
+
+- block:
+ - debug: msg="{{__deprecation_message}}"
+ - pause:
+ seconds: "{{ 10 }}"
+ when:
+ - __deprecation_message | default ('') | length > 0
+
+# For with_fileglob, Ansible resolves the path relative to the roles/<rolename>/files directory
+- name: Assign deprecated variables to correct counterparts
+ include: "{{ item }}"
+ with_fileglob:
+ - "../tasks/__deprecations_*.yml"
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index 59ce505d3..e327ee9f5 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -1,4 +1,8 @@
---
+# Print out deprecations prior to any failures so that even if a play fails for other reasons
+# the user is still made aware of any deprecated variables they should adjust
+- include: deprecations.yml
+
- name: Abort when conflicting deployment type variables are set
when:
- deployment_type is defined
diff --git a/roles/openshift_sanitize_inventory/tasks/unsupported.yml b/roles/openshift_sanitize_inventory/tasks/unsupported.yml
index 24e44ea85..39bf1780a 100644
--- a/roles/openshift_sanitize_inventory/tasks/unsupported.yml
+++ b/roles/openshift_sanitize_inventory/tasks/unsupported.yml
@@ -10,3 +10,25 @@
Starting in 3.6 openshift_use_dnsmasq must be true or critical features
will not function. This also means that NetworkManager must be installed
enabled and responsible for management of the primary interface.
+
+- set_fact:
+ __using_dynamic: True
+ when:
+ - hostvars[inventory_hostname][item] in ['dynamic']
+ with_items:
+ - "{{ hostvars[inventory_hostname] | vars_with_pattern(pattern='openshift_.*_storage_kind') }}"
+
+- name: Ensure that dynamic provisioning is set if using dynamic storage
+ when:
+ - dynamic_volumes_check | default(true) | bool
+ - not openshift_master_dynamic_provisioning_enabled | default(false) | bool
+ - not openshift_cloudprovider_kind is defined
+ - __using_dynamic is defined and __using_dynamic | bool
+ fail:
+ msg: |-
+      Using a storage kind of 'dynamic' without enabling dynamic provisioning or
+      setting a cloud provider will cause generated PVCs to not be able to bind as
+      intended. Either update your inventory to not use dynamic storage or set
+ openshift_master_dynamic_provisioning_enabled to True and set an
+ openshift_cloudprovider_kind. You can disable this check with
+ 'dynamic_volumes_check=False'.
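+
+# Taken together, the two tasks above implement a single predicate: fail only when
+# some openshift_*_storage_kind variable is 'dynamic' while neither dynamic
+# provisioning nor a cloud provider is configured and the check has not been
+# disabled. A Python sketch, approximating the vars_with_pattern regex with a name check:
+#
+#     def dynamic_storage_misconfigured(inventory):
+#         using_dynamic = any(
+#             value == 'dynamic'
+#             for name, value in inventory.items()
+#             if name.startswith('openshift_') and name.endswith('_storage_kind')
+#         )
+#         return (inventory.get('dynamic_volumes_check', True)
+#                 and not inventory.get('openshift_master_dynamic_provisioning_enabled', False)
+#                 and 'openshift_cloudprovider_kind' not in inventory
+#                 and using_dynamic)
+#
+#     print(dynamic_storage_misconfigured({'openshift_logging_storage_kind': 'dynamic'}))  # True
+#     print(dynamic_storage_misconfigured({'openshift_logging_storage_kind': 'dynamic',
+#                                          'openshift_cloudprovider_kind': 'aws'}))        # False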
diff --git a/roles/openshift_sanitize_inventory/vars/main.yml b/roles/openshift_sanitize_inventory/vars/main.yml
index da48e42c1..0fc2372d2 100644
--- a/roles/openshift_sanitize_inventory/vars/main.yml
+++ b/roles/openshift_sanitize_inventory/vars/main.yml
@@ -1,7 +1,78 @@
---
# origin uses community packages named 'origin'
-# online currently uses 'openshift' packages
-# enterprise is used for OSE 3.0 < 3.1 which uses packages named 'openshift'
-# atomic-enterprise uses Red Hat packages named 'atomic-openshift'
-# openshift-enterprise uses Red Hat packages named 'atomic-openshift' starting with OSE 3.1
-known_openshift_deployment_types: ['origin', 'online', 'enterprise', 'atomic-enterprise', 'openshift-enterprise']
+# openshift-enterprise uses Red Hat packages named 'atomic-openshift'
+known_openshift_deployment_types: ['origin', 'openshift-enterprise']
+
+__deprecation_header: "[DEPRECATION WARNING]:"
+
+# This is a list of variables that will be deprecated in the next minor release; expect it to change from release to release.
+__warn_deprecated_vars:
+ # logging
+ - 'openshift_hosted_logging_deploy'
+ - 'openshift_hosted_logging_hostname'
+ - 'openshift_hosted_logging_ops_hostname'
+ - 'openshift_hosted_logging_master_public_url'
+ - 'openshift_hosted_logging_elasticsearch_cluster_size'
+ - 'openshift_hosted_logging_elasticsearch_ops_cluster_size'
+ - 'openshift_hosted_logging_image_pull_secret'
+ - 'openshift_hosted_logging_enable_ops_cluster'
+ - 'openshift_hosted_logging_curator_nodeselector'
+ - 'openshift_hosted_logging_curator_ops_nodeselector'
+ - 'openshift_hosted_logging_kibana_nodeselector'
+ - 'openshift_hosted_logging_kibana_ops_nodeselector'
+ - 'openshift_hosted_logging_fluentd_nodeselector_label'
+ - 'openshift_hosted_logging_journal_source'
+ - 'openshift_hosted_logging_journal_read_from_head'
+ - 'openshift_hosted_logging_elasticsearch_instance_ram'
+ - 'openshift_hosted_logging_storage_labels'
+ - 'openshift_hosted_logging_elasticsearch_pvc_dynamic'
+ - 'openshift_hosted_logging_elasticsearch_pvc_size'
+ - 'openshift_hosted_logging_elasticsearch_pvc_prefix'
+ - 'openshift_hosted_logging_elasticsearch_storage_group'
+ - 'openshift_hosted_logging_elasticsearch_nodeselector'
+ - 'openshift_hosted_logging_elasticsearch_ops_instance_ram'
+ - 'openshift_hosted_loggingops_storage_labels'
+ - 'openshift_hosted_logging_elasticsearch_ops_pvc_dynamic'
+ - 'openshift_hosted_logging_elasticsearch_ops_pvc_size'
+ - 'openshift_hosted_logging_elasticsearch_ops_pvc_prefix'
+ - 'openshift_hosted_logging_elasticsearch_ops_nodeselector'
+ - 'openshift_hosted_logging_storage_access_modes'
+ - 'openshift_hosted_logging_storage_kind'
+ - 'openshift_hosted_loggingops_storage_kind'
+ - 'openshift_hosted_logging_storage_host'
+ - 'openshift_hosted_loggingops_storage_host'
+ - 'openshift_hosted_logging_storage_nfs_directory'
+ - 'openshift_hosted_loggingops_storage_nfs_directory'
+ - 'openshift_hosted_logging_storage_volume_name'
+ - 'openshift_hosted_loggingops_storage_volume_name'
+ - 'openshift_hosted_logging_storage_volume_size'
+ - 'openshift_hosted_loggingops_storage_volume_size'
+ - 'openshift_hosted_logging_deployer_prefix'
+ - 'openshift_hosted_logging_deployer_version'
+ # metrics
+ - 'openshift_hosted_metrics_deploy'
+ - 'openshift_hosted_metrics_storage_kind'
+ - 'openshift_hosted_metrics_storage_access_modes'
+ - 'openshift_hosted_metrics_storage_host'
+ - 'openshift_hosted_metrics_storage_nfs_directory'
+ - 'openshift_hosted_metrics_storage_volume_name'
+ - 'openshift_hosted_metrics_storage_volume_size'
+ - 'openshift_hosted_metrics_storage_labels'
+ - 'openshift_hosted_metrics_deployer_prefix'
+ - 'openshift_hosted_metrics_deployer_version'
diff --git a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
deleted file mode 100644
index 16a307c06..000000000
--- a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
+++ /dev/null
@@ -1 +0,0 @@
-window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.service_catalog_landing_page = true;
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index 746c73eaf..e202ae173 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -6,8 +6,6 @@
register: mktemp
changed_when: False
-- include: wire_aggregator.yml
-
- name: Set default image variables based on deployment_type
include_vars: "{{ item }}"
with_first_found:
@@ -25,10 +23,22 @@
name: "kube-service-catalog"
node_selector: ""
-- name: Make kube-service-catalog project network global
- command: >
- oc adm pod-network make-projects-global kube-service-catalog
- when: os_sdn_network_plugin_name == 'redhat/openshift-ovs-multitenant'
+- when: os_sdn_network_plugin_name == 'redhat/openshift-ovs-multitenant'
+ block:
+ - name: Waiting for netnamespace kube-service-catalog to be ready
+ oc_obj:
+ kind: netnamespace
+ name: kube-service-catalog
+ state: list
+ register: get_output
+      until: get_output.results.stderr is not defined
+ retries: 30
+ delay: 1
+ changed_when: false
+
+ - name: Make kube-service-catalog project network global
+ command: >
+ oc adm pod-network make-projects-global kube-service-catalog
- include: generate_certs.yml
@@ -112,15 +122,6 @@
when:
- not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
-- shell: >
- oc get policybindings/kube-system:default -n kube-system || echo "not found"
- register: get_kube_system
- changed_when: no
-
-- command: >
- oc create policybinding kube-system -n kube-system
- when: "'not found' in get_kube_system.stdout"
-
- oc_adm_policy_user:
namespace: kube-service-catalog
resource_kind: scc
diff --git a/roles/openshift_service_catalog/vars/openshift-enterprise.yml b/roles/openshift_service_catalog/vars/openshift-enterprise.yml
index 4df60e9a8..cab9cc7d8 100644
--- a/roles/openshift_service_catalog/vars/openshift-enterprise.yml
+++ b/roles/openshift_service_catalog/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
__openshift_service_catalog_image_prefix: "registry.access.redhat.com/openshift3/ose-"
-__openshift_service_catalog_image_version: "v3.6"
+__openshift_service_catalog_image_version: "v3.7"
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
index 9ebb0d5ec..7b705c2d4 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
@@ -85,8 +85,6 @@ objects:
volumeMounts:
- name: db
mountPath: /var/lib/heketi
- - name: topology
- mountPath: ${TOPOLOGY_PATH}
- name: config
mountPath: /etc/heketi
readinessProbe:
@@ -103,9 +101,6 @@ objects:
port: 8080
volumes:
- name: db
- - name: topology
- secret:
- secretName: heketi-${CLUSTER_NAME}-topology-secret
- name: config
secret:
secretName: heketi-${CLUSTER_NAME}-config-secret
@@ -138,6 +133,3 @@ parameters:
displayName: GlusterFS cluster name
description: A unique name to identify this heketi service, useful for running multiple heketi instances
value: glusterfs
-- name: TOPOLOGY_PATH
- displayName: heketi topology file location
- required: True
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index bc0dde17d..51724f979 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -18,6 +18,17 @@
node_selector: "{% if glusterfs_use_default_selector %}{{ omit }}{% endif %}"
when: glusterfs_is_native or glusterfs_heketi_is_native or glusterfs_storageclass
+- name: Add namespace service accounts to privileged SCC
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ glusterfs_namespace }}:{{ item }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+ with_items:
+ - 'default'
+ - 'router'
+ when: glusterfs_is_native or glusterfs_heketi_is_native
+
- name: Delete pre-existing heketi resources
oc_obj:
namespace: "{{ glusterfs_namespace }}"
@@ -51,8 +62,8 @@
kind: pod
state: list
selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
- register: heketi_pod
- until: "heketi_pod.results.results[0]['items'] | count == 0"
+ register: deploy_heketi_pod
+ until: "deploy_heketi_pod.results.results[0]['items'] | count == 0"
delay: 10
retries: "{{ (glusterfs_timeout | int / 10) | int }}"
when: glusterfs_heketi_wipe
@@ -103,7 +114,7 @@
state: list
kind: pod
selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
- register: heketi_pod
+ register: deploy_heketi_pod
when: glusterfs_heketi_is_native
- name: Check if need to deploy deploy-heketi
@@ -111,9 +122,9 @@
glusterfs_heketi_deploy_is_missing: False
when:
- "glusterfs_heketi_is_native"
- - "heketi_pod.results.results[0]['items'] | count > 0"
+ - "deploy_heketi_pod.results.results[0]['items'] | count > 0"
# deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
- - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+ - "deploy_heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
- name: Check for existing heketi pod
oc_obj:
@@ -147,6 +158,21 @@
when:
- glusterfs_heketi_is_native
+- name: Get heketi admin secret
+ oc_secret:
+ state: list
+ namespace: "{{ glusterfs_namespace }}"
+ name: "heketi-{{ glusterfs_name }}-admin-secret"
+ decode: True
+ register: glusterfs_heketi_admin_secret
+
+- name: Set heketi admin key
+ set_fact:
+ glusterfs_heketi_admin_key: "{{ glusterfs_heketi_admin_secret.results.decoded.key }}"
+ when:
+ - glusterfs_is_native
+ - glusterfs_heketi_admin_secret.results.results[0]
+
- name: Generate heketi admin key
set_fact:
glusterfs_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
@@ -190,14 +216,37 @@
- glusterfs_heketi_deploy_is_missing
- glusterfs_heketi_is_missing
+- name: Wait for deploy-heketi pod
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
+ register: deploy_heketi_pod
+ until:
+ - "deploy_heketi_pod.results.results[0]['items'] | count > 0"
+ # Pod's 'Ready' status must be True
+ - "deploy_heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ delay: 10
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+ when:
+ - glusterfs_heketi_is_native
+ - not glusterfs_heketi_deploy_is_missing
+ - glusterfs_heketi_is_missing
+
- name: Set heketi-cli command
set_fact:
- glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}"
+ glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}"
- name: Verify heketi service
command: "{{ glusterfs_heketi_client }} cluster list"
changed_when: False
+- name: Place heketi topology on heketi Pod
+ shell: "{{ openshift.common.client_binary }} exec --namespace={{ glusterfs_namespace }} -i {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} -- bash -c 'mkdir -p {{ mktemp.stdout }} && cat > {{ mktemp.stdout }}/topology.json' < {{ mktemp.stdout }}/topology.json"
+ when:
+ - glusterfs_heketi_is_native
+
- name: Load heketi topology
command: "{{ glusterfs_heketi_client }} topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
register: topology_load
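#
# The reworked heketi-cli command and the topology copy above both have to pick an
# rsh/exec target: the long-running heketi pod if it already exists, otherwise the
# freshly created deploy-heketi pod. A minimal Python sketch of that selection; the
# pod names are placeholders.
#
#     def heketi_exec_target(heketi_is_missing, heketi_pod_name, deploy_heketi_pod_name):
#         # Prefer the permanent heketi pod, fall back to deploy-heketi during initial deployment.
#         return deploy_heketi_pod_name if heketi_is_missing else heketi_pod_name
#
#     print(heketi_exec_target(True, 'heketi-storage-1-abcde', 'deploy-heketi-storage-1-xyz12'))
#     print(heketi_exec_target(False, 'heketi-storage-1-abcde', 'deploy-heketi-storage-1-xyz12'))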
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index 8c3e31fc9..932d06038 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -55,16 +55,6 @@
- glusterfs_wipe
- item.stdout_lines | count > 0
-- name: Add service accounts to privileged SCC
- oc_adm_policy_user:
- user: "system:serviceaccount:{{ glusterfs_namespace }}:{{ item }}"
- resource_kind: scc
- resource_name: privileged
- state: present
- with_items:
- - 'default'
- - 'router'
-
- name: Label GlusterFS nodes
oc_label:
name: "{{ hostvars[item].openshift.node.nodename }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
index 3ba1eb2d2..73396c9af 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
@@ -6,16 +6,6 @@
with_items:
- "deploy-heketi-template.yml"
-- name: Create heketi topology secret
- oc_secret:
- namespace: "{{ glusterfs_namespace }}"
- state: present
- name: "heketi-{{ glusterfs_name }}-topology-secret"
- force: True
- files:
- - name: topology.json
- path: "{{ mktemp.stdout }}/topology.json"
-
- name: Create deploy-heketi template
oc_obj:
namespace: "{{ glusterfs_namespace }}"
@@ -39,18 +29,7 @@
HEKETI_EXECUTOR: "{{ glusterfs_heketi_executor }}"
HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
CLUSTER_NAME: "{{ glusterfs_name }}"
- TOPOLOGY_PATH: "{{ mktemp.stdout }}"
-- name: Wait for deploy-heketi pod
- oc_obj:
- namespace: "{{ glusterfs_namespace }}"
- kind: pod
- state: list
- selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
- register: heketi_pod
- until:
- - "heketi_pod.results.results[0]['items'] | count > 0"
- # Pod's 'Ready' status must be True
- - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
- delay: 10
- retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+- name: Set heketi Deployed fact
+ set_fact:
+ glusterfs_heketi_deploy_is_missing: False
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index afc04a537..074904bec 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -1,10 +1,10 @@
---
- name: Create heketi DB volume
- command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --listfile /tmp/heketi-storage.json"
+ command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --image {{ glusterfs_heketi_image}}:{{ glusterfs_heketi_version }} --listfile /tmp/heketi-storage.json"
register: setup_storage
- name: Copy heketi-storage list
- shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
+ shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
# This is used in the subsequent task
- name: Copy the admin client config
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index 51f8f4e0e..3047fbaf9 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -31,9 +31,9 @@
group: nfsnobody
with_items:
- "{{ openshift.hosted.registry }}"
- - "{{ openshift.hosted.metrics }}"
- - "{{ openshift.hosted.logging }}"
- - "{{ openshift.hosted.loggingops }}"
+ - "{{ openshift.metrics }}"
+ - "{{ openshift.logging }}"
+ - "{{ openshift.loggingops }}"
- "{{ openshift.hosted.etcd }}"
- name: Configure exports
diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2
index 7e8f70b23..0141e0d25 100644
--- a/roles/openshift_storage_nfs/templates/exports.j2
+++ b/roles/openshift_storage_nfs/templates/exports.j2
@@ -1,5 +1,5 @@
{{ openshift.hosted.registry.storage.nfs.directory }}/{{ openshift.hosted.registry.storage.volume.name }} {{ openshift.hosted.registry.storage.nfs.options }}
-{{ openshift.hosted.metrics.storage.nfs.directory }}/{{ openshift.hosted.metrics.storage.volume.name }} {{ openshift.hosted.metrics.storage.nfs.options }}
-{{ openshift.hosted.logging.storage.nfs.directory }}/{{ openshift.hosted.logging.storage.volume.name }} {{ openshift.hosted.logging.storage.nfs.options }}
-{{ openshift.hosted.loggingops.storage.nfs.directory }}/{{ openshift.hosted.loggingops.storage.volume.name }} {{ openshift.hosted.loggingops.storage.nfs.options }}
+{{ openshift.metrics.storage.nfs.directory }}/{{ openshift.metrics.storage.volume.name }} {{ openshift.metrics.storage.nfs.options }}
+{{ openshift.logging.storage.nfs.directory }}/{{ openshift.logging.storage.volume.name }} {{ openshift.logging.storage.nfs.options }}
+{{ openshift.loggingops.storage.nfs.directory }}/{{ openshift.loggingops.storage.volume.name }} {{ openshift.loggingops.storage.nfs.options }}
{{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }}
diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml
index 01a1a7472..53d10f1f8 100644
--- a/roles/openshift_version/defaults/main.yml
+++ b/roles/openshift_version/defaults/main.yml
@@ -1,2 +1,3 @@
---
openshift_protect_installed_version: True
+version_install_base_package: False
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index 204abe27e..f4e9ff43a 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -5,11 +5,15 @@
is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}"
is_atomic: "{{ openshift.common.is_atomic | default(False) | bool }}"
+# This is only needed on masters and nodes; version_install_base_package
+# should be set by a play externally.
- name: Install the base package for versioning
package:
name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
- when: not is_containerized | bool
+ when:
+ - not is_containerized | bool
+ - version_install_base_package | bool
# Block attempts to install origin without specifying some kind of version information.
# This is because the latest tags for origin are usually alpha builds, which should not
@@ -162,7 +166,9 @@
- set_fact:
openshift_pkg_version: -{{ openshift_version }}
- when: openshift_pkg_version is not defined
+ when:
+ - openshift_pkg_version is not defined
+ - openshift_upgrade_target is not defined
- fail:
msg: openshift_version role was unable to set openshift_version
@@ -177,7 +183,10 @@
- fail:
msg: openshift_version role was unable to set openshift_pkg_version
name: Abort if openshift_pkg_version was not set
- when: openshift_pkg_version is not defined
+ when:
+ - openshift_pkg_version is not defined
+ - openshift_upgrade_target is not defined
+
- fail:
msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml
index a2a579e9d..b727eb74d 100644
--- a/roles/openshift_version/tasks/set_version_containerized.yml
+++ b/roles/openshift_version/tasks/set_version_containerized.yml
@@ -1,6 +1,6 @@
---
- set_fact:
- l_use_crio: "{{ openshift_use_crio | default(false) }}"
+ l_use_crio_only: "{{ openshift_use_crio_only | default(false) }}"
- name: Set containerized version to configure if openshift_image_tag specified
set_fact:
@@ -22,7 +22,9 @@
command: >
docker run --rm {{ openshift.common.cli_image }}:latest version
register: cli_image_version
- when: openshift_version is not defined
+ when:
+ - openshift_version is not defined
+ - not l_use_crio_only
# Origin latest = pre-release version (i.e. v1.3.0-alpha.1-321-gb095e3a)
- set_fact:
@@ -31,6 +33,7 @@
- openshift_version is not defined
- openshift.common.deployment_type == 'origin'
- cli_image_version.stdout_lines[0].split('-') | length > 1
+ - not l_use_crio_only
- set_fact:
openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
@@ -45,14 +48,14 @@
when:
- openshift_version is defined
- openshift_version.split('.') | length == 2
- - not l_use_crio
+ - not l_use_crio_only
- set_fact:
openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2][1:] | join('-') if openshift.common.deployment_type == 'origin' else cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
when:
- openshift_version is defined
- openshift_version.split('.') | length == 2
- - not l_use_crio
+ - not l_use_crio_only
# TODO: figure out a way to check for the openshift_version when using CRI-O.
# We should do that using the images in the ostree storage so we don't have
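#
# For context, the surrounding set_fact expressions derive openshift_version from the
# first line of the cli image's version output; a Python sketch of the simplest of
# those expressions, with an illustrative sample line:
#
#     def parse_release_version(first_line):
#         # 'oc v3.6.0-rc.0+abc123' -> token 'v3.6.0-rc.0+abc123' -> 'v3.6.0' -> '3.6.0'
#         return first_line.split(' ')[1].split('-')[0][1:]
#
#     print(parse_release_version('oc v3.6.0-rc.0+abc123'))  # 3.6.0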
diff --git a/roles/os_firewall/tasks/iptables.yml b/roles/os_firewall/tasks/iptables.yml
index 0af5abf38..2d74f2e48 100644
--- a/roles/os_firewall/tasks/iptables.yml
+++ b/roles/os_firewall/tasks/iptables.yml
@@ -33,7 +33,7 @@
register: result
delegate_to: "{{item}}"
run_once: true
- with_items: "{{ ansible_play_hosts }}"
+ with_items: "{{ ansible_play_batch }}"
- name: need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail
pause:
diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml
index 39d59db70..fa74c9953 100644
--- a/roles/rhel_subscribe/tasks/enterprise.yml
+++ b/roles/rhel_subscribe/tasks/enterprise.yml
@@ -3,20 +3,17 @@
command: subscription-manager repos --disable="*"
- set_fact:
- default_ose_version: '3.0'
- when: deployment_type == 'enterprise'
-
-- set_fact:
default_ose_version: '3.6'
- when: deployment_type in ['atomic-enterprise', 'openshift-enterprise']
+ when: deployment_type == 'openshift-enterprise'
- set_fact:
- ose_version: "{{ lookup('oo_option', 'ose_version') | default(default_ose_version, True) }}"
+ ose_version: "{{ lookup('env', 'ose_version') | default(default_ose_version, True) }}"
- fail:
msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type"
- when: ( deployment_type == 'enterprise' and ose_version not in ['3.0'] ) or
- ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6'] )
+ when:
+ - deployment_type == 'openshift-enterprise'
+    - ose_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6']
- name: Enable RHEL repositories
command: subscription-manager repos \
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index 453044a6e..b06f51908 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -4,10 +4,10 @@
# to make it able to enable repositories
- set_fact:
- rhel_subscription_pool: "{{ lookup('oo_option', 'rhel_subscription_pool') | default(rhsub_pool, True) | default('Red Hat OpenShift Container Platform, Premium*', True) }}"
- rhel_subscription_user: "{{ lookup('oo_option', 'rhel_subscription_user') | default(rhsub_user, True) | default(omit, True) }}"
- rhel_subscription_pass: "{{ lookup('oo_option', 'rhel_subscription_pass') | default(rhsub_pass, True) | default(omit, True) }}"
- rhel_subscription_server: "{{ lookup('oo_option', 'rhel_subscription_server') | default(rhsub_server) }}"
+ rhel_subscription_pool: "{{ lookup('env', 'rhel_subscription_pool') | default(rhsub_pool | default('Red Hat OpenShift Container Platform, Premium*')) }}"
+ rhel_subscription_user: "{{ lookup('env', 'rhel_subscription_user') | default(rhsub_user | default(omit, True)) }}"
+ rhel_subscription_pass: "{{ lookup('env', 'rhel_subscription_pass') | default(rhsub_pass | default(omit, True)) }}"
+ rhel_subscription_server: "{{ lookup('env', 'rhel_subscription_server') | default(rhsub_server | default(omit, True)) }}"
- fail:
msg: "This role is only supported for Red Hat hosts"
@@ -41,15 +41,19 @@
redhat_subscription:
username: "{{ rhel_subscription_user }}"
password: "{{ rhel_subscription_pass }}"
+ register: rh_subscription
+ until: rh_subscription | succeeded
- name: Retrieve the OpenShift Pool ID
command: subscription-manager list --available --matches="{{ rhel_subscription_pool }}" --pool-only
register: openshift_pool_id
+ until: openshift_pool_id | succeeded
changed_when: False
- name: Determine if OpenShift Pool Already Attached
command: subscription-manager list --consumed --matches="{{ rhel_subscription_pool }}" --pool-only
register: openshift_pool_attached
+ until: openshift_pool_attached | succeeded
changed_when: False
when: openshift_pool_id.stdout == ''
@@ -58,10 +62,12 @@
when: openshift_pool_id.stdout == '' and openshift_pool_attached is defined and openshift_pool_attached.stdout == ''
- name: Attach to OpenShift Pool
- command: subscription-manager subscribe --pool {{ openshift_pool_id.stdout_lines[0] }}
+ command: subscription-manager attach --pool {{ openshift_pool_id.stdout_lines[0] }}
+ register: subscribe_pool
+ until: subscribe_pool | succeeded
when: openshift_pool_id.stdout != ''
- include: enterprise.yml
when:
- - deployment_type in [ 'enterprise', 'atomic-enterprise', 'openshift-enterprise' ]
+ - deployment_type == 'openshift-enterprise'
- not ostree_booted.stat.exists | bool
diff --git a/roles/template_service_broker/defaults/main.yml b/roles/template_service_broker/defaults/main.yml
new file mode 100644
index 000000000..fb407c4a2
--- /dev/null
+++ b/roles/template_service_broker/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# placeholder file?
+template_service_broker_remove: False
+template_service_broker_install: False
diff --git a/roles/template_service_broker/files/openshift-ansible-catalog-console.js b/roles/template_service_broker/files/openshift-ansible-catalog-console.js
new file mode 100644
index 000000000..b3a3d3428
--- /dev/null
+++ b/roles/template_service_broker/files/openshift-ansible-catalog-console.js
@@ -0,0 +1 @@
+window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.template_service_broker = true;
diff --git a/roles/template_service_broker/files/remove-openshift-ansible-catalog-console.js b/roles/template_service_broker/files/remove-openshift-ansible-catalog-console.js
new file mode 100644
index 000000000..d0a9f11dc
--- /dev/null
+++ b/roles/template_service_broker/files/remove-openshift-ansible-catalog-console.js
@@ -0,0 +1,2 @@
+// empty file so that the master-config can still point to a file that exists
+// this file will be replaced by the template service broker role if enabled
diff --git a/roles/etcd_ca/meta/main.yml b/roles/template_service_broker/meta/main.yml
index e3e2f7781..ab5a0cf08 100644
--- a/roles/etcd_ca/meta/main.yml
+++ b/roles/template_service_broker/meta/main.yml
@@ -1,7 +1,7 @@
---
galaxy_info:
- author: Jason DeTiberus
- description: Etcd CA
+ author: OpenShift Red Hat
+ description: OpenShift Template Service Broker
company: Red Hat, Inc.
license: Apache License, Version 2.0
min_ansible_version: 2.1
@@ -11,6 +11,3 @@ galaxy_info:
- 7
categories:
- cloud
- - system
-dependencies:
-- role: etcd_common
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
new file mode 100644
index 000000000..f5fd6487c
--- /dev/null
+++ b/roles/template_service_broker/tasks/install.yml
@@ -0,0 +1,77 @@
+---
+# Fact setting
+- name: Set default image variables based on deployment type
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "default_images.yml"
+
+- name: set ansible_service_broker facts
+ set_fact:
+ template_service_broker_prefix: "{{ template_service_broker_prefix | default(__template_service_broker_prefix) }}"
+ template_service_broker_version: "{{ template_service_broker_version | default(__template_service_broker_version) }}"
+ template_service_broker_image_name: "{{ template_service_broker_image_name | default(__template_service_broker_image_name) }}"
+
+- oc_project:
+ name: openshift-template-service-broker
+ state: present
+
+- command: mktemp -d /tmp/tsb-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+ become: no
+
+- copy:
+ src: "{{ __tsb_files_location }}/{{ item }}"
+ dest: "{{ mktemp.stdout }}/{{ item }}"
+ with_items:
+ - "{{ __tsb_template_file }}"
+ - "{{ __tsb_rbac_file }}"
+ - "{{ __tsb_broker_file }}"
+
+- name: Apply template file
+ shell: >
+ oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" --param API_SERVER_CONFIG="{{ lookup('file', __tsb_files_location ~ '/' ~ __tsb_config_file) }}" --param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}" | kubectl apply -f -
+
+# reconcile with rbac
+- name: Reconcile with RBAC file
+ shell: >
+ oc process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | oc auth reconcile -f -
+
+- name: copy tech preview extension file for service console UI
+ copy:
+ src: openshift-ansible-catalog-console.js
+ dest: /etc/origin/master/openshift-ansible-catalog-console.js
+
+# Check that the TSB is running
+- name: Verify that TSB is running
+ command: >
+ curl -k https://apiserver.openshift-template-service-broker.svc/healthz
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_health
+ until: api_health.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
+
+- set_fact:
+ openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+ when: openshift_master_config_dir is undefined
+
+- slurp:
+ src: "{{ openshift_master_config_dir }}/ca.crt"
+ register: __ca_bundle
+
+# Register with broker
+- name: Register TSB with broker
+ shell: >
+ oc process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | oc apply -f -
+
+- file:
+ state: absent
+ name: "{{ mktemp.stdout }}"
+ changed_when: False
+ become: no
diff --git a/roles/template_service_broker/tasks/main.yml b/roles/template_service_broker/tasks/main.yml
new file mode 100644
index 000000000..d7ca970c7
--- /dev/null
+++ b/roles/template_service_broker/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+# do any asserts here
+
+- include: install.yml
+ when: template_service_broker_install | default(false) | bool
+
+- include: remove.yml
+ when: template_service_broker_remove | default(false) | bool
diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml
new file mode 100644
index 000000000..f3afe65ed
--- /dev/null
+++ b/roles/template_service_broker/tasks/remove.yml
@@ -0,0 +1,35 @@
+---
+- command: mktemp -d /tmp/tsb-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+ become: no
+
+- copy:
+ src: "{{ __tsb_files_location }}/{{ item }}"
+ dest: "{{ mktemp.stdout }}/{{ item }}"
+ with_items:
+ - "{{ __tsb_template_file }}"
+ - "{{ __tsb_broker_file }}"
+
+- name: Delete TSB broker
+ shell: >
+ oc process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | oc delete -f -
+
+- name: Delete TSB objects
+ shell: >
+ oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | kubectl delete -f -
+
+- name: empty out tech preview extension file for service console UI
+ copy:
+ src: remove-openshift-ansible-catalog-console.js
+ dest: /etc/origin/master/openshift-ansible-catalog-console.js
+
+- oc_project:
+ name: openshift-template-service-broker
+ state: absent
+
+- file:
+ state: absent
+ name: "{{ mktemp.stdout }}"
+ changed_when: False
+ become: no
diff --git a/roles/template_service_broker/vars/default_images.yml b/roles/template_service_broker/vars/default_images.yml
new file mode 100644
index 000000000..77afe1f43
--- /dev/null
+++ b/roles/template_service_broker/vars/default_images.yml
@@ -0,0 +1,4 @@
+---
+__template_service_broker_prefix: "docker.io/openshift/"
+__template_service_broker_version: "latest"
+__template_service_broker_image_name: "origin"
diff --git a/roles/template_service_broker/vars/main.yml b/roles/template_service_broker/vars/main.yml
new file mode 100644
index 000000000..a65340f16
--- /dev/null
+++ b/roles/template_service_broker/vars/main.yml
@@ -0,0 +1,7 @@
+---
+__tsb_files_location: "../../../files/origin-components/"
+
+__tsb_template_file: "apiserver-template.yaml"
+__tsb_config_file: "apiserver-config.yaml"
+__tsb_rbac_file: "rbac-template.yaml"
+__tsb_broker_file: "template-service-broker-registration.yaml"
diff --git a/roles/template_service_broker/vars/openshift-enterprise.yml b/roles/template_service_broker/vars/openshift-enterprise.yml
new file mode 100644
index 000000000..dfab1e01b
--- /dev/null
+++ b/roles/template_service_broker/vars/openshift-enterprise.yml
@@ -0,0 +1,4 @@
+---
+__template_service_broker_prefix: "registry.access.redhat.com/openshift3/"
+__template_service_broker_version: "v3.7"
+__template_service_broker_image_name: "ose"
diff --git a/roles/tuned/defaults/main.yml b/roles/tuned/defaults/main.yml
new file mode 100644
index 000000000..418a4b521
--- /dev/null
+++ b/roles/tuned/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+tuned_etc_directory: '/etc/tuned'
+tuned_templates_source: '../templates'
diff --git a/roles/etcd_common/meta/main.yml b/roles/tuned/meta/main.yml
index dfb1c7a2c..833d94c13 100644
--- a/roles/etcd_common/meta/main.yml
+++ b/roles/tuned/meta/main.yml
@@ -1,15 +1,13 @@
---
galaxy_info:
- author: Jason DeTiberus
- description:
+ author: Jiri Mencak
+ description: Restart the tuned daemon if present and make it use the recommended profile
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 1.9
+ min_ansible_version: 2.3
platforms:
- name: EL
versions:
- 7
categories:
- cloud
- - system
-dependencies: []
diff --git a/roles/openshift_node/tasks/tuned.yml b/roles/tuned/tasks/main.yml
index 425bf6a26..e95d274d5 100644
--- a/roles/openshift_node/tasks/tuned.yml
+++ b/roles/tuned/tasks/main.yml
@@ -12,8 +12,6 @@
- name: Set tuned OpenShift variables
set_fact:
openshift_tuned_guest_profile: "{{ 'atomic-guest' if openshift.common.is_atomic else 'virtual-guest' }}"
- tuned_etc_directory: '/etc/tuned'
- tuned_templates_source: '../templates/tuned'
- name: Ensure directory structure exists
file:
diff --git a/roles/openshift_node/templates/tuned/openshift-control-plane/tuned.conf b/roles/tuned/templates/openshift-control-plane/tuned.conf
index f22f21065..f22f21065 100644
--- a/roles/openshift_node/templates/tuned/openshift-control-plane/tuned.conf
+++ b/roles/tuned/templates/openshift-control-plane/tuned.conf
diff --git a/roles/openshift_node/templates/tuned/openshift-node/tuned.conf b/roles/tuned/templates/openshift-node/tuned.conf
index 78c7d19c9..78c7d19c9 100644
--- a/roles/openshift_node/templates/tuned/openshift-node/tuned.conf
+++ b/roles/tuned/templates/openshift-node/tuned.conf
diff --git a/roles/openshift_node/templates/tuned/openshift/tuned.conf b/roles/tuned/templates/openshift/tuned.conf
index 68ac5dadb..68ac5dadb 100644
--- a/roles/openshift_node/templates/tuned/openshift/tuned.conf
+++ b/roles/tuned/templates/openshift/tuned.conf
diff --git a/roles/openshift_node/templates/tuned/recommend.conf b/roles/tuned/templates/recommend.conf
index 5fa765798..086e5673d 100644
--- a/roles/openshift_node/templates/tuned/recommend.conf
+++ b/roles/tuned/templates/recommend.conf
@@ -1,8 +1,11 @@
-[openshift-node]
-/etc/origin/node/node-config.yaml=.*region=primary
-
[openshift-control-plane,master]
/etc/origin/master/master-config.yaml=.*
[openshift-control-plane,node]
/etc/origin/node/node-config.yaml=.*region=infra
+
+[openshift-control-plane,lb]
+/etc/haproxy/haproxy.cfg=.*
+
+[openshift-node]
+/etc/origin/node/node-config.yaml=.*
diff --git a/setup.py b/setup.py
index eaf23d47a..3b786e0fb 100644
--- a/setup.py
+++ b/setup.py
@@ -165,7 +165,7 @@ class OpenShiftAnsibleYamlLint(Command):
has_warnings = True
if has_errors or has_warnings:
- print('yammlint issues found')
+ print('yamllint issues found')
raise SystemExit(1)
diff --git a/test/integration/openshift_health_checker/common.go b/test/integration/openshift_health_checker/common.go
index a92d6861d..8b79c48cb 100644
--- a/test/integration/openshift_health_checker/common.go
+++ b/test/integration/openshift_health_checker/common.go
@@ -25,7 +25,7 @@ func (p PlaybookTest) Run(t *testing.T) {
// A PlaybookTest is intended to be run in parallel with other tests.
t.Parallel()
- cmd := exec.Command("ansible-playbook", "-i", "/dev/null", p.Path)
+ cmd := exec.Command("ansible-playbook", "-e", "testing_skip_some_requirements=1", "-i", "/dev/null", p.Path)
cmd.Env = append(os.Environ(), "ANSIBLE_FORCE_COLOR=1")
b, err := cmd.CombinedOutput()
diff --git a/test/openshift_version_tests.py b/test/openshift_version_tests.py
index 393a4d6ba..6095beb95 100644
--- a/test/openshift_version_tests.py
+++ b/test/openshift_version_tests.py
@@ -17,39 +17,39 @@ class OpenShiftVersionTests(unittest.TestCase):
# Static tests for legacy filters.
legacy_gte_tests = [{'name': 'oo_version_gte_3_1_or_1_1',
- 'positive_enterprise_version': '3.2.0',
- 'negative_enterprise_version': '3.0.0',
+ 'positive_openshift-enterprise_version': '3.2.0',
+ 'negative_openshift-enterprise_version': '3.0.0',
'positive_origin_version': '1.2.0',
'negative_origin_version': '1.0.0'},
{'name': 'oo_version_gte_3_1_1_or_1_1_1',
- 'positive_enterprise_version': '3.2.0',
- 'negative_enterprise_version': '3.1.0',
+ 'positive_openshift-enterprise_version': '3.2.0',
+ 'negative_openshift-enterprise_version': '3.1.0',
'positive_origin_version': '1.2.0',
'negative_origin_version': '1.1.0'},
{'name': 'oo_version_gte_3_2_or_1_2',
- 'positive_enterprise_version': '3.3.0',
- 'negative_enterprise_version': '3.1.0',
+ 'positive_openshift-enterprise_version': '3.3.0',
+ 'negative_openshift-enterprise_version': '3.1.0',
'positive_origin_version': '1.3.0',
'negative_origin_version': '1.1.0'},
{'name': 'oo_version_gte_3_3_or_1_3',
- 'positive_enterprise_version': '3.4.0',
- 'negative_enterprise_version': '3.2.0',
+ 'positive_openshift-enterprise_version': '3.4.0',
+ 'negative_openshift-enterprise_version': '3.2.0',
'positive_origin_version': '1.4.0',
'negative_origin_version': '1.2.0'},
{'name': 'oo_version_gte_3_4_or_1_4',
- 'positive_enterprise_version': '3.5.0',
- 'negative_enterprise_version': '3.3.0',
+ 'positive_openshift-enterprise_version': '3.5.0',
+ 'negative_openshift-enterprise_version': '3.3.0',
'positive_origin_version': '1.5.0',
'negative_origin_version': '1.3.0'},
{'name': 'oo_version_gte_3_5_or_1_5',
- 'positive_enterprise_version': '3.6.0',
- 'negative_enterprise_version': '3.4.0',
+ 'positive_openshift-enterprise_version': '3.6.0',
+ 'negative_openshift-enterprise_version': '3.4.0',
'positive_origin_version': '3.6.0',
'negative_origin_version': '1.4.0'}]
def test_legacy_gte_filters(self):
for test in self.legacy_gte_tests:
- for deployment_type in ['enterprise', 'origin']:
+ for deployment_type in ['openshift-enterprise', 'origin']:
# Test negative case per deployment_type
self.assertFalse(
self.openshift_version_filters._filters[test['name']](
@@ -70,3 +70,7 @@ class OpenShiftVersionTests(unittest.TestCase):
self.assertFalse(
self.openshift_version_filters._filters["oo_version_gte_{}_{}".format(major, minor)](
"{}.{}".format(major, minor)))
+
+ def test_get_filters(self):
+ self.assertTrue(
+ self.openshift_version_filters.filters() == self.openshift_version_filters._filters)
diff --git a/utils/docs/config.md b/utils/docs/config.md
index 3677ffe2e..6d0c6896e 100644
--- a/utils/docs/config.md
+++ b/utils/docs/config.md
@@ -52,7 +52,6 @@ Indicates the version of configuration this file was written with. Current imple
The OpenShift variant to install. Currently valid options are:
* openshift-enterprise
- * atomic-enterprise
### variant_version (optional)