-rwxr-xr-x  .papr.sh | 2
-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  DEPLOYMENT_TYPES.md | 2
-rw-r--r--  README.md | 23
-rw-r--r--  ansible.cfg | 4
-rw-r--r--  docs/proposals/role_decomposition.md | 2
-rw-r--r--  filter_plugins/oo_filters.py | 254
-rw-r--r--  images/installer/root/exports/manifest.json | 2
-rwxr-xr-x  images/installer/root/usr/local/bin/run | 2
-rw-r--r--  inventory/.gitignore (renamed from inventory/byo/.gitignore) | 0
-rw-r--r--  inventory/README.md | 6
-rw-r--r--  inventory/hosts.example (renamed from inventory/byo/hosts.example) | 24
-rw-r--r--  inventory/hosts.glusterfs.external.example (renamed from inventory/byo/hosts.byo.glusterfs.external.example) | 10
-rw-r--r--  inventory/hosts.glusterfs.mixed.example (renamed from inventory/byo/hosts.byo.glusterfs.mixed.example) | 10
-rw-r--r--  inventory/hosts.glusterfs.native.example (renamed from inventory/byo/hosts.byo.glusterfs.native.example) | 10
-rw-r--r--  inventory/hosts.glusterfs.registry-only.example (renamed from inventory/byo/hosts.byo.glusterfs.registry-only.example) | 8
-rw-r--r--  inventory/hosts.glusterfs.storage-and-registry.example (renamed from inventory/byo/hosts.byo.glusterfs.storage-and-registry.example) | 8
-rw-r--r--  inventory/hosts.openstack (renamed from inventory/byo/hosts.openstack) | 2
-rw-r--r--  openshift-ansible.spec | 255
-rw-r--r--  playbooks/adhoc/openshift_hosted_logging_efk.yaml | 2
-rw-r--r--  playbooks/adhoc/uninstall.yml | 7
-rw-r--r--  playbooks/aws/README.md | 6
-rwxr-xr-x  playbooks/aws/openshift-cluster/accept.yml | 4
-rw-r--r--  playbooks/aws/openshift-cluster/build_ami.yml | 6
-rw-r--r--  playbooks/aws/openshift-cluster/hosted.yml | 12
-rw-r--r--  playbooks/aws/openshift-cluster/install.yml | 18
-rw-r--r--  playbooks/aws/openshift-cluster/prerequisites.yml | 6
-rw-r--r--  playbooks/aws/openshift-cluster/provision_install.yml | 10
-rw-r--r--  playbooks/aws/provisioning-inventory.example.ini | 1
-rw-r--r--  playbooks/byo/config.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/openshift-logging.yml | 9
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/byo/openshift-node/network_manager.yml | 3
-rw-r--r--  playbooks/byo/openshift_facts.yml | 5
-rw-r--r--  playbooks/byo/rhel_subscribe.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 44
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml | 21
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_excluders.yml (renamed from playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml) | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh | 25
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml | 57
l---------  playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/etcd/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/config.yml | 77
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml | 22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml | 93
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml | 22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 34
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 107
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 108
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 103
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 121
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 122
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml | 103
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml | 121
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml | 122
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml | 103
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml | 123
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 119
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml | 113
-rw-r--r--  playbooks/container-runtime/config.yml | 6
-rw-r--r--  playbooks/container-runtime/private/config.yml | 28
l---------  playbooks/container-runtime/private/roles | 1
-rw-r--r--  playbooks/deploy_cluster.yml | 46
-rw-r--r--  playbooks/gcp/provision.yml (renamed from playbooks/gcp/openshift-cluster/provision.yml) | 7
-rw-r--r--  playbooks/init/evaluate_groups.yml | 7
-rw-r--r--  playbooks/init/main.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/ca.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/certificates-backup.yml | 6
-rw-r--r--  playbooks/openshift-etcd/private/config.yml | 1
-rw-r--r--  playbooks/openshift-etcd/private/embedded2external.yml | 24
-rw-r--r--  playbooks/openshift-etcd/private/migrate.yml | 22
-rw-r--r--  playbooks/openshift-etcd/private/redeploy-ca.yml | 12
-rw-r--r--  playbooks/openshift-etcd/private/redeploy-certificates.yml | 4
-rw-r--r--  playbooks/openshift-etcd/private/scaleup.yml | 4
-rw-r--r--  playbooks/openshift-etcd/private/server_certificates.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_backup.yml (renamed from playbooks/common/openshift-cluster/upgrades/etcd/backup.yml) | 3
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_image_members.yml (renamed from playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml) | 2
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_main.yml (renamed from playbooks/common/openshift-cluster/upgrades/etcd/main.yml) | 8
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_rpm_members.yml (renamed from playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml) | 2
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_step.yml (renamed from playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml) | 24
-rw-r--r--  playbooks/openshift-etcd/redeploy-ca.yml | 4
-rw-r--r--  playbooks/openshift-etcd/redeploy-certificates.yml | 8
-rw-r--r--  playbooks/openshift-etcd/upgrade.yml | 4
-rw-r--r--  playbooks/openshift-glusterfs/README.md | 3
-rw-r--r--  playbooks/openshift-glusterfs/private/registry.yml | 31
-rw-r--r--  playbooks/openshift-hosted/private/create_persistent_volumes.yml | 4
-rw-r--r--  playbooks/openshift-hosted/redeploy-registry-certificates.yml | 4
-rw-r--r--  playbooks/openshift-hosted/redeploy-router-certificates.yml | 4
-rw-r--r--  playbooks/openshift-loadbalancer/private/config.yml | 7
-rw-r--r--  playbooks/openshift-logging/config.yml | 9
-rw-r--r--  playbooks/openshift-logging/private/config.yml (renamed from playbooks/common/openshift-cluster/openshift_logging.yml) | 0
l---------  playbooks/openshift-logging/private/filter_plugins | 1
l---------  playbooks/openshift-logging/private/library | 1
l---------  playbooks/openshift-logging/private/lookup_plugins | 1
l---------  playbooks/openshift-logging/private/roles | 1
-rw-r--r--  playbooks/openshift-master/private/additional_config.yml | 2
-rw-r--r--  playbooks/openshift-master/private/config.yml | 6
-rw-r--r--  playbooks/openshift-master/private/redeploy-certificates.yml | 4
-rw-r--r--  playbooks/openshift-master/private/redeploy-openshift-ca.yml | 6
-rw-r--r--  playbooks/openshift-master/private/scaleup.yml | 4
-rw-r--r--  playbooks/openshift-master/private/tasks/wire_aggregator.yml | 6
-rw-r--r--  playbooks/openshift-master/private/validate_restart.yml | 3
-rw-r--r--  playbooks/openshift-master/redeploy-certificates.yml | 6
-rw-r--r--  playbooks/openshift-master/redeploy-openshift-ca.yml | 4
-rw-r--r--  playbooks/openshift-nfs/private/config.yml | 1
-rw-r--r--  playbooks/openshift-node/private/additional_config.yml | 1
-rw-r--r--  playbooks/openshift-node/private/configure_nodes.yml | 1
-rw-r--r--  playbooks/openshift-node/private/containerized_nodes.yml | 1
-rw-r--r--  playbooks/openshift-node/private/enable_excluders.yml | 1
-rw-r--r--  playbooks/openshift-node/private/image_prep.yml | 12
-rw-r--r--  playbooks/openshift-node/private/network_manager.yml | 2
-rw-r--r--  playbooks/openshift-node/private/redeploy-certificates.yml | 4
-rw-r--r--  playbooks/openshift-node/private/restart.yml | 8
-rw-r--r--  playbooks/openshift-node/private/setup.yml | 1
-rw-r--r--  playbooks/openshift-node/redeploy-certificates.yml | 6
-rw-r--r--  playbooks/openstack/README.md | 4
-rw-r--r--  playbooks/openstack/advanced-configuration.md | 177
-rw-r--r--  playbooks/openstack/openshift-cluster/install.yml | 7
-rw-r--r--  playbooks/openstack/openshift-cluster/provision.yml | 13
-rw-r--r--  playbooks/openstack/openshift-cluster/provision_install.yml | 6
-rw-r--r--  playbooks/openstack/sample-inventory/group_vars/OSEv3.yml | 1
-rw-r--r--  playbooks/openstack/sample-inventory/group_vars/all.yml | 25
-rw-r--r--  playbooks/prerequisites.yml | 16
-rw-r--r--  playbooks/redeploy-certificates.yml | 22
-rw-r--r--  roles/calico/tasks/main.yml | 1
-rw-r--r--  roles/container_runtime/README.md | 25
-rw-r--r--  roles/container_runtime/defaults/main.yml | 57
-rw-r--r--  roles/container_runtime/tasks/common/atomic_proxy.yml (renamed from roles/openshift_atomic/tasks/proxy.yml) | 0
-rw-r--r--  roles/container_runtime/tasks/common/post.yml | 26
-rw-r--r--  roles/container_runtime/tasks/common/pre.yml | 12
-rw-r--r--  roles/container_runtime/tasks/common/setup_docker_symlink.yml | 38
-rw-r--r--  roles/container_runtime/tasks/common/syscontainer_packages.yml | 28
-rw-r--r--  roles/container_runtime/tasks/common/udev_workaround.yml (renamed from roles/container_runtime/tasks/udev_workaround.yml) | 0
-rw-r--r--  roles/container_runtime/tasks/docker_sanity.yml | 27
-rw-r--r--  roles/container_runtime/tasks/docker_upgrade_check.yml | 67
-rw-r--r--  roles/container_runtime/tasks/main.yml | 85
-rw-r--r--  roles/container_runtime/tasks/package_docker.yml | 36
-rw-r--r--  roles/container_runtime/tasks/systemcontainer_crio.yml | 90
-rw-r--r--  roles/container_runtime/tasks/systemcontainer_docker.yml | 78
-rw-r--r--  roles/container_runtime/templates/daemon.json | 4
-rw-r--r--  roles/contiv/meta/main.yml | 2
-rw-r--r--  roles/contiv/tasks/default_network.yml | 13
-rw-r--r--  roles/contiv/tasks/netmaster_iptables.yml | 8
-rw-r--r--  roles/contiv_facts/tasks/rpm.yml | 9
-rw-r--r--  roles/etcd/meta/main.yml | 1
-rw-r--r--  roles/etcd/tasks/migration/add_ttls.yml | 2
-rw-r--r--  roles/etcd/tasks/system_container.yml | 5
-rw-r--r--  roles/flannel/defaults/main.yaml | 6
-rw-r--r--  roles/flannel/handlers/main.yml | 2
-rw-r--r--  roles/flannel_register/defaults/main.yaml | 2
-rw-r--r--  roles/installer_checkpoint/callback_plugins/installer_checkpoint.py | 2
-rw-r--r--  roles/kuryr/tasks/node.yaml | 4
-rw-r--r--  roles/kuryr/templates/configmap.yaml.j2 | 1
-rw-r--r--  roles/lib_utils/library/oo_ec2_group.py | 903
-rw-r--r--  roles/nuage_master/handlers/main.yaml | 10
-rw-r--r--  roles/nuage_node/handlers/main.yaml | 2
-rw-r--r--  roles/nuage_node/vars/main.yaml | 2
-rw-r--r--  roles/openshift_atomic/README.md | 28
-rw-r--r--  roles/openshift_atomic/meta/main.yml | 13
-rw-r--r--  roles/openshift_aws/defaults/main.yml | 41
-rw-r--r--  roles/openshift_aws/filter_plugins/openshift_aws_filters.py | 35
-rw-r--r--  roles/openshift_aws/tasks/accept_nodes.yml | 11
-rw-r--r--  roles/openshift_aws/tasks/build_node_group.yml | 37
-rw-r--r--  roles/openshift_aws/tasks/iam_role.yml | 14
-rw-r--r--  roles/openshift_aws/tasks/launch_config.yml | 37
-rw-r--r--  roles/openshift_aws/tasks/launch_config_create.yml | 26
-rw-r--r--  roles/openshift_aws/tasks/provision.yml | 19
-rw-r--r--  roles/openshift_aws/tasks/provision_instance.yml | 4
-rw-r--r--  roles/openshift_aws/tasks/provision_nodes.yml | 25
-rw-r--r--  roles/openshift_aws/tasks/remove_scale_group.yml | 9
-rw-r--r--  roles/openshift_aws/tasks/s3.yml | 2
-rw-r--r--  roles/openshift_aws/tasks/scale_group.yml | 36
-rw-r--r--  roles/openshift_aws/tasks/seal_ami.yml | 8
-rw-r--r--  roles/openshift_aws/tasks/security_group.yml | 30
-rw-r--r--  roles/openshift_aws/tasks/security_group_create.yml | 25
-rw-r--r--  roles/openshift_aws/tasks/setup_master_group.yml | 6
-rw-r--r--  roles/openshift_aws/tasks/setup_scale_group_facts.yml | 22
-rw-r--r--  roles/openshift_aws/tasks/upgrade_node_group.yml | 24
-rw-r--r--  roles/openshift_aws/tasks/wait_for_groups.yml | 22
-rw-r--r--  roles/openshift_aws/templates/user_data.j2 | 6
-rw-r--r--  roles/openshift_builddefaults/tasks/main.yml | 5
-rw-r--r--  roles/openshift_ca/meta/main.yml | 1
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 2
-rw-r--r--  roles/openshift_cli/library/openshift_container_binary_sync.py | 7
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 2
-rw-r--r--  roles/openshift_cluster_autoscaler/tasks/main.yml | 2
-rw-r--r--  roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 | 2
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 2
l---------  roles/openshift_examples/files/examples/latest | 1
l---------  roles/openshift_examples/files/examples/v3.7/v3.8 | 1
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json | 21
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json | 21
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json | 3
l---------  roles/openshift_examples/files/examples/v3.9/latest | 1
-rw-r--r--  roles/openshift_excluder/README.md | 5
-rw-r--r--  roles/openshift_excluder/defaults/main.yml | 2
-rw-r--r--  roles/openshift_excluder/meta/main.yml | 1
-rw-r--r--  roles/openshift_excluder/tasks/main.yml | 5
-rw-r--r--  roles/openshift_facts/defaults/main.yml | 100
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 491
-rw-r--r--  roles/openshift_health_checker/HOWTO_CHECKS.md | 2
-rw-r--r--  roles/openshift_health_checker/defaults/main.yml | 6
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_availability.py | 4
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 4
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py | 2
-rw-r--r--  roles/openshift_health_checker/test/etcd_traffic_test.py | 5
-rw-r--r--  roles/openshift_health_checker/test/ovs_version_test.py | 9
-rw-r--r--  roles/openshift_health_checker/test/package_availability_test.py | 8
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py | 7
-rw-r--r--  roles/openshift_hosted/README.md | 7
-rw-r--r--  roles/openshift_hosted/defaults/main.yml | 11
-rw-r--r--  roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py | 2
-rw-r--r--  roles/openshift_hosted/meta/main.yml | 2
-rw-r--r--  roles/openshift_hosted/tasks/registry.yml | 41
-rw-r--r--  roles/openshift_hosted/tasks/router.yml | 18
-rw-r--r--  roles/openshift_hosted/tasks/secure.yml | 14
-rw-r--r--  roles/openshift_hosted/tasks/storage/glusterfs.yml | 10
-rw-r--r--  roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml | 2
-rw-r--r--  roles/openshift_hosted/tasks/storage/object_storage.yml | 4
-rw-r--r--  roles/openshift_hosted/tasks/storage/s3.yml | 4
-rw-r--r--  roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2 | 2
-rw-r--r--  roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2 | 2
-rw-r--r--  roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2 | 2
-rw-r--r--  roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2 | 2
-rw-r--r--  roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_hosted_facts/meta/main.yml | 15
-rw-r--r--  roles/openshift_hosted_facts/tasks/main.yml | 19
-rw-r--r--  roles/openshift_hosted_metrics/README.md | 54
-rw-r--r--  roles/openshift_hosted_metrics/defaults/main.yml | 2
-rw-r--r--  roles/openshift_hosted_metrics/handlers/main.yml | 31
-rw-r--r--  roles/openshift_hosted_metrics/meta/main.yaml | 18
-rw-r--r--  roles/openshift_hosted_metrics/tasks/install.yml | 132
-rw-r--r--  roles/openshift_hosted_metrics/tasks/main.yaml | 75
-rw-r--r--  roles/openshift_hosted_metrics/vars/main.yaml | 21
-rw-r--r--  roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 | 2
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 10
-rw-r--r--  roles/openshift_logging/handlers/main.yml | 8
-rw-r--r--  roles/openshift_logging/tasks/generate_certs.yaml | 18
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 6
-rw-r--r--  roles/openshift_logging/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_logging_curator/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_curator/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 1
-rw-r--r--  roles/openshift_logging_eventrouter/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_logging_fluentd/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_fluentd/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_logging_kibana/defaults/main.yml | 2
-rw-r--r--  roles/openshift_logging_kibana/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_kibana/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_mux/defaults/main.yml | 2
-rw-r--r--  roles/openshift_logging_mux/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_mux/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_management/README.md | 10
-rw-r--r--  roles/openshift_management/defaults/main.yml | 2
-rw-r--r--  roles/openshift_management/meta/main.yml | 1
-rw-r--r--  roles/openshift_management/tasks/main.yml | 12
-rw-r--r--  roles/openshift_management/tasks/storage/storage.yml | 2
-rw-r--r--  roles/openshift_master/handlers/main.yml | 6
-rw-r--r--  roles/openshift_master/meta/main.yml | 1
-rw-r--r--  roles/openshift_master/tasks/main.yml | 67
-rw-r--r--  roles/openshift_master/tasks/registry_auth.yml | 10
-rw-r--r--  roles/openshift_master/tasks/restart.yml | 4
-rw-r--r--  roles/openshift_master/tasks/system_container.yml | 10
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 47
-rw-r--r--  roles/openshift_master/tasks/upgrade/rpm_upgrade.yml | 12
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 | 16
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 | 16
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 22
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 | 6
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 | 8
-rw-r--r--  roles/openshift_master_cluster/README.md | 34
-rw-r--r--  roles/openshift_master_cluster/meta/main.yml | 15
-rw-r--r--  roles/openshift_master_cluster/tasks/configure.yml | 43
-rw-r--r--  roles/openshift_master_cluster/tasks/main.yml | 14
-rw-r--r--  roles/openshift_master_facts/defaults/main.yml | 1
-rw-r--r--  roles/openshift_master_facts/filter_plugins/openshift_master.py | 26
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 12
-rw-r--r--  roles/openshift_metrics/handlers/main.yml | 8
-rw-r--r--  roles/openshift_nfs/tasks/setup.yml | 3
-rw-r--r--  roles/openshift_node/README.md | 6
-rw-r--r--  roles/openshift_node/defaults/main.yml | 6
-rw-r--r--  roles/openshift_node/handlers/main.yml | 2
-rw-r--r--  roles/openshift_node/tasks/aws.yml | 2
-rw-r--r--  roles/openshift_node/tasks/config.yml | 8
-rw-r--r--  roles/openshift_node/tasks/config/configure-node-settings.yml | 2
-rw-r--r--  roles/openshift_node/tasks/config/configure-proxy-settings.yml | 2
-rw-r--r--  roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml | 2
-rw-r--r--  roles/openshift_node/tasks/config/install-node-docker-service-file.yml | 2
-rw-r--r--  roles/openshift_node/tasks/docker/upgrade.yml | 17
-rw-r--r--  roles/openshift_node/tasks/install.yml | 4
-rw-r--r--  roles/openshift_node/tasks/main.yml | 4
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 8
-rw-r--r--  roles/openshift_node/tasks/openvswitch_system_container.yml | 5
-rw-r--r--  roles/openshift_node/tasks/registry_auth.yml | 8
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 2
-rw-r--r--  roles/openshift_node/tasks/upgrade.yml | 14
-rw-r--r--  roles/openshift_node/tasks/upgrade/restart.yml | 8
-rw-r--r--  roles/openshift_node/tasks/upgrade/rpm_upgrade.yml | 6
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.dep.service | 8
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 22
-rw-r--r--  roles/openshift_openstack/defaults/main.yml | 12
-rw-r--r--  roles/openshift_openstack/tasks/check-prerequisites.yml | 8
-rw-r--r--  roles/openshift_openstack/tasks/hostname.yml | 26
-rw-r--r--  roles/openshift_openstack/tasks/node-configuration.yml | 6
-rw-r--r--  roles/openshift_openstack/tasks/populate-dns.yml | 23
-rw-r--r--  roles/openshift_openstack/tasks/provision.yml | 4
-rw-r--r--  roles/openshift_openstack/templates/heat_stack.yaml.j2 | 136
-rw-r--r--  roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py | 157
-rw-r--r--  roles/openshift_persistent_volumes/defaults/main.yml | 9
-rw-r--r--  roles/openshift_persistent_volumes/meta/main.yml | 3
-rw-r--r--  roles/openshift_persistent_volumes/tasks/main.yml | 57
-rw-r--r--  roles/openshift_persistent_volumes/tasks/pv.yml | 17
-rw-r--r--  roles/openshift_persistent_volumes/tasks/pvc.yml | 17
-rw-r--r--  roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 | 2
-rw-r--r--  roles/openshift_prometheus/README.md | 2
-rw-r--r--  roles/openshift_prometheus/defaults/main.yaml | 3
-rw-r--r--  roles/openshift_prometheus/tasks/install_prometheus.yaml | 3
-rw-r--r--  roles/openshift_provisioners/tasks/install_efs.yaml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 17
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 4
-rw-r--r--  roles/openshift_storage_glusterfs/meta/main.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 10
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 3
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 7
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/main.yml | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 | 4
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 | 4
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 | 4
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 | 4
-rw-r--r--  roles/openshift_storage_nfs/meta/main.yml | 2
-rw-r--r--  roles/openshift_storage_nfs/tasks/main.yml | 20
-rw-r--r--  roles/openshift_storage_nfs/templates/exports.j2 | 16
-rw-r--r--  roles/openshift_storage_nfs_lvm/tasks/main.yml | 2
-rw-r--r--  roles/openshift_version/defaults/main.yml | 8
-rw-r--r--  roles/openshift_version/meta/main.yml | 1
-rw-r--r--  roles/openshift_version/tasks/main.yml | 8
-rw-r--r--  roles/openshift_version/tasks/set_version_rpm.yml | 6
-rw-r--r--  roles/rhel_subscribe/README.md | 29
-rw-r--r--  roles/rhel_subscribe/defaults/main.yml | 2
-rw-r--r--  roles/rhel_subscribe/tasks/enterprise.yml | 21
-rw-r--r--  roles/rhel_subscribe/tasks/main.yml | 31
-rw-r--r--  setup.py | 56
-rw-r--r-- [-rwxr-xr-x]  test/integration/build-images.sh | 0
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml | 6
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml | 4
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml | 6
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml | 6
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml | 4
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml | 6
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml | 6
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml | 8
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml | 8
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml | 4
-rw-r--r--  test/integration/openshift_health_checker/setup_container.yml | 7
-rw-r--r-- [-rwxr-xr-x]  test/integration/run-tests.sh | 0
-rw-r--r--  tox.ini | 3
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py | 2
409 files changed, 3535 insertions, 4609 deletions
diff --git a/.papr.sh b/.papr.sh
index 80453d4b2..c7b06f059 100755
--- a/.papr.sh
+++ b/.papr.sh
@@ -35,7 +35,7 @@ trap upload_journals ERR
# run the actual installer
# FIXME: override openshift_image_tag defined in the inventory until
# https://github.com/openshift/openshift-ansible/issues/4478 is fixed.
-ansible-playbook -vvv -i .papr.inventory playbooks/byo/config.yml -e "openshift_image_tag=$OPENSHIFT_IMAGE_TAG"
+ansible-playbook -vvv -i .papr.inventory playbooks/deploy_cluster.yml -e "openshift_image_tag=$OPENSHIFT_IMAGE_TAG"
### DISABLING TESTS FOR NOW, SEE:
### https://github.com/openshift/openshift-ansible/pull/6132
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 9db0b5c98..15dc27984 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.8.0-0.13.0 ./
+3.9.0-0.8.0 ./
diff --git a/DEPLOYMENT_TYPES.md b/DEPLOYMENT_TYPES.md
index e52e47202..3788e9bfb 100644
--- a/DEPLOYMENT_TYPES.md
+++ b/DEPLOYMENT_TYPES.md
@@ -10,7 +10,7 @@ The table below outlines the defaults per `openshift_deployment_type`:
| openshift_deployment_type | origin | openshift-enterprise |
|-----------------------------------------------------------------|------------------------------------------|----------------------------------------|
-| **openshift.common.service_type** (also used for package names) | origin | atomic-openshift |
+| **openshift_service_type** (also used for package names) | origin | atomic-openshift |
| **openshift.common.config_base** | /etc/origin | /etc/origin |
| **openshift_data_dir** | /var/lib/origin | /var/lib/origin |
| **openshift.master.registry_url openshift.node.registry_url** | openshift/origin-${component}:${version} | openshift3/ose-${component}:${version} |
diff --git a/README.md b/README.md
index cc88b60bf..fdedf2f19 100644
--- a/README.md
+++ b/README.md
@@ -16,20 +16,27 @@ tracked by
[#2720](https://github.com/openshift/openshift-ansible/issues/2720).
## Getting the correct version
+When choosing an openshift release, ensure that the necessary origin packages
+are available in your distribution's repository. By default, openshift-ansible
+will not configure extra repositories for testing or staging packages for
+end users.
-The
+We recommend using a release branch. We maintain stable branches
+corresponding to upstream Origin releases, e.g.: we guarantee an
+openshift-ansible 3.2 release will fully support an origin
+[1.2 release](https://github.com/openshift/openshift-ansible/tree/release-1.2).
+
+The most recent branch will often receive minor feature backports and
+fixes. Older branches will receive only critical fixes.
+
+In addition to the release branches, the master branch
[master branch](https://github.com/openshift/openshift-ansible/tree/master)
tracks our current work **in development** and should be compatible
with the
[Origin master branch](https://github.com/openshift/origin/tree/master)
(code in development).
-In addition to the master branch, we maintain stable branches
-corresponding to upstream Origin releases, e.g.: we guarantee an
-openshift-ansible 3.2 release will fully support an origin
-[1.2 release](https://github.com/openshift/openshift-ansible/tree/release-1.2).
-The most recent branch will often receive minor feature backports and
-fixes. Older branches will receive only critical fixes.
+
**Getting the right openshift-ansible release**
@@ -54,7 +61,7 @@ Install base dependencies:
Requirements:
-- Ansible >= 2.3.0.0
+- Ansible >= 2.4.1.0
- Jinja >= 2.7
- pyOpenSSL
- python-lxml
diff --git a/ansible.cfg b/ansible.cfg
index 9900d28f8..e4d72553e 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -30,8 +30,8 @@ inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt
# work around privilege escalation timeouts in ansible:
timeout = 30
-# Uncomment to use the provided BYO inventory
-#inventory = inventory/byo/hosts.example
+# Uncomment to use the provided example inventory
+#inventory = inventory/hosts.example
[inventory]
# fail more helpfully when the inventory file does not parse (Ansible 2.4+)
diff --git a/docs/proposals/role_decomposition.md b/docs/proposals/role_decomposition.md
index 6434e24e7..37d080d5c 100644
--- a/docs/proposals/role_decomposition.md
+++ b/docs/proposals/role_decomposition.md
@@ -262,7 +262,7 @@ dependencies:
- name: "Create logging project"
command: >
- {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
when: not ansible_check_mode and "not found" in logging_project_result.stderr
- name: Create logging cert directory
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index f9564499d..3eaf2aed5 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -375,6 +375,13 @@ def oo_split(string, separator=','):
return string.split(separator)
+def oo_list_to_dict(lst, separator='='):
+ """ This converts a list of ["k=v"] to a dictionary {k: v}.
+ """
+ kvs = [i.split(separator) for i in lst]
+ return {k: v for k, v in kvs}
+
+
def oo_haproxy_backend_masters(hosts, port):
""" This takes an array of dicts and returns an array of dicts
to be used as a backend for the haproxy role
@@ -690,249 +697,6 @@ def to_padded_yaml(data, level=0, indent=2, **kw):
raise errors.AnsibleFilterError('Failed to convert: %s' % my_e)
-def oo_openshift_env(hostvars):
- ''' Return facts which begin with "openshift_" and translate
- legacy facts to their openshift_env counterparts.
-
- Ex: hostvars = {'openshift_fact': 42,
- 'theyre_taking_the_hobbits_to': 'isengard'}
- returns = {'openshift_fact': 42}
- '''
- if not issubclass(type(hostvars), dict):
- raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
-
- facts = {}
- regex = re.compile('^openshift_.*')
- for key in hostvars:
- if regex.match(key):
- facts[key] = hostvars[key]
-
- return facts
-
-
-# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements, too-many-locals
-def oo_component_persistent_volumes(hostvars, groups, component, subcomponent=None):
- """ Generate list of persistent volumes based on oo_openshift_env
- storage options set in host variables for a specific component.
- """
- if not issubclass(type(hostvars), dict):
- raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
- if not issubclass(type(groups), dict):
- raise errors.AnsibleFilterError("|failed expects groups is a dict")
-
- persistent_volume = None
-
- if component in hostvars['openshift']:
- if subcomponent is not None:
- storage_component = hostvars['openshift'][component][subcomponent]
- else:
- storage_component = hostvars['openshift'][component]
-
- if 'storage' in storage_component:
- params = storage_component['storage']
- kind = params['kind']
- if 'create_pv' in params:
- create_pv = params['create_pv']
- if kind is not None and create_pv:
- if kind == 'nfs':
- host = params['host']
- if host is None:
- if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
- host = groups['oo_nfs_to_config'][0]
- else:
- raise errors.AnsibleFilterError("|failed no storage host detected")
- directory = params['nfs']['directory']
- volume = params['volume']['name']
- path = directory + '/' + volume
- size = params['volume']['size']
- if 'labels' in params:
- labels = params['labels']
- else:
- labels = dict()
- access_modes = params['access']['modes']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- labels=labels,
- access_modes=access_modes,
- storage=dict(
- nfs=dict(
- server=host,
- path=path)))
-
- elif kind == 'openstack':
- volume = params['volume']['name']
- size = params['volume']['size']
- if 'labels' in params:
- labels = params['labels']
- else:
- labels = dict()
- access_modes = params['access']['modes']
- filesystem = params['openstack']['filesystem']
- volume_id = params['openstack']['volumeID']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- labels=labels,
- access_modes=access_modes,
- storage=dict(
- cinder=dict(
- fsType=filesystem,
- volumeID=volume_id)))
-
- elif kind == 'glusterfs':
- volume = params['volume']['name']
- size = params['volume']['size']
- if 'labels' in params:
- labels = params['labels']
- else:
- labels = dict()
- access_modes = params['access']['modes']
- endpoints = params['glusterfs']['endpoints']
- path = params['glusterfs']['path']
- read_only = params['glusterfs']['readOnly']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- labels=labels,
- access_modes=access_modes,
- storage=dict(
- glusterfs=dict(
- endpoints=endpoints,
- path=path,
- readOnly=read_only)))
-
- elif not (kind == 'object' or kind == 'dynamic'):
- msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
- kind,
- component)
- raise errors.AnsibleFilterError(msg)
- return persistent_volume
-
-
-# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements
-def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
- """ Generate list of persistent volumes based on oo_openshift_env
- storage options set in host variables.
- """
- if not issubclass(type(hostvars), dict):
- raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
- if not issubclass(type(groups), dict):
- raise errors.AnsibleFilterError("|failed expects groups is a dict")
- if persistent_volumes is not None and not issubclass(type(persistent_volumes), list):
- raise errors.AnsibleFilterError("|failed expects persistent_volumes is a list")
-
- if persistent_volumes is None:
- persistent_volumes = []
- if 'hosted' in hostvars['openshift']:
- for component in hostvars['openshift']['hosted']:
- persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'hosted', component)
- if persistent_volume is not None:
- persistent_volumes.append(persistent_volume)
-
- if 'logging' in hostvars['openshift']:
- persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'logging')
- if persistent_volume is not None:
- persistent_volumes.append(persistent_volume)
- if 'loggingops' in hostvars['openshift']:
- persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'loggingops')
- if persistent_volume is not None:
- persistent_volumes.append(persistent_volume)
- if 'metrics' in hostvars['openshift']:
- persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'metrics')
- if persistent_volume is not None:
- persistent_volumes.append(persistent_volume)
- if 'prometheus' in hostvars['openshift']:
- persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus')
- if persistent_volume is not None:
- persistent_volumes.append(persistent_volume)
- if 'alertmanager' in hostvars['openshift']['prometheus']:
- persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus', 'alertmanager')
- if persistent_volume is not None:
- persistent_volumes.append(persistent_volume)
- if 'alertbuffer' in hostvars['openshift']['prometheus']:
- persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus', 'alertbuffer')
- if persistent_volume is not None:
- persistent_volumes.append(persistent_volume)
- return persistent_volumes
-
-
-def oo_component_pv_claims(hostvars, component, subcomponent=None):
- """ Generate list of persistent volume claims based on oo_openshift_env
- storage options set in host variables for a speicific component.
- """
- if not issubclass(type(hostvars), dict):
- raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
-
- if component in hostvars['openshift']:
- if subcomponent is not None:
- storage_component = hostvars['openshift'][component][subcomponent]
- else:
- storage_component = hostvars['openshift'][component]
-
- if 'storage' in storage_component:
- params = storage_component['storage']
- kind = params['kind']
- if 'create_pv' in params:
- if 'create_pvc' in params:
- create_pv = params['create_pv']
- create_pvc = params['create_pvc']
- if kind not in [None, 'object'] and create_pv and create_pvc:
- volume = params['volume']['name']
- size = params['volume']['size']
- access_modes = params['access']['modes']
- persistent_volume_claim = dict(
- name="{0}-claim".format(volume),
- capacity=size,
- access_modes=access_modes)
- return persistent_volume_claim
- return None
-
-
-def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):
- """ Generate list of persistent volume claims based on oo_openshift_env
- storage options set in host variables.
- """
- if not issubclass(type(hostvars), dict):
- raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
- if persistent_volume_claims is not None and not issubclass(type(persistent_volume_claims), list):
- raise errors.AnsibleFilterError("|failed expects persistent_volume_claims is a list")
-
- if persistent_volume_claims is None:
- persistent_volume_claims = []
- if 'hosted' in hostvars['openshift']:
- for component in hostvars['openshift']['hosted']:
- persistent_volume_claim = oo_component_pv_claims(hostvars, 'hosted', component)
- if persistent_volume_claim is not None:
- persistent_volume_claims.append(persistent_volume_claim)
-
- if 'logging' in hostvars['openshift']:
- persistent_volume_claim = oo_component_pv_claims(hostvars, 'logging')
- if persistent_volume_claim is not None:
- persistent_volume_claims.append(persistent_volume_claim)
- if 'loggingops' in hostvars['openshift']:
- persistent_volume_claim = oo_component_pv_claims(hostvars, 'loggingops')
- if persistent_volume_claim is not None:
- persistent_volume_claims.append(persistent_volume_claim)
- if 'metrics' in hostvars['openshift']:
- persistent_volume_claim = oo_component_pv_claims(hostvars, 'metrics')
- if persistent_volume_claim is not None:
- persistent_volume_claims.append(persistent_volume_claim)
- if 'prometheus' in hostvars['openshift']:
- persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus')
- if persistent_volume_claim is not None:
- persistent_volume_claims.append(persistent_volume_claim)
- if 'alertmanager' in hostvars['openshift']['prometheus']:
- persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus', 'alertmanager')
- if persistent_volume_claim is not None:
- persistent_volume_claims.append(persistent_volume_claim)
- if 'alertbuffer' in hostvars['openshift']['prometheus']:
- persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus', 'alertbuffer')
- if persistent_volume_claim is not None:
- persistent_volume_claims.append(persistent_volume_claim)
- return persistent_volume_claims
-
-
def oo_31_rpm_rename_conversion(rpms, openshift_version=None):
""" Filters a list of 3.0 rpms and return the corresponding 3.1 rpms
names with proper version (if provided)
@@ -1212,6 +976,7 @@ class FilterModule(object):
"oo_combine_dict": oo_combine_dict,
"oo_dict_to_list_of_dict": oo_dict_to_list_of_dict,
"oo_split": oo_split,
+ "oo_list_to_dict": oo_list_to_dict,
"oo_filter_list": oo_filter_list,
"oo_parse_heat_stack_outputs": oo_parse_heat_stack_outputs,
"oo_parse_named_certificates": oo_parse_named_certificates,
@@ -1219,9 +984,6 @@ class FilterModule(object):
"oo_pretty_print_cluster": oo_pretty_print_cluster,
"oo_generate_secret": oo_generate_secret,
"oo_nodes_with_label": oo_nodes_with_label,
- "oo_openshift_env": oo_openshift_env,
- "oo_persistent_volumes": oo_persistent_volumes,
- "oo_persistent_volume_claims": oo_persistent_volume_claims,
"oo_31_rpm_rename_conversion": oo_31_rpm_rename_conversion,
"oo_pods_match_component": oo_pods_match_component,
"oo_get_hosts_from_hostvars": oo_get_hosts_from_hostvars,
diff --git a/images/installer/root/exports/manifest.json b/images/installer/root/exports/manifest.json
index 8b984d7a3..53696b03e 100644
--- a/images/installer/root/exports/manifest.json
+++ b/images/installer/root/exports/manifest.json
@@ -4,7 +4,7 @@
"OPTS": "",
"VAR_LIB_OPENSHIFT_INSTALLER" : "/var/lib/openshift-installer",
"VAR_LOG_OPENSHIFT_LOG": "/var/log/ansible.log",
- "PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/byo/config.yml",
+ "PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml",
"HOME_ROOT": "/root",
"ANSIBLE_CONFIG": "/usr/share/atomic-openshift-utils/ansible.cfg",
"INVENTORY_FILE": "/dev/null"
diff --git a/images/installer/root/usr/local/bin/run b/images/installer/root/usr/local/bin/run
index cd38a6ff0..67cf7dfde 100755
--- a/images/installer/root/usr/local/bin/run
+++ b/images/installer/root/usr/local/bin/run
@@ -18,7 +18,7 @@ INVENTORY="$(mktemp)"
if [[ -v INVENTORY_FILE ]]; then
# Make a copy so that ALLOW_ANSIBLE_CONNECTION_LOCAL below
# does not attempt to modify the original
- cp -a ${INVENTORY_FILE} ${INVENTORY}
+ cp ${INVENTORY_FILE} ${INVENTORY}
elif [[ -v INVENTORY_DIR ]]; then
INVENTORY="$(mktemp -d)"
cp -R ${INVENTORY_DIR}/* ${INVENTORY}
diff --git a/inventory/byo/.gitignore b/inventory/.gitignore
index 6ff331c7e..6ff331c7e 100644
--- a/inventory/byo/.gitignore
+++ b/inventory/.gitignore
diff --git a/inventory/README.md b/inventory/README.md
index 5e26e3c32..2e348194f 100644
--- a/inventory/README.md
+++ b/inventory/README.md
@@ -1,5 +1 @@
-# OpenShift Ansible inventory config files
-
-You can install OpenShift on:
-
-* [BYO](byo/) (Bring your own), use this inventory config file to install OpenShift on your pre-existing hosts
+# OpenShift Ansible example inventory config files
diff --git a/inventory/byo/hosts.example b/inventory/hosts.example
index 3a9944ba4..d857cd1a7 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/hosts.example
@@ -1,4 +1,4 @@
-# This is an example of a bring your own (byo) host inventory
+# This is an example of an OpenShift-Ansible host inventory
# Create an OSEv3 group that contains the masters and nodes groups
[OSEv3:children]
@@ -298,24 +298,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Set cockpit plugins
#osm_cockpit_plugins=['cockpit-kubernetes']
-# Native high availability cluster method with optional load balancer.
+# Native high availability (default cluster method)
# If no lb group is defined, the installer assumes that a load balancer has
# been preconfigured. For installation the value of
# openshift_master_cluster_hostname must resolve to the load balancer
# or to one or all of the masters defined in the inventory if no load
# balancer is present.
-#openshift_master_cluster_method=native
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-
-# Pacemaker high availability cluster method.
-# Pacemaker HA environment must be able to self provision the
-# configured VIP. For installation openshift_master_cluster_hostname
-# must resolve to the configured VIP.
-#openshift_master_cluster_method=pacemaker
-#openshift_master_cluster_password=openshift_cluster
-#openshift_master_cluster_vip=192.168.133.25
-#openshift_master_cluster_public_vip=192.168.133.25
#openshift_master_cluster_hostname=openshift-ansible.test.example.com
#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
@@ -653,6 +641,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_storage_volume_size=10Gi
#openshift_prometheus_storage_labels={'storage': 'prometheus'}
#openshift_prometheus_storage_type='pvc'
+#openshift_prometheus_storage_class=glusterfs-storage
# For prometheus-alertmanager
#openshift_prometheus_alertmanager_storage_kind=nfs
#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
@@ -662,6 +651,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_alertmanager_storage_volume_size=10Gi
#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
#openshift_prometheus_alertmanager_storage_type='pvc'
+#openshift_prometheus_alertmanager_storage_class=glusterfs-storage
# For prometheus-alertbuffer
#openshift_prometheus_alertbuffer_storage_kind=nfs
#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
@@ -671,6 +661,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
#openshift_prometheus_alertbuffer_storage_type='pvc'
+#openshift_prometheus_alertbuffer_storage_class=glusterfs-storage
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
@@ -684,6 +675,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_storage_volume_size=10Gi
#openshift_prometheus_storage_labels={'storage': 'prometheus'}
#openshift_prometheus_storage_type='pvc'
+#openshift_prometheus_storage_class=glusterfs-storage
# For prometheus-alertmanager
#openshift_prometheus_alertmanager_storage_kind=nfs
#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
@@ -693,6 +685,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_alertmanager_storage_volume_size=10Gi
#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
#openshift_prometheus_alertmanager_storage_type='pvc'
+#openshift_prometheus_alertmanager_storage_class=glusterfs-storage
# For prometheus-alertbuffer
#openshift_prometheus_alertbuffer_storage_kind=nfs
#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
@@ -702,6 +695,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
#openshift_prometheus_alertbuffer_storage_type='pvc'
+#openshift_prometheus_alertbuffer_storage_class=glusterfs-storage
#
# Option C - none -- Prometheus, alertmanager and alertbuffer will use emptydir volumes
# which are destroyed when pods are deleted
@@ -1059,7 +1053,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# name and password AND are trying to use integration scripts.
#
# For example, adding this cluster as a container provider,
-# playbooks/byo/openshift-management/add_container_provider.yml
+# playbooks/openshift-management/add_container_provider.yml
#openshift_management_username: admin
#openshift_management_password: smartvm
diff --git a/inventory/byo/hosts.byo.glusterfs.external.example b/inventory/hosts.glusterfs.external.example
index acf68266e..bf2557cf0 100644
--- a/inventory/byo/hosts.byo.glusterfs.external.example
+++ b/inventory/hosts.glusterfs.external.example
@@ -1,11 +1,11 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
# with natively hosted, containerized GlusterFS storage.
#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
# cluster with GlusterFS storage, which will use that storage to create a
# volume that will provide backend storage for a hosted Docker registry.
#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with openshift-glusterfs/config.yml to
# deploy GlusterFS storage on an existing cluster. With this playbook, the
# registry backend volume will be created but the administrator must then
# either deploy a hosted registry or change an existing hosted registry to use
@@ -13,7 +13,7 @@
#
# There are additional configuration parameters that can be specified to
# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
# roles/openshift_storage_glusterfs/README.md for additional details.
[OSEv3:children]
@@ -44,7 +44,7 @@ node2 openshift_schedulable=True
master
# Specify the glusterfs group, which contains the nodes of the external
-# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
+# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
# and "glusterfs_devices" variables defined.
#
# The first variable indicates the hostname of the external GLusterFS node,
diff --git a/inventory/byo/hosts.byo.glusterfs.mixed.example b/inventory/hosts.glusterfs.mixed.example
index a559dc377..8a20a037e 100644
--- a/inventory/byo/hosts.byo.glusterfs.mixed.example
+++ b/inventory/hosts.glusterfs.mixed.example
@@ -1,11 +1,11 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
# with natively hosted, containerized GlusterFS storage.
#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
# cluster with GlusterFS storage, which will use that storage to create a
# volume that will provide backend storage for a hosted Docker registry.
#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with openshift-glusterfs/config.yml to
# deploy GlusterFS storage on an existing cluster. With this playbook, the
# registry backend volume will be created but the administrator must then
# either deploy a hosted registry or change an existing hosted registry to use
@@ -13,7 +13,7 @@
#
# There are additional configuration parameters that can be specified to
# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
# roles/openshift_storage_glusterfs/README.md for additional details.
[OSEv3:children]
@@ -47,7 +47,7 @@ node2 openshift_schedulable=True
master
# Specify the glusterfs group, which contains the nodes of the external
-# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
+# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
# and "glusterfs_devices" variables defined.
#
# The first variable indicates the hostname of the external GLusterFS node,
diff --git a/inventory/byo/hosts.byo.glusterfs.native.example b/inventory/hosts.glusterfs.native.example
index ca4765c53..59acf1194 100644
--- a/inventory/byo/hosts.byo.glusterfs.native.example
+++ b/inventory/hosts.glusterfs.native.example
@@ -1,16 +1,16 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
# with natively hosted, containerized GlusterFS storage for applications. It
-# will also autmatically create a StorageClass for this purpose.
+# will also automatically create a StorageClass for this purpose.
#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
# cluster with GlusterFS storage.
#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with openshift-glusterfs/config.yml to
# deploy GlusterFS storage on an existing cluster.
#
# There are additional configuration parameters that can be specified to
# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
# roles/openshift_storage_glusterfs/README.md for additional details.
[OSEv3:children]
diff --git a/inventory/byo/hosts.byo.glusterfs.registry-only.example b/inventory/hosts.glusterfs.registry-only.example
index 32040f593..6f33e9f6d 100644
--- a/inventory/byo/hosts.byo.glusterfs.registry-only.example
+++ b/inventory/hosts.glusterfs.registry-only.example
@@ -1,12 +1,12 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
# with natively hosted, containerized GlusterFS storage for exclusive use
# as storage for a natively hosted Docker registry.
#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
# cluster with GlusterFS storage, which will use that storage to create a
# volume that will provide backend storage for a hosted Docker registry.
#
-# This inventory may also be used with byo/openshift-glusterfs/registry.yml to
+# This inventory may also be used with openshift-glusterfs/registry.yml to
# deploy GlusterFS storage on an existing cluster. With this playbook, the
# registry backend volume will be created but the administrator must then
# either deploy a hosted registry or change an existing hosted registry to use
@@ -14,7 +14,7 @@
#
# There are additional configuration parameters that can be specified to
# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
# roles/openshift_storage_glusterfs/README.md for additional details.
[OSEv3:children]
diff --git a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example b/inventory/hosts.glusterfs.storage-and-registry.example
index 9bd37cbf6..1f3a4282a 100644
--- a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
+++ b/inventory/hosts.glusterfs.storage-and-registry.example
@@ -1,12 +1,12 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
+# This is an example of an OpenShift-Ansible host inventory for a cluster
# with natively hosted, containerized GlusterFS storage for both general
# application use and a natively hosted Docker registry. It will also create a
# StorageClass for the general storage.
#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
+# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
# cluster with GlusterFS storage.
#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with openshift-glusterfs/config.yml to
# deploy GlusterFS storage on an existing cluster. With this playbook, the
# registry backend volume will be created but the administrator must then
# either deploy a hosted registry or change an existing hosted registry to use
@@ -14,7 +14,7 @@
#
# There are additional configuration parameters that can be specified to
# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# documentation in playbooks/openshift-glusterfs/README.md and
# roles/openshift_storage_glusterfs/README.md for additional details.
[OSEv3:children]
diff --git a/inventory/byo/hosts.openstack b/inventory/hosts.openstack
index c648078c4..d928c2b86 100644
--- a/inventory/byo/hosts.openstack
+++ b/inventory/hosts.openstack
@@ -1,4 +1,4 @@
-# This is an example of a bring your own (byo) host inventory
+# This is an example of an OpenShift-Ansible host inventory
# Create an OSEv3 group that contains the masters and nodes groups
[OSEv3:children]
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 7d543afdd..87423f59e 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,14 +10,14 @@
Name: openshift-ansible
Version: 3.9.0
-Release: 0.0.0%{?dist}
+Release: 0.8.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
Source0: https://github.com/openshift/openshift-ansible/archive/%{commit}/%{name}-%{version}.tar.gz
BuildArch: noarch
-Requires: ansible >= 2.3
+Requires: ansible >= 2.4.1
Requires: python2
Requires: python-six
Requires: tar
@@ -67,7 +67,7 @@ rm -f %{buildroot}%{python_sitelib}/openshift_ansible/gce
# openshift-ansible-docs install
# Install example inventory into docs/examples
mkdir -p docs/example-inventories
-cp inventory/byo/* docs/example-inventories/
+cp inventory/* docs/example-inventories/
# openshift-ansible-files install
cp -rp files %{buildroot}%{_datadir}/ansible/%{name}/
@@ -285,14 +285,161 @@ Atomic OpenShift Utilities includes
%changelog
+* Tue Dec 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.8.0
+- Remove empty openshift_hosted_facts role (mgugino@redhat.com)
+- Refactor upgrade codepaths step 1 (mgugino@redhat.com)
+
+* Tue Dec 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.7.0
+- Remove bad openshift_examples symlink (rteague@redhat.com)
+- Changing the node group format to a list. (kwoodson@redhat.com)
+- Bump RPM version requirement (sdodson@redhat.com)
+- Clarify version selection in README (mgugino@redhat.com)
+
+* Tue Dec 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.6.0
+- add openshift_master_api_port var to example inventory (jdiaz@redhat.com)
+- Allow 2 sets of hostnames for openstack provider (bdobreli@redhat.com)
+
+* Mon Dec 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.5.0
+- Remove unneeded embedded etcd logic (mgugino@redhat.com)
+
+* Mon Dec 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.4.0
+- Copying upstream fix for ansible 2.4 ec2_group module. (kwoodson@redhat.com)
+- Add missing dependencies on openshift_facts role (sdodson@redhat.com)
+
+* Mon Dec 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.3.0
+- remove integration tests from tox (lmeyer@redhat.com)
+- correct ansible-playbook command syntax (jdiaz@redhat.com)
+- Add openshift_facts to upgrade plays for service_type (mgugino@redhat.com)
+- Check for openshift attribute before using it during CNS install.
+ (jmencak@redhat.com)
+
+* Mon Dec 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.2.0
+- GlusterFS: Add playbook doc note (jarrpa@redhat.com)
+- Fix openshift hosted registry rollout (rteague@redhat.com)
+- Remove container_runtime from the openshift_version (sdodson@redhat.com)
+
+* Fri Dec 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.1.0
+- Cleanup byo references (rteague@redhat.com)
+- openshift_node: reintroduce restart of CRI-O. (gscrivan@redhat.com)
+- container-engine: skip openshift_docker_log_driver when it is False
+ (gscrivan@redhat.com)
+- container-engine: log-opts is a dictionary in the daemon.json file
+ (gscrivan@redhat.com)
+- openshift_version: add dependency to openshift_facts (gscrivan@redhat.com)
+- openshift_version: define openshift_use_crio_only (gscrivan@redhat.com)
+- openshift_version: add dependency to container_runtime (gscrivan@redhat.com)
+- crio: define and use l_is_node_system_container (gscrivan@redhat.com)
+- Update deprecation checks - include: (rteague@redhat.com)
+- Add os_firewall to prerequisites.yml (mgugino@redhat.com)
+- add 3.8 templates for gluster ep and svc (lmeyer@redhat.com)
+- Remove openshift.common.service_type (mgugino@redhat.com)
+- Remove unused openshift_env_structures and openshift_env (mgugino@redhat.com)
+- Fix incorrect register name master registry auth (mgugino@redhat.com)
+- Include Deprecation: Convert to import_playbook (rteague@redhat.com)
+- add 3.8 templates for gluster ep and svc (m.judeikis@gmail.com)
+- Remove all uses of openshift.common.admin_binary (sdodson@redhat.com)
+- Implement container_runtime playbooks and changes (mgugino@redhat.com)
+- Playbook Consolidation - byo/config.yml (rteague@redhat.com)
+- openshift_logging_kibana: fix mixing paren (lmeyer@redhat.com)
+- Fix ami building. (kwoodson@redhat.com)
+- Include Deprecation: Convert to include_tasks (rteague@redhat.com)
+- Add missing symlinks in openshift-logging (rteague@redhat.com)
+- Fix generate_pv_pvcs_list plugin undef (mgugino@redhat.com)
+- Playbook Consolidation - etcd Upgrade (rteague@redhat.com)
+- bug 1519622. Disable rollback of ES DCs (jcantril@redhat.com)
+- Remove all references to pacemaker (pcs, pcsd) and
+ openshift.master.cluster_method. (abutcher@redhat.com)
+- Remove entry point files no longer needed by CI (rteague@redhat.com)
+- Don't check for the deployment_type (tomas@sedovic.cz)
+- Get the correct value out of openshift_release (tomas@sedovic.cz)
+- Fix oreg_auth_credentials_create register var (mgugino@redhat.com)
+- Fix and cleanup not required dns bits (bdobreli@redhat.com)
+- Fix hosted vars (mgugino@redhat.com)
+- Remove duplicate init import in network_manager.yml (rteague@redhat.com)
+- Document testing repos for dev purposes (bdobreli@redhat.com)
+- Remove unused protected_facts_to_overwrite (mgugino@redhat.com)
+- Use openshift testing repos for openstack (bdobreli@redhat.com)
+- Use openshift_release instead of ose_version (tomas@sedovic.cz)
+- Remove the ose_version check (tomas@sedovic.cz)
+- Allow number of retries in openshift_management to be configurable
+ (ealfassa@redhat.com)
+- Bumping to 3.9 (smunilla@redhat.com)
+- Cleanup unused openstack provider code (bdobreli@redhat.com)
+- Adding 3.9 tito releaser (smunilla@redhat.com)
+- Implement container runtime role (mgugino@redhat.com)
+- Fix glusterfs checkpoint info (rteague@redhat.com)
+- storage_glusterfs: fix typo (lmeyer@redhat.com)
+- Playbook Consolidation - Redeploy Certificates (rteague@redhat.com)
+- Fix tox (tomas@sedovic.cz)
+- Remove shell environment lookup (tomas@sedovic.cz)
+- Revert "Fix syntax error caused by an extra paren" (tomas@sedovic.cz)
+- Revert "Fix the env lookup fallback in rhel_subscribe" (tomas@sedovic.cz)
+- Remove reading shell environment in rhel_subscribe (tomas@sedovic.cz)
+- retry package operations (lmeyer@redhat.com)
+- Add v3.9 support (sdodson@redhat.com)
+- Playbook Consolidation - openshift-logging (rteague@redhat.com)
+- Do not escalate privileges in jks generation tasks (iacopo.rozzo@amadeus.com)
+- Fix inventory symlinks in origin-ansible container. (dgoodwin@redhat.com)
+- Initial upgrade for scale groups. (kwoodson@redhat.com)
+- Update the doc text (tomas@sedovic.cz)
+- Optionally subscribe OpenStack RHEL nodes (tomas@sedovic.cz)
+- Fix the env lookup fallback in rhel_subscribe (tomas@sedovic.cz)
+- Fix syntax error caused by an extra paren (tomas@sedovic.cz)
+- Fix no_log warnings for custom module (mgugino@redhat.com)
+- Add external_svc_subnet for k8s loadbalancer type service
+ (jihoon.o@samsung.com)
+- Remove openshift_facts project_cfg_facts (mgugino@redhat.com)
+- Remove dns_port fact (mgugino@redhat.com)
+- Bug 1512793- Fix idempotence issues in ASB deploy (fabian@fabianism.us)
+- Remove unused task file from etcd role (rteague@redhat.com)
+- fix type in authroize (jchaloup@redhat.com)
+- Use IP addresses for OpenStack nodes (tomas@sedovic.cz)
+- Update prometheus to 2.0.0 GA (zgalor@redhat.com)
+- remove schedulable from openshift_facts (mgugino@redhat.com)
+- inventory: Add example for service catalog vars (smilner@redhat.com)
+- Correct usage of include_role (rteague@redhat.com)
+- Remove openshift.common.cli_image (mgugino@redhat.com)
+- Fix openshift_env fact creation within openshift_facts. (abutcher@redhat.com)
+- Combine openshift_node and openshift_node_dnsmasq (mgugino@redhat.com)
+- GlusterFS: Remove extraneous line from glusterblock template
+ (jarrpa@redhat.com)
+- Remove openshift_clock from meta depends (mgugino@redhat.com)
+- Simplify is_master_system_container logic (mgugino@redhat.com)
+- dist.iteritems() no longer exists in Python 3. (jpazdziora@redhat.com)
+- Remove spurrious file committed by error (diego.abelenda@camptocamp.com)
+- Fix name of the service pointed to by hostname
+ (diego.abelenda@camptocamp.com)
+- Missed the default value after the variable name change...
+ (diego.abelenda@camptocamp.com)
+- Change the name of the variable and explicitely document the names
+ (diego.abelenda@camptocamp.com)
+- Allow to set the hostname for routes to prometheus and alertmanager
+ (diego.abelenda@camptocamp.com)
+- Allow openshift_install_examples to be false (michael.fraenkel@gmail.com)
+- Include Deprecation - openshift-service-catalog (rteague@redhat.com)
+- Remove is_openvswitch_system_container from facts (mgugino@redhat.com)
+- Workaround the fact that package state=present with dnf fails for already
+ installed but excluded packages. (jpazdziora@redhat.com)
+- With dnf repoquery and excluded packages, --disableexcludes=all is needed to
+ list the package with --installed. (jpazdziora@redhat.com)
+- Add support for external glusterfs as registry backend (m.judeikis@gmail.com)
+- cri-o: honor additional and insecure registries again (gscrivan@redhat.com)
+- docker: copy Docker metadata to the alternative storage path
+ (gscrivan@redhat.com)
+- Add check for gluterFS DS to stop restarts (m.judeikis@gmail.com)
+- Bug 1514417 - Adding correct advertise-client-urls (shawn.hurley21@gmail.com)
+- Uninstall tuned-profiles-atomic-openshift-node as defined in origin.spec
+ (jmencak@redhat.com)
+- Mod startup script to publish all frontend binds (cwilkers@redhat.com)
+
* Thu Nov 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.13.0
--
+-
* Thu Nov 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.12.0
--
+-
* Thu Nov 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.11.0
--
+-
* Thu Nov 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.10.0
- tox.ini: simplify unit test reqs (lmeyer@redhat.com)
@@ -341,16 +488,16 @@ Atomic OpenShift Utilities includes
- Include Deprecation - Init Playbook Paths (rteague@redhat.com)
* Mon Nov 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.8.0
--
+-
* Mon Nov 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.7.0
--
+-
* Mon Nov 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.6.0
--
+-
* Sun Nov 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.5.0
--
+-
* Sun Nov 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.4.0
- bug 1498398. Enclose content between store tag (rromerom@redhat.com)
@@ -643,10 +790,10 @@ Atomic OpenShift Utilities includes
- Allow cluster IP for docker-registry service to be set (hansmi@vshn.ch)
* Thu Nov 09 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.5-1
--
+-
* Wed Nov 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.4-1
--
+-
* Wed Nov 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.3-1
- Adding configuration for keeping transient namespace on error.
@@ -816,10 +963,10 @@ Atomic OpenShift Utilities includes
- GlusterFS: Remove image option from heketi command (jarrpa@redhat.com)
* Mon Oct 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.187.0
--
+-
* Sun Oct 29 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.186.0
--
+-
* Sat Oct 28 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.185.0
- bug 1506073. Lower cpu request for logging when it exceeds limit
@@ -849,7 +996,7 @@ Atomic OpenShift Utilities includes
- Refactor health check playbooks (rteague@redhat.com)
* Fri Oct 27 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.183.0
--
+-
* Thu Oct 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.182.0
- Fixing documentation for the cert_key_path variable name.
@@ -923,16 +1070,16 @@ Atomic OpenShift Utilities includes
(hansmi@vshn.ch)
* Mon Oct 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.175.0
--
+-
* Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.174.0
--
+-
* Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.173.0
--
+-
* Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.172.0
--
+-
* Sat Oct 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.171.0
- Use "requests" for CPU resources instead of limits
@@ -956,16 +1103,16 @@ Atomic OpenShift Utilities includes
(dymurray@redhat.com)
* Fri Oct 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.168.0
--
+-
* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.167.0
--
+-
* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.166.0
--
+-
* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.165.0
--
+-
* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.164.0
- Change to service-signer.crt for template_service_broker CA_BUNDLE
@@ -988,7 +1135,7 @@ Atomic OpenShift Utilities includes
- Remove unneeded master config updates during upgrades (mgugino@redhat.com)
* Wed Oct 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.161.0
--
+-
* Wed Oct 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.160.0
- Fix pvc selector default to be empty dict instead of string
@@ -1030,16 +1177,16 @@ Atomic OpenShift Utilities includes
(jchaloup@redhat.com)
* Sun Oct 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.155.0
--
+-
* Sat Oct 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.154.0
--
+-
* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.153.0
- default groups.oo_new_etcd_to_config to an empty list (jchaloup@redhat.com)
* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.152.0
--
+-
* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.151.0
- updated dynamic provision section for openshift metrics to support storage
@@ -1448,7 +1595,7 @@ Atomic OpenShift Utilities includes
- oc_atomic_container: support Skopeo output (gscrivan@redhat.com)
* Tue Sep 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.125.0
--
+-
* Tue Sep 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.124.0
- Fix ansible_syntax check (rteague@redhat.com)
@@ -1475,7 +1622,7 @@ Atomic OpenShift Utilities includes
(miciah.masters@gmail.com)
* Wed Aug 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.123.0
--
+-
* Wed Aug 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.122.0
- Update openshift_hosted_routers example to be in ini format.
@@ -1537,10 +1684,10 @@ Atomic OpenShift Utilities includes
- Add missing hostnames to registry cert (sdodson@redhat.com)
* Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.115.0
--
+-
* Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.114.0
--
+-
* Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.113.0
- openshift_version: enterprise accepts new style pre-release
@@ -1558,13 +1705,13 @@ Atomic OpenShift Utilities includes
- Setup tuned profiles in /etc/tuned (jmencak@redhat.com)
* Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.109.0
--
+-
* Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.108.0
--
+-
* Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.107.0
--
+-
* Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.106.0
- Add dotnet 2.0 to v3.6 (sdodson@redhat.com)
@@ -1601,13 +1748,13 @@ Atomic OpenShift Utilities includes
(sdodson@redhat.com)
* Sat Aug 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.103.0
--
+-
* Fri Aug 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.102.0
--
+-
* Fri Aug 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.101.0
--
+-
* Fri Aug 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.100.0
- Change memory requests and limits units (mak@redhat.com)
@@ -1906,13 +2053,13 @@ Atomic OpenShift Utilities includes
(kwoodson@redhat.com)
* Mon Jul 17 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.152-1
--
+-
* Sun Jul 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.151-1
--
+-
* Sun Jul 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.150-1
--
+-
* Sat Jul 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.149-1
- Config was missed before replace. (jkaur@redhat.com)
@@ -1935,7 +2082,7 @@ Atomic OpenShift Utilities includes
- GlusterFS: Fix SSH-based heketi configuration (jarrpa@redhat.com)
* Wed Jul 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.143-1
--
+-
* Wed Jul 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.142-1
- add scheduled pods check (jvallejo@redhat.com)
@@ -1960,7 +2107,7 @@ Atomic OpenShift Utilities includes
- updating fetch tasks to be flat paths (ewolinet@redhat.com)
* Mon Jul 10 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.140-1
--
+-
* Sat Jul 08 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.139-1
- increase implicit 300s default timeout to explicit 600s (jchaloup@redhat.com)
@@ -2008,7 +2155,7 @@ Atomic OpenShift Utilities includes
- Fully qualify ocp ansible_service_broker_image_prefix (sdodson@redhat.com)
* Wed Jul 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.134-1
--
+-
* Tue Jul 04 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.133-1
- etcd, syscontainer: fix copy of existing datastore (gscrivan@redhat.com)
@@ -2020,7 +2167,7 @@ Atomic OpenShift Utilities includes
- Fixes to storage migration (sdodson@redhat.com)
* Mon Jul 03 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.132-1
--
+-
* Sun Jul 02 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.131-1
- Fix upgrade (sdodson@redhat.com)
@@ -2161,7 +2308,7 @@ Atomic OpenShift Utilities includes
- bug 1457642. Use same SG index to avoid seeding timeout (jcantril@redhat.com)
* Wed Jun 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.122-1
--
+-
* Tue Jun 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.121-1
- Updating default from null to "" (ewolinet@redhat.com)
@@ -2205,7 +2352,7 @@ Atomic OpenShift Utilities includes
- CloudForms 4.5 templates (simaishi@redhat.com)
* Fri Jun 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.114-1
--
+-
* Fri Jun 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.113-1
- Make rollout status check best-effort, add poll (skuznets@redhat.com)
@@ -2267,7 +2414,7 @@ Atomic OpenShift Utilities includes
- singletonize some role tasks that repeat a lot (lmeyer@redhat.com)
* Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.109-1
--
+-
* Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.108-1
- Upgraded Calico to 2.2.1 Release (vincent.schwarzer@yahoo.de)
@@ -2323,7 +2470,7 @@ Atomic OpenShift Utilities includes
- Install default storageclass in AWS & GCE envs (hekumar@redhat.com)
* Fri Jun 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.98-1
--
+-
* Fri Jun 09 2017 Scott Dodson <sdodson@redhat.com> 3.6.97-1
- Updated to using oo_random_word for secret gen (ewolinet@redhat.com)
@@ -2355,7 +2502,7 @@ Atomic OpenShift Utilities includes
loopback kubeconfigs. (abutcher@redhat.com)
* Tue Jun 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.2-1
--
+-
* Tue Jun 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.1-1
- Updating image for registry_console (ewolinet@redhat.com)
@@ -2602,13 +2749,13 @@ Atomic OpenShift Utilities includes
- Fix additional master cert & client config creation. (abutcher@redhat.com)
* Tue May 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.62-1
--
+-
* Tue May 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.61-1
--
+-
* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.60-1
--
+-
* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.59-1
- Updating logging and metrics to restart api, ha and controllers when updating
@@ -2621,10 +2768,10 @@ Atomic OpenShift Utilities includes
- Moving Dockerfile content to images dir (jupierce@redhat.com)
* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.57-1
--
+-
* Sun May 07 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.56-1
--
+-
* Sat May 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.55-1
- Fix 1448368, and some other minors issues (ghuang@redhat.com)
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
index 44a2ef534..69b2541bb 100644
--- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml
+++ b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
@@ -8,7 +8,7 @@
hosts: masters:!masters[0]
pre_tasks:
- set_fact:
- openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+        openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}"
tasks:
- include_role:
name: openshift_logging
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 0c2a2c7e8..9f044c089 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -62,7 +62,6 @@
- origin-master
- origin-master-api
- origin-master-controllers
- - pcsd
failed_when: false
- hosts: etcd
@@ -124,7 +123,7 @@
- origin-clients
- origin-node
- origin-sdn-ovs
- - tuned-profiles-openshift-node
+ - tuned-profiles-atomic-openshift-node
- tuned-profiles-origin-node
register: result
until: result | success
@@ -384,8 +383,6 @@
- origin-excluder
- origin-docker-excluder
- origin-master
- - pacemaker
- - pcs
register: result
until: result | success
@@ -456,8 +453,6 @@
- /etc/sysconfig/origin-master-api
- /etc/sysconfig/origin-master-controllers
- /usr/share/openshift/examples
- - /var/lib/pacemaker
- - /var/lib/pcsd
- /usr/lib/systemd/system/atomic-openshift-master-api.service
- /usr/lib/systemd/system/atomic-openshift-master-controllers.service
- /usr/lib/systemd/system/origin-master-api.service
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index 417fb539a..d203b9cda 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -75,7 +75,7 @@ If customization is required for the instances, scale groups, or any other confi
In order to create the bootstrap-able AMI, we need to create a basic openshift-ansible inventory. This enables us to create the AMI using the openshift-ansible node roles. This inventory should not include any hosts, but certain variables should be defined in the appropriate groups, just as when deploying a cluster
using the normal openshift-ansible method. See provisioning-inventory.example.ini for an example.
-There are more examples of cluster inventory settings [`here`](../../inventory/byo/).
+There are more examples of cluster inventory settings [`here`](../../inventory/).
#### Step 0 (optional)
@@ -134,11 +134,11 @@ At this point we have successfully created the infrastructure including the mast
Now it is time to install OpenShift using the openshift-ansible installer. This can be achieved by running the following playbook:
```
-$ ansible-playbook -i inventory.yml install.yml @provisioning_vars.yml
+$ ansible-playbook -i inventory.yml install.yml -e @provisioning_vars.yml
```
This playbook accomplishes the following:
1. Builds a dynamic inventory file by querying AWS.
-2. Runs the [`byo`](../../common/openshift-cluster/config.yml)
+2. Runs the [`deploy_cluster.yml`](../deploy_cluster.yml) playbook.
Once this playbook completes, the cluster masters should be installed and configured.
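Note that the corrected command passes the variables file with `-e @provisioning_vars.yml`, i.e. as extra vars loaded from a file. A minimal sketch of such a file is shown below; the keys are illustrative, drawn from the example inventory settings in this repository, and should be adjusted for the target AWS account:
```
---
# provisioning_vars.yml (illustrative values only)
openshift_deployment_type: origin
openshift_aws_region: us-east-1
openshift_aws_clusterid: default
openshift_master_api_port: 443
```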
diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml
index cab2f1e40..e7bed4f6e 100755
--- a/playbooks/aws/openshift-cluster/accept.yml
+++ b/playbooks/aws/openshift-cluster/accept.yml
@@ -18,7 +18,7 @@
name: lib_openshift
- name: fetch masters
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
@@ -30,7 +30,7 @@
until: "'instances' in mastersout and mastersout.instances|length > 0"
- name: fetch new node instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml
index 5815c4975..5bf4f652a 100644
--- a/playbooks/aws/openshift-cluster/build_ami.yml
+++ b/playbooks/aws/openshift-cluster/build_ami.yml
@@ -17,7 +17,7 @@
- name: openshift_aws_region
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
-- include: provision_instance.yml
+- import_playbook: provision_instance.yml
vars:
openshift_aws_node_group_type: compute
@@ -33,8 +33,8 @@
# This is the part that installs all of the software and configs for the instance
# to become a node.
-- include: ../../openshift-node/private/image_prep.yml
+- import_playbook: ../../openshift-node/private/image_prep.yml
-- include: seal_ami.yml
+- import_playbook: seal_ami.yml
vars:
openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
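The `include:` directive for pulling in whole playbooks was deprecated in Ansible 2.4, matching the spec's new minimum Ansible requirement, so these call sites move to `import_playbook:`, which resolves the target statically at parse time. The pattern, restated outside the diff and mirroring the hunk above:
```
---
# import_playbook is resolved at parse time; the vars apply to every play it pulls in.
- import_playbook: provision_instance.yml
  vars:
    openshift_aws_node_group_type: compute
```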
diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml
index da7ec9d21..9d9ed29de 100644
--- a/playbooks/aws/openshift-cluster/hosted.yml
+++ b/playbooks/aws/openshift-cluster/hosted.yml
@@ -1,19 +1,19 @@
---
-- include: ../../openshift-hosted/private/config.yml
+- import_playbook: ../../openshift-hosted/private/config.yml
-- include: ../../openshift-metrics/private/config.yml
+- import_playbook: ../../openshift-metrics/private/config.yml
when: openshift_metrics_install_metrics | default(false) | bool
-- include: ../../common/openshift-cluster/openshift_logging.yml
+- import_playbook: ../../openshift-logging/private/config.yml
when: openshift_logging_install_logging | default(false) | bool
-- include: ../../openshift-prometheus/private/config.yml
+- import_playbook: ../../openshift-prometheus/private/config.yml
when: openshift_hosted_prometheus_deploy | default(false) | bool
-- include: ../../openshift-service-catalog/private/config.yml
+- import_playbook: ../../openshift-service-catalog/private/config.yml
when: openshift_enable_service_catalog | default(false) | bool
-- include: ../../openshift-management/private/config.yml
+- import_playbook: ../../openshift-management/private/config.yml
when: openshift_management_install_management | default(false) | bool
- name: Print deprecated variable warning message if necessary
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
index f8206529a..b03fb0b7f 100644
--- a/playbooks/aws/openshift-cluster/install.yml
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -16,31 +16,31 @@
tasks_from: master_facts.yml
- name: run the init
- include: ../../init/main.yml
+ import_playbook: ../../init/main.yml
- name: perform the installer openshift-checks
- include: ../../openshift-checks/private/install.yml
+ import_playbook: ../../openshift-checks/private/install.yml
- name: etcd install
- include: ../../openshift-etcd/private/config.yml
+ import_playbook: ../../openshift-etcd/private/config.yml
- name: include nfs
- include: ../../openshift-nfs/private/config.yml
+ import_playbook: ../../openshift-nfs/private/config.yml
when: groups.oo_nfs_to_config | default([]) | count > 0
- name: include loadbalancer
- include: ../../openshift-loadbalancer/private/config.yml
+ import_playbook: ../../openshift-loadbalancer/private/config.yml
when: groups.oo_lb_to_config | default([]) | count > 0
- name: include openshift-master config
- include: ../../openshift-master/private/config.yml
+ import_playbook: ../../openshift-master/private/config.yml
- name: include master additional config
- include: ../../openshift-master/private/additional_config.yml
+ import_playbook: ../../openshift-master/private/additional_config.yml
- name: include master additional config
- include: ../../openshift-node/private/config.yml
+ import_playbook: ../../openshift-node/private/config.yml
- name: include openshift-glusterfs
- include: ../../openshift-glusterfs/private/config.yml
+ import_playbook: ../../openshift-glusterfs/private/config.yml
when: groups.oo_glusterfs_to_config | default([]) | count > 0
diff --git a/playbooks/aws/openshift-cluster/prerequisites.yml b/playbooks/aws/openshift-cluster/prerequisites.yml
index f5eb01b14..0afcce331 100644
--- a/playbooks/aws/openshift-cluster/prerequisites.yml
+++ b/playbooks/aws/openshift-cluster/prerequisites.yml
@@ -1,6 +1,6 @@
---
-- include: provision_vpc.yml
+- import_playbook: provision_vpc.yml
-- include: provision_ssh_keypair.yml
+- import_playbook: provision_ssh_keypair.yml
-- include: provision_sec_group.yml
+- import_playbook: provision_sec_group.yml
diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml
index 78dd6a49b..f98f5be9a 100644
--- a/playbooks/aws/openshift-cluster/provision_install.yml
+++ b/playbooks/aws/openshift-cluster/provision_install.yml
@@ -4,16 +4,16 @@
# this playbook is run with the following parameters:
# ansible-playbook -i openshift-ansible-inventory provision_install.yml
- name: Include the provision.yml playbook to create cluster
- include: provision.yml
+ import_playbook: provision.yml
- name: Include the install.yml playbook to install cluster on masters
- include: install.yml
+ import_playbook: install.yml
- name: provision the infra/compute playbook to install node resources
- include: provision_nodes.yml
+ import_playbook: provision_nodes.yml
- name: Include the accept.yml playbook to accept nodes into the cluster
- include: accept.yml
+ import_playbook: accept.yml
- name: Include the hosted.yml playbook to finish the hosted configuration
- include: hosted.yml
+ import_playbook: hosted.yml
diff --git a/playbooks/aws/provisioning-inventory.example.ini b/playbooks/aws/provisioning-inventory.example.ini
index 238a7eb2f..cf76c9d10 100644
--- a/playbooks/aws/provisioning-inventory.example.ini
+++ b/playbooks/aws/provisioning-inventory.example.ini
@@ -11,6 +11,7 @@ etcd
openshift_deployment_type=origin
openshift_master_bootstrap_enabled=True
+openshift_master_api_port=443
openshift_hosted_router_wait=False
openshift_hosted_registry_wait=False
diff --git a/playbooks/byo/config.yml b/playbooks/byo/config.yml
deleted file mode 100644
index 7d03914a2..000000000
--- a/playbooks/byo/config.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: openshift-cluster/config.yml
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
deleted file mode 100644
index 57823847b..000000000
--- a/playbooks/byo/openshift-cluster/config.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: ../../init/main.yml
-
-- include: ../../common/openshift-cluster/config.yml
diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml
deleted file mode 100644
index 74e186f33..000000000
--- a/playbooks/byo/openshift-cluster/openshift-logging.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-#
-# This playbook is a preview of upcoming changes for installing
-# Hosted logging on. See inventory/byo/hosts.*.example for the
-# currently supported method.
-#
-- include: ../../init/main.yml
-
-- include: ../../common/openshift-cluster/openshift_logging.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
index c46b22331..76308465c 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
@@ -1,5 +1,5 @@
---
# Playbook to upgrade Docker to the max allowable version for an OpenShift cluster.
-- include: ../../../../init/evaluate_groups.yml
+- import_playbook: ../../../../init/evaluate_groups.yml
-- include: ../../../../common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/docker/docker_upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
deleted file mode 100644
index a9be8dec4..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: ../../../init/evaluate_groups.yml
-
-- include: ../../../common/openshift-cluster/upgrades/etcd/main.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
index c880fe7f7..0effc68bf 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -2,4 +2,4 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index aeec5f5cc..ebced5413 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -11,4 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 4664a9a2b..f2e97fc01 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -4,4 +4,4 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
index cbb89bc4d..f6fedfdff 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -2,4 +2,4 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 1adfbdec0..b8b5f5762 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -11,4 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index b4da18281..c63f11b30 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -4,4 +4,4 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
index 14b0f85d4..23a3fcbb5 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
@@ -4,4 +4,4 @@
#
# Upgrades scale group nodes only.
#
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_scale_group.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/upgrade_scale_group.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml
index f7e5dd1d2..c4094aa7e 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -2,4 +2,4 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index cc04d81c1..5a3aa6288 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -11,4 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
index 37a9f69bb..74981cc31 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
@@ -4,4 +4,4 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
index e8f9d94e2..a2a9d59f2 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -2,4 +2,4 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index acb4195e3..869e185af 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -11,4 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
index df19097e1..a5867434b 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -4,4 +4,4 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-node/network_manager.yml b/playbooks/byo/openshift-node/network_manager.yml
deleted file mode 100644
index ca09fb65c..000000000
--- a/playbooks/byo/openshift-node/network_manager.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# TODO (rteague): Temporarily leaving this playbook to allow CI tests to operate until CI jobs are updated.
-- include: ../../openshift-node/network_manager.yml
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index 29e0ebe8d..85a65b7e1 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -1,13 +1,12 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
- name: Gather Cluster facts
hosts: oo_all_hosts
roles:
- openshift_facts
tasks:
- - openshift_facts:
- openshift_env: "{{ hostvars[inventory_hostname] | oo_openshift_env }}"
+ - openshift_facts: {}
register: result
- debug:
var: result
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index 261143080..5a877809a 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -1,5 +1,5 @@
---
-- include: ../init/evaluate_groups.yml
+- import_playbook: ../init/evaluate_groups.yml
- name: Subscribe hosts, update repos and update OS packages
hosts: oo_all_hosts
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
deleted file mode 100644
index 2eeb81b86..000000000
--- a/playbooks/common/openshift-cluster/config.yml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-- include: ../../openshift-checks/private/install.yml
-
-- include: ../../openshift-etcd/private/config.yml
-
-- include: ../../openshift-nfs/private/config.yml
- when: groups.oo_nfs_to_config | default([]) | count > 0
-
-- include: ../../openshift-loadbalancer/private/config.yml
- when: groups.oo_lb_to_config | default([]) | count > 0
-
-- include: ../../openshift-master/private/config.yml
-
-- include: ../../openshift-master/private/additional_config.yml
-
-- include: ../../openshift-node/private/config.yml
-
-- include: ../../openshift-glusterfs/private/config.yml
- when: groups.oo_glusterfs_to_config | default([]) | count > 0
-
-- include: ../../openshift-hosted/private/config.yml
-
-- include: ../../openshift-metrics/private/config.yml
- when: openshift_metrics_install_metrics | default(false) | bool
-
-- include: openshift_logging.yml
- when: openshift_logging_install_logging | default(false) | bool
-
-- include: ../../openshift-prometheus/private/config.yml
- when: openshift_hosted_prometheus_deploy | default(false) | bool
-
-- include: ../../openshift-service-catalog/private/config.yml
- when: openshift_enable_service_catalog | default(true) | bool
-
-- include: ../../openshift-management/private/config.yml
- when: openshift_management_install_management | default(false) | bool
-
-- name: Print deprecated variable warning message if necessary
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - debug: msg="{{__deprecation_message}}"
- when:
- - __deprecation_message | default ('') | length > 0
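This monolithic config.yml goes away as part of the playbook consolidation; the same component sequence now sits behind the top-level deploy_cluster.yml entry point that the updated inventory examples reference. A condensed sketch of that consolidated entry point, assuming it imports the same private playbooks listed above:
```
---
# Condensed sketch; the real entry point imports additional phases and conditions.
- import_playbook: init/main.yml
- import_playbook: openshift-checks/private/install.yml
- import_playbook: openshift-etcd/private/config.yml
- import_playbook: openshift-master/private/config.yml
- import_playbook: openshift-node/private/config.yml
- import_playbook: openshift-hosted/private/config.yml
```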
diff --git a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
index 6e953be69..ed97d539c 100644
--- a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
+++ b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
@@ -1,22 +1 @@
---
-- name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Remove unused Docker images for Docker 1.10+ migration
- shell: "docker rmi `docker images -aq`"
- # Will fail on images still in use:
- failed_when: false
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluders.yml
index 800621857..858912379 100644
--- a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
+++ b/playbooks/common/openshift-cluster/upgrades/disable_excluders.yml
@@ -1,11 +1,10 @@
---
- name: Disable excluders
- hosts: oo_masters_to_config
+ hosts: "{{ l_upgrade_excluder_hosts }}"
gather_facts: no
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
r_openshift_excluder_verify_upgrade: true
r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
r_openshift_excluder_package_state: latest
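Parameterizing the play on `l_upgrade_excluder_hosts` lets this one file replace the separate master and node excluder playbooks (the node variant is deleted just below). A sketch of how a calling upgrade playbook might set the host pattern when importing it; the pattern shown is an assumption, not taken from this diff:
```
---
- import_playbook: ../disable_excluders.yml
  vars:
    l_upgrade_excluder_hosts: "oo_nodes_to_upgrade:oo_masters_to_config"
```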
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
deleted file mode 100644
index a66301c0d..000000000
--- a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Disable excluders
- hosts: oo_nodes_to_upgrade:!oo_masters_to_config
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- r_openshift_excluder_verify_upgrade: true
- r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
- r_openshift_excluder_package_state: latest
- r_openshift_excluder_docker_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 6d4ddf011..38aa9df47 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -1,11 +1,11 @@
---
-- include: ../../../../init/evaluate_groups.yml
+- import_playbook: ../../../../init/evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
- name: Check for appropriate Docker versions
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
@@ -19,7 +19,9 @@
msg: Cannot upgrade Docker on Atomic operating systems.
when: openshift.common.is_atomic | bool
- - include: upgrade_check.yml
+ - include_role:
+ name: container_runtime
+ tasks_from: docker_upgrade_check.yml
when: docker_upgrade is not defined or docker_upgrade | bool
@@ -32,6 +34,7 @@
any_errors_fatal: true
roles:
+ - openshift_facts
- lib_openshift
tasks:
@@ -51,7 +54,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ openshift.common.client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
register: l_docker_upgrade_drain_result
@@ -59,7 +62,7 @@
retries: 60
delay: 60
- - include: tasks/upgrade.yml
+ - include_tasks: tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
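The drain step now goes through the generic client binary (`oc adm drain`) rather than the removed `admin_binary` fact. Restated as a standalone task, with hypothetical node and master hostnames and a config path that assumes an origin install:
```
- name: Drain a node before upgrading Docker (sketch)
  command: >
    oc adm drain node-1.example.com
    --config=/etc/origin/master/admin.kubeconfig
    --force --delete-local-data --ignore-daemonsets
  delegate_to: master-1.example.com
```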
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh b/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh
deleted file mode 100644
index 8635eab0d..000000000
--- a/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-# Stop any running containers
-running_container_ids=`docker ps -q`
-if test -n "$running_container_ids"
-then
- docker stop $running_container_ids
-fi
-
-# Delete all containers
-container_ids=`docker ps -a -q`
-if test -n "$container_ids"
-then
- docker rm -f -v $container_ids
-fi
-
-# Delete all images (forcefully)
-image_ids=`docker images -aq`
-if test -n "$image_ids"
-then
- # Some layers are deleted recursively and are no longer present
- # when docker goes to remove them:
- docker rmi -f `docker images -aq` || true
-fi
-
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index 3b779becb..dbc4f39c7 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
@@ -11,9 +11,9 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
when: openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index 83be290e6..4856a4b51 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
@@ -4,9 +4,9 @@
- name: Stop containerized services
service: name={{ item }} state=stopped
with_items:
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
- etcd_container
- openvswitch
failed_when: false
@@ -44,5 +44,5 @@
register: result
until: result | success
-- include: restart.yml
+- include_tasks: restart.yml
when: not skip_docker_restart | default(False) | bool
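These restart tasks switch from the retired `openshift.common.service_type` fact to the plain `openshift_service_type` variable (its removal is listed in the changelog above). The value still selects the systemd unit prefix; a sketch of restarting the node unit with it, assuming the installer has already set the variable:
```
- name: Restart the node service for the configured service type (sketch)
  service:
    name: "{{ openshift_service_type }}-node"   # e.g. origin-node or atomic-openshift-node
    state: restarted
```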
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index 2e3a7ae8b..ed97d539c 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -1,58 +1 @@
---
-
-# This snippet determines if a Docker upgrade is required by checking the inventory
-# variables, the available packages, and sets l_docker_upgrade to True if so.
-
-- set_fact:
- docker_upgrade: True
- when: docker_upgrade is not defined
-
-- name: Check if Docker is installed
- command: rpm -q docker
- args:
- warn: no
- register: pkg_check
- failed_when: pkg_check.rc > 1
- changed_when: no
-
-- name: Get current version of Docker
- command: "{{ repoquery_installed }} --qf '%{version}' docker"
- register: curr_docker_version
- retries: 4
- until: curr_docker_version | succeeded
- changed_when: false
-
-- name: Get latest available version of Docker
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "docker"
- register: avail_docker_version
- retries: 4
- until: avail_docker_version | succeeded
- # Don't expect docker rpm to be available on hosts that don't already have it installed:
- when: pkg_check.rc == 0
- failed_when: false
- changed_when: false
-
-- fail:
- msg: This playbook requires access to Docker 1.12 or later
- # Disable the 1.12 requirement if the user set a specific Docker version
- when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.12','<')))
-
-# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
-- set_fact:
- l_docker_upgrade: False
-
-# Make sure a docker_version is set if none was requested:
-- set_fact:
- docker_version: "{{ avail_docker_version.stdout }}"
- when: pkg_check.rc == 0 and docker_version is not defined
-
-- name: Flag for Docker upgrade if necessary
- set_fact:
- l_docker_upgrade: True
- when: pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(docker_version,'<')
-
-- name: Flag to delete all images prior to upgrade if crossing Docker 1.10 boundary
- set_fact:
- docker_upgrade_nuke_images: True
- when: l_docker_upgrade | bool and docker_upgrade_nuke_images is not defined and curr_docker_version.stdout | version_compare('1.10','<') and docker_version | version_compare('1.10','>=')
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
deleted file mode 120000
index 27ddaa18b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
deleted file mode 120000
index cf407f69b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/roles b/playbooks/common/openshift-cluster/upgrades/etcd/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 9981d905b..8ee83819e 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -1,14 +1,14 @@
---
-- include: ../../../init/evaluate_groups.yml
+- import_playbook: ../../../init/evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
-- include: ../../../init/facts.yml
+- import_playbook: ../../../init/facts.yml
- name: Ensure firewall is not switched during upgrade
- hosts: oo_all_hosts
+ hosts: "{{ l_upgrade_no_switch_firewall_hosts | default('oo_all_hosts') }}"
vars:
openshift_master_installed_version: "{{ hostvars[groups.oo_first_master.0].openshift.common.version }}"
tasks:
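
The firewall play is no longer hard-wired to oo_all_hosts: it honors l_upgrade_no_switch_firewall_hosts and only falls back to all hosts when the caller does not narrow it. The control-plane-only upgrades later in this patch pass, for example:

    - import_playbook: ../init.yml
      vars:
        l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
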
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index c458184c9..344ddea3c 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -114,7 +114,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
post_tasks:
# Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond:
- name: grep pluginOrderOverride
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
new file mode 100644
index 000000000..d5b82d9a0
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -0,0 +1,77 @@
+---
+
+# Pre-upgrade
+- import_playbook: ../initialize_nodes_to_upgrade.yml
+
+- import_playbook: verify_cluster.yml
+
+- name: Update repos on upgrade hosts
+ hosts: "{{ l_upgrade_repo_hosts }}"
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: "{{ l_upgrade_no_proxy_hosts }}"
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- name: OpenShift Health Checks
+ hosts: "{{ l_upgrade_health_check_hosts }}"
+ any_errors_fatal: true
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: upgrade
+ post_tasks:
+ - name: Run health checks (upgrade)
+ action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - docker_image_availability
+
+- import_playbook: ../disable_excluders.yml
+
+- import_playbook: ../../../../init/version.yml
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+# If we're only upgrading nodes, we need to ensure masters are already upgraded
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when:
+ - l_upgrade_nodes_only | default(False) | bool
+ - openshift.common.version != openshift_version
+
+# If we're only upgrading nodes, skip this.
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
+ when: not (l_upgrade_nodes_only | default(False)) | bool
+
+- name: Verify upgrade targets
+ hosts: "{{ l_upgrade_verify_targets_hosts }}"
+ roles:
+ - role: openshift_facts
+ tasks:
+ - include_tasks: verify_upgrade_targets.yml
+
+- name: Verify docker upgrade targets
+ hosts: "{{ l_upgrade_docker_target_hosts }}"
+ tasks:
+ - include_role:
+ name: container_runtime
+ tasks_from: docker_upgrade_check.yml
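
This new pre/config.yml collapses the per-version pre-upgrade boilerplate into a single parameterized playbook: the caller decides which host groups get repo updates, the no_proxy fact, health checks, upgrade-target verification, and the Docker check by setting the l_upgrade_* variables. The v3.6 full upgrade later in this patch imports it as:

    - import_playbook: ../pre/config.yml
      vars:
        l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
        l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
        l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
        l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
        l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
        l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
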
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
deleted file mode 100644
index 8ecae4539..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Flag pre-upgrade checks complete for hosts without errors
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - set_fact:
- pre_upgrade_complete: True
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
deleted file mode 100644
index 6d8503879..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-# Only check if docker upgrade is required if docker_upgrade is not
-# already set to False.
-- include: ../../docker/upgrade_check.yml
- when:
- - docker_upgrade is not defined or (docker_upgrade | bool)
- - not (openshift.common.is_atomic | bool)
-
-# Additional checks for Atomic hosts:
-
-- name: Determine available Docker
- shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
- register: g_atomic_docker_version_result
- when: openshift.common.is_atomic | bool
-
-- set_fact:
- l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
- when: openshift.common.is_atomic | bool
-
-- fail:
- msg: This playbook requires access to Docker 1.12 or later
- when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
new file mode 100644
index 000000000..2ab9f852c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -0,0 +1,93 @@
+---
+# Verify a few items before we proceed with upgrade process.
+
+- name: Verify upgrade can proceed on first master
+ hosts: oo_first_master
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: >
+ This upgrade is only supported for origin and openshift-enterprise
+ deployment types
+ when: deployment_type not in ['origin','openshift-enterprise']
+
+ # Error out in situations where the user has older versions specified in their
+ # inventory in any of the openshift_release, openshift_image_tag, and
+ # openshift_pkg_version variables. These must be removed or updated to proceed
+ # with upgrade.
+ # TODO: Should we block if you're *over* the next major release version as well?
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a {{ openshift_upgrade_target }} upgrade
+ when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
+
+ - fail:
+ msg: >
+ openshift_image_tag is {{ openshift_image_tag }} which is not a
+ valid version for a {{ openshift_upgrade_target }} upgrade
+ when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
+
+ - set_fact:
+ openshift_release: "{{ openshift_release[1:] }}"
+ when: openshift_release is defined and openshift_release[0] == 'v'
+
+ - fail:
+ msg: >
+ openshift_release is {{ openshift_release }} which is not a
+ valid release for a {{ openshift_upgrade_target }} upgrade
+ when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
+
+- name: Verify master processes
+ hosts: oo_masters_to_config
+ roles:
+ - lib_utils
+ - openshift_facts
+ tasks:
+ - name: Read master storage backend setting
+ yedit:
+ state: list
+ src: /etc/origin/master/master-config.yaml
+ key: kubernetesMasterConfig.apiServerArguments.storage-backend
+ register: _storage_backend
+
+ - fail:
+ msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue"
+ when:
+ # assuming the master-config.yml is properly configured, i.e. the value is a list
+ - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3"
+
+ - debug:
+ msg: "Storage backend is set to etcd3"
+
+ - openshift_facts:
+ role: master
+ local_facts:
+ ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+
+ - when: openshift.common.is_containerized | bool
+ block:
+ - set_fact:
+ master_services:
+ - "{{ openshift_service_type }}-master"
+
+ # In case of the non-ha to ha upgrade.
+ - name: Check if the {{ openshift_service_type }}-master-api.service exists
+ command: >
+ systemctl list-units {{ openshift_service_type }}-master-api.service --no-legend
+ register: master_api_service_status
+
+ - set_fact:
+ master_services:
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ when:
+ - master_api_service_status.stdout_lines | length > 0
+ - (openshift_service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
+
+ - name: Ensure Master is running
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ with_items: "{{ master_services }}"
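
verify_cluster.yml merges the former verify_inventory_vars.yml, verify_etcd3_backend.yml, and verify_control_plane_running.yml plays (all deleted below) into one pre-flight playbook. The inventory guards strip the package or tag prefix and compare the remainder against openshift_upgrade_target; an illustrative walk-through with openshift_upgrade_target set to '3.7' (values are examples only):

    # openshift_pkg_version "-3.6.173.0.5"  -> "3.6.173.0.5" is < 3.7  -> fail
    # openshift_image_tag   "v3.6.1"        -> "3.6.1"       is < 3.7  -> fail
    # openshift_release     "v3.7"          -> normalized to "3.7"     -> accepted (matches the target)
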
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
deleted file mode 100644
index 6a5bc24f7..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: Verify master processes
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
- tasks:
- - openshift_facts:
- role: master
- local_facts:
- ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-
- - when: openshift.common.is_containerized | bool
- block:
- - set_fact:
- master_services:
- - "{{ openshift.common.service_type }}-master"
-
- # In case of the non-ha to ha upgrade.
- - name: Check if the {{ openshift.common.service_type }}-master-api.service exists
- command: >
- systemctl list-units {{ openshift.common.service_type }}-master-api.service --no-legend
- register: master_api_service_status
-
- - set_fact:
- master_services:
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- when:
- - master_api_service_status.stdout_lines | length > 0
- - (openshift.common.service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
-
- - name: Ensure Master is running
- service:
- name: "{{ item }}"
- state: started
- enabled: yes
- with_items: "{{ master_services }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
deleted file mode 100644
index f75ae3b15..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: Verify all masters has etcd3 storage backend set
- hosts: oo_masters_to_config
- gather_facts: no
- roles:
- - lib_utils
- tasks:
- - name: Read master storage backend setting
- yedit:
- state: list
- src: /etc/origin/master/master-config.yaml
- key: kubernetesMasterConfig.apiServerArguments.storage-backend
- register: _storage_backend
-
- - fail:
- msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue"
- when:
- # assuming the master-config.yml is properly configured, i.e. the value is a list
- - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3"
-
- - debug:
- msg: "Storage backend is set to etcd3"
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
deleted file mode 100644
index 2a8de50a2..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: OpenShift Health Checks
- hosts: oo_all_hosts
- any_errors_fatal: true
- roles:
- - openshift_health_checker
- vars:
- - r_openshift_health_checker_playbook_context: upgrade
- post_tasks:
- - name: Run health checks (upgrade)
- action: openshift_health_check
- args:
- checks:
- - disk_availability
- - memory_availability
- - docker_image_availability
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
deleted file mode 100644
index 3c0017891..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: Verify upgrade can proceed on first master
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - fail:
- msg: >
- This upgrade is only supported for origin and openshift-enterprise
- deployment types
- when: deployment_type not in ['origin','openshift-enterprise']
-
- # Error out in situations where the user has older versions specified in their
- # inventory in any of the openshift_release, openshift_image_tag, and
- # openshift_pkg_version variables. These must be removed or updated to proceed
- # with upgrade.
- # TODO: Should we block if you're *over* the next major release version as well?
- - fail:
- msg: >
- openshift_pkg_version is {{ openshift_pkg_version }} which is not a
- valid version for a {{ openshift_upgrade_target }} upgrade
- when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
-
- - fail:
- msg: >
- openshift_image_tag is {{ openshift_image_tag }} which is not a
- valid version for a {{ openshift_upgrade_target }} upgrade
- when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
-
- - set_fact:
- openshift_release: "{{ openshift_release[1:] }}"
- when: openshift_release is defined and openshift_release[0] == 'v'
-
- - fail:
- msg: >
- openshift_release is {{ openshift_release }} which is not a
- valid release for a {{ openshift_upgrade_target }} upgrade
- when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 84b740227..96f970506 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -21,7 +21,7 @@
block:
- name: Check latest available OpenShift RPM version
repoquery:
- name: "{{ openshift.common.service_type }}"
+ name: "{{ openshift_service_type }}"
ignore_excluders: true
register: repoquery_out
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 503d75ba0..7b82fe05b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -2,22 +2,8 @@
###############################################################################
# Upgrade Masters
###############################################################################
-
-# If facts cache were for some reason deleted, this fact may not be set, and if not set
-# it will always default to true. This causes problems for the etcd data dir fact detection
-# so we must first make sure this is set correctly before attempting the backup.
-- name: Set master embedded_etcd fact
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
- tasks:
- - openshift_facts:
- role: master
- local_facts:
- embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
-
-- name: Upgrade and backup etcd
- include: ./etcd/main.yml
+- name: Backup and upgrade etcd
+ import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
# Create service signer cert when missing. Service signer certificate
# is added to master config in the master_config_upgrade hook.
@@ -30,7 +16,7 @@
register: service_signer_cert_stat
changed_when: false
-- include: create_service_signer_cert.yml
+- import_playbook: create_service_signer_cert.yml
# oc adm migrate storage should be run prior to etcd v3 upgrade
# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
@@ -71,7 +57,7 @@
- debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
- - include: "{{ openshift_master_upgrade_pre_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
- include_role:
@@ -82,20 +68,20 @@
- debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
- - include: "{{ openshift_master_upgrade_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
- - include: ../../../openshift-master/private/tasks/restart_hosts.yml
+ - include_tasks: ../../../openshift-master/private/tasks/restart_hosts.yml
when: openshift.common.rolling_restart_mode == 'system'
- - include: ../../../openshift-master/private/tasks/restart_services.yml
+ - include_tasks: ../../../openshift-master/private/tasks/restart_services.yml
when: openshift.common.rolling_restart_mode == 'services'
# Run the post-upgrade hook if defined:
- debug: msg="Running master post-upgrade hook {{ openshift_master_upgrade_post_hook }}"
when: openshift_master_upgrade_post_hook is defined
- - include: "{{ openshift_master_upgrade_post_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_post_hook }}"
when: openshift_master_upgrade_post_hook is defined
- name: Post master upgrade - Upgrade clusterpolicies storage
@@ -275,7 +261,7 @@
roles:
- openshift_facts
tasks:
- - include: docker/tasks/upgrade.yml
+ - include_tasks: docker/tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- name: Drain and upgrade master nodes
@@ -305,7 +291,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_control_plane_drain_result
until: not l_upgrade_control_plane_drain_result | failed
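
Two mechanical conversions recur through this hunk: playbook-level include: becomes the static import_playbook:, while task-level include: becomes the dynamic include_tasks:, which the user-supplied hook paths require since they are only known from variables at run time. The drain command also drops the deprecated admin_binary fact in favor of the client binary plus the adm subcommand. In short:

    # Static import: path is fixed and resolved at parse time.
    - import_playbook: create_service_signer_cert.yml

    # Dynamic include: path comes from a variable, so it must be resolved at run time.
    - include_tasks: "{{ openshift_master_upgrade_pre_hook }}"
      when: openshift_master_upgrade_pre_hook is defined
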
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 75ffd3fe9..f7a85545b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -26,7 +26,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not l_upgrade_nodes_drain_result | failed
@@ -45,7 +45,6 @@
name: openshift_excluder
vars:
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Set node schedulability
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index d9ce3a7e3..4fc897a57 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -11,25 +11,19 @@
msg: "Ensure that new scale groups were provisioned before proceeding to update."
when:
- "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0"
+ - "'oo_sg_current_nodes' not in groups or groups.oo_sg_current_nodes|length == 0"
+ - groups.oo_sg_current_nodes == groups.oo_sg_new_nodes
- name: initialize upgrade bits
- include: init.yml
+ import_playbook: init.yml
-- name: Drain and upgrade nodes
+- name: unschedule nodes
hosts: oo_sg_current_nodes
- # This var must be set with -e on invocation, as it is not a per-host inventory var
- # and is evaluated early. Values such as "20%" can also be used.
- serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
- max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
-
- pre_tasks:
+ tasks:
- name: Load lib_openshift modules
- include_role:
+ import_role:
name: ../roles/lib_openshift
- # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
- # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
- # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- name: Mark node unschedulable
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
@@ -40,14 +34,27 @@
register: node_unschedulable
until: node_unschedulable|succeeded
+- name: Drain nodes
+ hosts: oo_sg_current_nodes
+ # This var must be set with -e on invocation, as it is not a per-host inventory var
+ # and is evaluated early. Values such as "20%" can also be used.
+ serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
+ max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
+ tasks:
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not l_upgrade_nodes_drain_result | failed
- retries: 60
- delay: 60
+ retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0 | int }}"
+ delay: 5
+ failed_when:
+ - l_upgrade_nodes_drain_result | failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) == '0'
# Alright, let's clean up!
- name: clean up the old scale group
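
The scale-group path now separates cordoning (Mark node unschedulable) from draining, and the drain gains a --timeout. Both the retry count and the failed_when condition key off openshift_upgrade_nodes_drain_timeout compared as the string '0', so these knobs are meant to be supplied as extra vars on invocation; an illustrative run (values are examples, not defaults):

    # ansible-playbook <scale-group upgrade playbook> \
    #   -e openshift_upgrade_nodes_serial=20% \
    #   -e openshift_upgrade_nodes_drain_timeout=600
    # produces a drain of the form:
    #   <client_binary> adm drain <node> --config=<config_base>/master/admin.kubeconfig \
    #     --force --delete-local-data --ignore-daemonsets --timeout=600s
    # and, because the timeout is non-zero, retries: 0 with a failed drain not treated as fatal.
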
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 5f9c56867..a5ad3801d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -15,104 +15,29 @@
openshift_upgrade_target: '3.6'
openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- include: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_6/master_config_upgrade.yml"
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
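
With the pre-upgrade work factored out, the version-specific playbook reduces to a short sequence of static imports. Omitting the two flag-setting plays shown above, the v3.6 flow is essentially:

    - import_playbook: ../init.yml
    - import_playbook: ../pre/config.yml      # with the l_upgrade_* host patterns listed above
    - import_playbook: validator.yml
    - import_playbook: ../upgrade_control_plane.yml
      vars:
        master_config_hook: "v3_6/master_config_upgrade.yml"
    - import_playbook: ../upgrade_nodes.yml
    - import_playbook: ../post_control_plane.yml
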
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 1aac3d014..1498db4c5 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -11,110 +11,38 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
- tags:
- - pre_upgrade
+- import_playbook: ../init.yml
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
openshift_upgrade_target: '3.6'
openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
-- include: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_6/master_config_upgrade.yml"
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 306b76422..6958652d8 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,93 +17,22 @@
openshift_upgrade_target: '3.6'
openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
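
The nodes-only variant sets l_upgrade_nodes_only: True, which the shared pre/config.yml (added earlier in this patch) uses to insist that masters already run the target version and to skip the master restart validation. Excerpted from that playbook:

    - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
      when:
        - l_upgrade_nodes_only | default(False) | bool
        - openshift.common.version != openshift_version

    - import_playbook: ../../../../openshift-master/private/validate_restart.yml
      when: not (l_upgrade_nodes_only | default(False)) | bool
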
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 6d4949542..4daa9e490 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -15,105 +15,26 @@
openshift_upgrade_target: '3.7'
openshift_upgrade_min: '3.6'
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
-- include: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -121,16 +42,18 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
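
Cycling the controller services after the control-plane upgrade forces a fresh leader election under the new mode. The play now pulls in openshift_facts, presumably so that openshift_service_type, which replaces openshift.common.service_type throughout this patch, is defined before the unit names are built. On an origin deployment, for example, the tasks would act on:

    # Assumes openshift_service_type resolves to "origin" on this deployment type.
    - name: Stop origin-master-controllers
      systemd:
        name: origin-master-controllers
        state: stopped
    - name: Start origin-master-controllers
      systemd:
        name: origin-master-controllers
        state: started
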
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 0a592896b..1750148d4 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -11,113 +11,37 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
- tags:
- - pre_upgrade
+- import_playbook: ../init.yml
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
openshift_upgrade_target: '3.7'
openshift_upgrade_min: '3.6'
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- include: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -125,14 +49,16 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index b381d606a..16d95514c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,93 +17,22 @@
openshift_upgrade_target: '3.7'
openshift_upgrade_min: '3.6'
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index e7d7756d1..0f74e0137 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -15,105 +15,26 @@
openshift_upgrade_target: '3.8'
openshift_upgrade_min: '3.7'
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
-- include: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -121,16 +42,18 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index be362e3ff..08bfd239f 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -11,113 +11,37 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
- tags:
- - pre_upgrade
+- import_playbook: ../init.yml
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
openshift_upgrade_target: '3.8'
openshift_upgrade_min: '3.7'
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- include: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -125,14 +49,16 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
index 6e68116b0..b5f1038fd 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,93 +17,22 @@
openshift_upgrade_target: '3.8'
openshift_upgrade_min: '3.7'
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index 94c16cae0..0aea5069d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -2,124 +2,35 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
- tags:
- - pre_upgrade
+- import_playbook: ../init.yml
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_all_hosts
- tags:
- - pre_upgrade
tasks:
- set_fact:
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.7'
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
+ - set_fact:
+ pre_upgrade_complete: True
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+# Pre-upgrade completed
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -127,6 +38,8 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- name: Stop {{ openshift.common.service_type }}-master-controllers
systemd:
@@ -137,6 +50,6 @@
name: "{{ openshift.common.service_type }}-master-controllers"
state: started
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 2045f6379..05aa737c6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -11,119 +11,38 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
- tags:
- - pre_upgrade
+- import_playbook: ../init.yml
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.7'
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
-- include: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
+ - set_fact:
+ pre_upgrade_complete: True
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+# Pre-upgrade completed
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -131,6 +50,8 @@
- name: Cycle all controller services to force new leader election mode
hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- name: Stop {{ openshift.common.service_type }}-master-controllers
systemd:
@@ -141,4 +62,4 @@
name: "{{ openshift.common.service_type }}-master-controllers"
state: started
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
index 6134f8653..1d1b255c1 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -4,112 +4,31 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
- tags:
- - pre_upgrade
+- import_playbook: ../init.yml
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_all_hosts
- tags:
- - pre_upgrade
tasks:
- set_fact:
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.7'
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
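
Taken together, the hunks above reduce every version-specific upgrade playbook to the same shape: pin the upgrade bounds, then hand all pre-flight work to ../pre/config.yml, scoping each check with an l_upgrade_* host expression. A minimal sketch of that shape, assembled from the v3_9 node-upgrade fragments above and trimmed for brevity (group names and versions come straight from the diff):

---
- import_playbook: ../init.yml

- name: Configure the upgrade target for the common upgrade tasks
  hosts: oo_all_hosts
  tasks:
  - set_fact:
      openshift_upgrade_target: '3.9'
      openshift_upgrade_min: '3.7'

- import_playbook: ../pre/config.yml
  vars:
    l_upgrade_repo_hosts: "oo_nodes_to_config"
    l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
    l_upgrade_nodes_only: True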
diff --git a/playbooks/container-runtime/config.yml b/playbooks/container-runtime/config.yml
new file mode 100644
index 000000000..f15aa771f
--- /dev/null
+++ b/playbooks/container-runtime/config.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: ../init/main.yml
+ vars:
+ skip_version: True
+ skip_version: True
+
+- import_playbook: private/config.yml
diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml
new file mode 100644
index 000000000..67445edeb
--- /dev/null
+++ b/playbooks/container-runtime/private/config.yml
@@ -0,0 +1,28 @@
+---
+- hosts: "{{ l_containerized_host_groups }}"
+ vars:
+ l_chg_temp: "{{ openshift_containerized_host_groups | default([]) }}"
+ l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
+ # role: container_runtime is necessary here to bring role default variables
+ # into the play scope.
+ roles:
+ - role: container_runtime
+ tasks:
+ - include_role:
+ name: container_runtime
+ tasks_from: package_docker.yml
+ when:
+ - not openshift_docker_use_system_container | bool
+ - not openshift_use_crio_only | bool
+ - include_role:
+ name: container_runtime
+ tasks_from: systemcontainer_docker.yml
+ when:
+ - openshift_docker_use_system_container | bool
+ - not openshift_use_crio_only | bool
+ - include_role:
+ name: container_runtime
+ tasks_from: systemcontainer_crio.yml
+ when:
+ - openshift_use_crio | bool
+ - openshift_docker_is_node_or_master | bool
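
The three include_role branches above are mutually selected by a small set of booleans. An illustrative fragment of those settings in YAML form (the names match the when conditions in the play above; the values are examples only, not recommendations):

# Illustrative settings; with these values only the package_docker.yml branch runs.
openshift_use_crio: False
openshift_use_crio_only: False
openshift_docker_use_system_container: False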
diff --git a/playbooks/container-runtime/private/roles b/playbooks/container-runtime/private/roles
new file mode 120000
index 000000000..148b13206
--- /dev/null
+++ b/playbooks/container-runtime/private/roles
@@ -0,0 +1 @@
+../../roles/ \ No newline at end of file
diff --git a/playbooks/deploy_cluster.yml b/playbooks/deploy_cluster.yml
new file mode 100644
index 000000000..0e6bde09a
--- /dev/null
+++ b/playbooks/deploy_cluster.yml
@@ -0,0 +1,46 @@
+---
+- import_playbook: init/main.yml
+
+- import_playbook: openshift-checks/private/install.yml
+
+- import_playbook: openshift-etcd/private/config.yml
+
+- import_playbook: openshift-nfs/private/config.yml
+ when: groups.oo_nfs_to_config | default([]) | count > 0
+
+- import_playbook: openshift-loadbalancer/private/config.yml
+ when: groups.oo_lb_to_config | default([]) | count > 0
+
+- import_playbook: openshift-master/private/config.yml
+
+- import_playbook: openshift-master/private/additional_config.yml
+
+- import_playbook: openshift-node/private/config.yml
+
+- import_playbook: openshift-glusterfs/private/config.yml
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
+
+- import_playbook: openshift-hosted/private/config.yml
+
+- import_playbook: openshift-metrics/private/config.yml
+ when: openshift_metrics_install_metrics | default(false) | bool
+
+- import_playbook: openshift-logging/private/config.yml
+ when: openshift_logging_install_logging | default(false) | bool
+
+- import_playbook: openshift-prometheus/private/config.yml
+ when: openshift_hosted_prometheus_deploy | default(false) | bool
+
+- import_playbook: openshift-service-catalog/private/config.yml
+ when: openshift_enable_service_catalog | default(true) | bool
+
+- import_playbook: openshift-management/private/config.yml
+ when: openshift_management_install_management | default(false) | bool
+
+- name: Print deprecated variable warning message if necessary
+ hosts: oo_first_master
+ gather_facts: no
+ tasks:
+ - debug: msg="{{__deprecation_message}}"
+ when:
+ - __deprecation_message | default ('') | length > 0
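
deploy_cluster.yml becomes the single entry point that cloud-specific playbooks (see the GCP change below) can hand off to. A hedged sketch of a hypothetical wrapper that provisions first and then reuses it; the provisioning play here is purely illustrative:

---
# Hypothetical wrapper (sketch only): run site-specific provisioning first,
# then hand off to the consolidated entry point.
- name: Provision infrastructure (illustrative placeholder)
  hosts: localhost
  connection: local
  gather_facts: no
  tasks:
  - debug:
      msg: "site-specific provisioning would happen here"

- import_playbook: deploy_cluster.yml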
diff --git a/playbooks/gcp/openshift-cluster/provision.yml b/playbooks/gcp/provision.yml
index 097717607..6016e6a78 100644
--- a/playbooks/gcp/openshift-cluster/provision.yml
+++ b/playbooks/gcp/provision.yml
@@ -9,8 +9,5 @@
include_role:
name: openshift_gcp
-- name: run the init
- include: ../../init/main.yml
-
-- name: run the config
- include: ../../common/openshift-cluster/config.yml
+- name: run the cluster deploy
+ import_playbook: ../deploy_cluster.yml
diff --git a/playbooks/init/evaluate_groups.yml b/playbooks/init/evaluate_groups.yml
index 8787c87e1..8087f6ffc 100644
--- a/playbooks/init/evaluate_groups.yml
+++ b/playbooks/init/evaluate_groups.yml
@@ -46,14 +46,9 @@
- name: Evaluate groups - Fail if no etcd hosts group is defined
fail:
msg: >
- Running etcd as an embedded service is no longer supported. If this is a
- new install please define an 'etcd' group with either one or three
- hosts. These hosts may be the same hosts as your masters. If this is an
- upgrade you may set openshift_master_unsupported_embedded_etcd=true
- until a migration playbook becomes available.
+ Running etcd as an embedded service is no longer supported.
when:
- g_etcd_hosts | default([]) | length not in [3,1]
- - not openshift_master_unsupported_embedded_etcd | default(False)
- not (openshift_node_bootstrap | default(False))
- name: Evaluate oo_all_hosts
diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml
index 5a7483b72..b2b972a7d 100644
--- a/playbooks/init/main.yml
+++ b/playbooks/init/main.yml
@@ -18,8 +18,10 @@
- import_playbook: facts.yml
- import_playbook: sanity_checks.yml
+ when: not (skip_sanity_checks | default(False))
- import_playbook: validate_hostnames.yml
+ when: not (skip_validate_hostnames | default(False))
- import_playbook: repos.yml
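
The two new guards make the sanity and hostname checks optional. A minimal sketch of how a caller opts out, assuming the flags are passed as import-time vars (image_prep.yml further down does exactly this via prerequisites.yml):

- import_playbook: init/main.yml
  vars:
    skip_sanity_checks: True
    skip_validate_hostnames: True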
diff --git a/playbooks/openshift-etcd/private/ca.yml b/playbooks/openshift-etcd/private/ca.yml
index c9f186e72..f3bb3c2d1 100644
--- a/playbooks/openshift-etcd/private/ca.yml
+++ b/playbooks/openshift-etcd/private/ca.yml
@@ -7,7 +7,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: ca
+ tasks_from: ca.yml
vars:
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
diff --git a/playbooks/openshift-etcd/private/certificates-backup.yml b/playbooks/openshift-etcd/private/certificates-backup.yml
index d738c8207..ce21a1f96 100644
--- a/playbooks/openshift-etcd/private/certificates-backup.yml
+++ b/playbooks/openshift-etcd/private/certificates-backup.yml
@@ -5,10 +5,10 @@
tasks:
- include_role:
name: etcd
- tasks_from: backup_generated_certificates
+ tasks_from: backup_generated_certificates.yml
- include_role:
name: etcd
- tasks_from: remove_generated_certificates
+ tasks_from: remove_generated_certificates.yml
- name: Backup deployed etcd certificates
hosts: oo_etcd_to_config
@@ -16,4 +16,4 @@
tasks:
- include_role:
name: etcd
- tasks_from: backup_server_certificates
+ tasks_from: backup_server_certificates.yml
diff --git a/playbooks/openshift-etcd/private/config.yml b/playbooks/openshift-etcd/private/config.yml
index 3d6c79834..35407969e 100644
--- a/playbooks/openshift-etcd/private/config.yml
+++ b/playbooks/openshift-etcd/private/config.yml
@@ -19,7 +19,6 @@
hosts: oo_etcd_to_config
any_errors_fatal: true
roles:
- - role: os_firewall
- role: openshift_clock
- role: openshift_etcd
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
diff --git a/playbooks/openshift-etcd/private/embedded2external.yml b/playbooks/openshift-etcd/private/embedded2external.yml
index 514319b88..be177b714 100644
--- a/playbooks/openshift-etcd/private/embedded2external.yml
+++ b/playbooks/openshift-etcd/private/embedded2external.yml
@@ -20,9 +20,9 @@
- name: Check the master API is ready
include_role:
name: openshift_master
- tasks_from: check_master_api_is_ready
+ tasks_from: check_master_api_is_ready.yml
- set_fact:
- master_service: "{{ openshift.common.service_type + '-master' }}"
+ master_service: "{{ openshift_service_type + '-master' }}"
embedded_etcd_backup_suffix: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- debug:
msg: "master service name: {{ master_service }}"
@@ -34,7 +34,7 @@
# Can't use with_items with include_role: https://github.com/ansible/ansible/issues/21285
- include_role:
name: etcd
- tasks_from: backup
+ tasks_from: backup.yml
vars:
r_etcd_common_backup_tag: pre-migrate
r_etcd_common_embedded_etcd: "{{ true }}"
@@ -42,7 +42,7 @@
- include_role:
name: etcd
- tasks_from: backup.archive
+ tasks_from: backup.archive.yml
vars:
r_etcd_common_backup_tag: pre-migrate
r_etcd_common_embedded_etcd: "{{ true }}"
@@ -58,7 +58,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: backup_master_etcd_certificates
+ tasks_from: backup_master_etcd_certificates.yml
- name: Redeploy master etcd certificates
import_playbook: master_etcd_certificates.yml
@@ -75,10 +75,10 @@
pre_tasks:
- include_role:
name: etcd
- tasks_from: disable_etcd
+ tasks_from: disable_etcd.yml
- include_role:
name: etcd
- tasks_from: clean_data
+ tasks_from: clean_data.yml
# 6. copy the embedded etcd backup to the external host
# TODO(jchaloup): if the etcd and first master are on the same host, just copy the directory
@@ -93,7 +93,7 @@
- include_role:
name: etcd
- tasks_from: backup.fetch
+ tasks_from: backup.fetch.yml
vars:
etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
r_etcd_common_backup_tag: pre-migrate
@@ -103,7 +103,7 @@
- include_role:
name: etcd
- tasks_from: backup.copy
+ tasks_from: backup.copy.yml
vars:
etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
r_etcd_common_backup_tag: pre-migrate
@@ -124,14 +124,14 @@
tasks:
- include_role:
name: etcd
- tasks_from: backup.unarchive
+ tasks_from: backup.unarchive.yml
vars:
r_etcd_common_backup_tag: pre-migrate
r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
- include_role:
name: etcd
- tasks_from: backup.force_new_cluster
+ tasks_from: backup.force_new_cluster.yml
vars:
r_etcd_common_backup_tag: pre-migrate
r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
@@ -145,7 +145,7 @@
tasks:
- include_role:
name: openshift_master
- tasks_from: configure_external_etcd
+ tasks_from: configure_external_etcd.yml
vars:
etcd_peer_url_scheme: "https"
etcd_ip: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.ip }}"
diff --git a/playbooks/openshift-etcd/private/migrate.yml b/playbooks/openshift-etcd/private/migrate.yml
index 4269918c2..313ed8bec 100644
--- a/playbooks/openshift-etcd/private/migrate.yml
+++ b/playbooks/openshift-etcd/private/migrate.yml
@@ -17,9 +17,8 @@
tasks:
- include_role:
name: etcd
- tasks_from: migrate.pre_check
+ tasks_from: migrate.pre_check.yml
vars:
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcd_peer: "{{ ansible_default_ipv4.address }}"
# TODO: This will be different for release-3.6 branch
@@ -28,8 +27,8 @@
tasks:
- set_fact:
master_services:
- - "{{ openshift.common.service_type + '-master-controllers' }}"
- - "{{ openshift.common.service_type + '-master-api' }}"
+ - "{{ openshift_service_type + '-master-controllers' }}"
+ - "{{ openshift_service_type + '-master-api' }}"
- debug:
msg: "master service name: {{ master_services }}"
- name: Stop masters
@@ -46,10 +45,9 @@
post_tasks:
- include_role:
name: etcd
- tasks_from: backup
+ tasks_from: backup.yml
vars:
r_etcd_common_backup_tag: pre-migration
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- name: Gate on etcd backup
@@ -74,7 +72,7 @@
pre_tasks:
- include_role:
name: etcd
- tasks_from: disable_etcd
+ tasks_from: disable_etcd.yml
- name: Migrate data on first etcd
hosts: oo_etcd_to_migrate[0]
@@ -82,9 +80,8 @@
tasks:
- include_role:
name: etcd
- tasks_from: migrate
+ tasks_from: migrate.yml
vars:
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcd_peer: "{{ openshift.common.ip }}"
etcd_url_scheme: "https"
etcd_peer_url_scheme: "https"
@@ -95,9 +92,8 @@
tasks:
- include_role:
name: etcd
- tasks_from: clean_data
+ tasks_from: clean_data.yml
vars:
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcd_peer: "{{ openshift.common.ip }}"
etcd_url_scheme: "https"
etcd_peer_url_scheme: "https"
@@ -132,7 +128,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: migrate.add_ttls
+ tasks_from: migrate.add_ttls.yml
vars:
etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].openshift.common.ip }}"
etcd_url_scheme: "https"
@@ -144,7 +140,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: migrate.configure_master
+ tasks_from: migrate.configure_master.yml
when: etcd_migration_failed | length == 0
- debug:
msg: "Skipping master re-configuration since migration failed."
diff --git a/playbooks/openshift-etcd/private/redeploy-ca.yml b/playbooks/openshift-etcd/private/redeploy-ca.yml
index cc5d57031..158bcb849 100644
--- a/playbooks/openshift-etcd/private/redeploy-ca.yml
+++ b/playbooks/openshift-etcd/private/redeploy-ca.yml
@@ -16,12 +16,12 @@
tasks:
- include_role:
name: etcd
- tasks_from: backup_ca_certificates
+ tasks_from: backup_ca_certificates.yml
- include_role:
name: etcd
- tasks_from: remove_ca_certificates
+ tasks_from: remove_ca_certificates.yml
-- include: ca.yml
+- import_playbook: ca.yml
- name: Create temp directory for syncing certs
hosts: localhost
@@ -44,7 +44,7 @@
etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
-- include: restart.yml
+- import_playbook: restart.yml
# Do not restart etcd when etcd certificates were previously expired.
when: ('expired' not in (hostvars
| oo_select_keys(groups['etcd'])
@@ -56,7 +56,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: retrieve_ca_certificates
+ tasks_from: retrieve_ca_certificates.yml
vars:
etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}"
@@ -82,7 +82,7 @@
state: absent
changed_when: false
-- include: ../../openshift-master/private/restart.yml
+- import_playbook: ../../openshift-master/private/restart.yml
# Do not restart masters when master or etcd certificates were previously expired.
when:
# masters
diff --git a/playbooks/openshift-etcd/private/redeploy-certificates.yml b/playbooks/openshift-etcd/private/redeploy-certificates.yml
index cc1e6adf5..1c8eb27ac 100644
--- a/playbooks/openshift-etcd/private/redeploy-certificates.yml
+++ b/playbooks/openshift-etcd/private/redeploy-certificates.yml
@@ -11,8 +11,8 @@
# certificates were previously expired.
- role: openshift_certificate_expiry
-- include: certificates-backup.yml
+- import_playbook: certificates-backup.yml
-- include: certificates.yml
+- import_playbook: certificates.yml
vars:
etcd_certificates_redeploy: true
diff --git a/playbooks/openshift-etcd/private/scaleup.yml b/playbooks/openshift-etcd/private/scaleup.yml
index fac8e3f02..3ef043ec8 100644
--- a/playbooks/openshift-etcd/private/scaleup.yml
+++ b/playbooks/openshift-etcd/private/scaleup.yml
@@ -32,7 +32,7 @@
until: etcd_add_check.rc == 0
- include_role:
name: etcd
- tasks_from: server_certificates
+ tasks_from: server_certificates.yml
vars:
etcd_peers: "{{ groups.oo_new_etcd_to_config | default([], true) }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_new_etcd_to_config | default([], true) }}"
@@ -78,4 +78,4 @@
post_tasks:
- include_role:
name: openshift_master
- tasks_from: update_etcd_client_urls
+ tasks_from: update_etcd_client_urls.yml
diff --git a/playbooks/openshift-etcd/private/server_certificates.yml b/playbooks/openshift-etcd/private/server_certificates.yml
index 14c74baf3..695b53990 100644
--- a/playbooks/openshift-etcd/private/server_certificates.yml
+++ b/playbooks/openshift-etcd/private/server_certificates.yml
@@ -7,7 +7,7 @@
post_tasks:
- include_role:
name: etcd
- tasks_from: server_certificates
+ tasks_from: server_certificates.yml
vars:
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/openshift-etcd/private/upgrade_backup.yml
index 531175c85..7dfea07f1 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/openshift-etcd/private/upgrade_backup.yml
@@ -6,10 +6,9 @@
post_tasks:
- include_role:
name: etcd
- tasks_from: backup
+ tasks_from: backup.yml
vars:
r_etcd_common_backup_tag: "{{ etcd_backup_tag }}"
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- name: Gate on etcd backup
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/openshift-etcd/private/upgrade_image_members.yml
index 6fca42bd0..c133c0201 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
+++ b/playbooks/openshift-etcd/private/upgrade_image_members.yml
@@ -8,7 +8,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: upgrade_image
+ tasks_from: upgrade_image.yml
vars:
r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
etcd_peer: "{{ openshift.common.hostname }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/openshift-etcd/private/upgrade_main.yml
index 5b8ba3bb2..e373a4a4c 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
+++ b/playbooks/openshift-etcd/private/upgrade_main.yml
@@ -6,7 +6,7 @@
# available in the repos. So for Fedora we'll simply skip this, sorry.
- name: Backup etcd before upgrading anything
- include: backup.yml
+ import_playbook: upgrade_backup.yml
vars:
etcd_backup_tag: "pre-upgrade-"
when: openshift_etcd_backup | default(true) | bool
@@ -16,14 +16,14 @@
tasks:
- include_role:
name: etcd
- tasks_from: drop_etcdctl
+ tasks_from: drop_etcdctl.yml
- name: Perform etcd upgrade
- include: ./upgrade.yml
+ import_playbook: upgrade_step.yml
when: openshift_etcd_upgrade | default(true) | bool
- name: Backup etcd
- include: backup.yml
+ import_playbook: upgrade_backup.yml
vars:
etcd_backup_tag: "post-3.0-"
when: openshift_etcd_backup | default(true) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
index 51e8786b3..902c39d9c 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
+++ b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
@@ -8,7 +8,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: upgrade_rpm
+ tasks_from: upgrade_rpm.yml
vars:
r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
etcd_peer: "{{ openshift.common.hostname }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/openshift-etcd/private/upgrade_step.yml
index c5ff4133c..60127fc68 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/openshift-etcd/private/upgrade_step.yml
@@ -6,47 +6,47 @@
name: etcd
tasks_from: version_detect.yml
-- include: upgrade_rpm_members.yml
+- import_playbook: upgrade_rpm_members.yml
vars:
etcd_upgrade_version: '2.1'
-- include: upgrade_rpm_members.yml
+- import_playbook: upgrade_rpm_members.yml
vars:
etcd_upgrade_version: '2.2'
-- include: upgrade_image_members.yml
+- import_playbook: upgrade_image_members.yml
vars:
etcd_upgrade_version: '2.2.5'
-- include: upgrade_rpm_members.yml
+- import_playbook: upgrade_rpm_members.yml
vars:
etcd_upgrade_version: '2.3'
-- include: upgrade_image_members.yml
+- import_playbook: upgrade_image_members.yml
vars:
etcd_upgrade_version: '2.3.7'
-- include: upgrade_rpm_members.yml
+- import_playbook: upgrade_rpm_members.yml
vars:
etcd_upgrade_version: '3.0'
-- include: upgrade_image_members.yml
+- import_playbook: upgrade_image_members.yml
vars:
etcd_upgrade_version: '3.0.15'
-- include: upgrade_rpm_members.yml
+- import_playbook: upgrade_rpm_members.yml
vars:
etcd_upgrade_version: '3.1'
-- include: upgrade_image_members.yml
+- import_playbook: upgrade_image_members.yml
vars:
etcd_upgrade_version: '3.1.3'
-- include: upgrade_rpm_members.yml
+- import_playbook: upgrade_rpm_members.yml
vars:
etcd_upgrade_version: '3.2'
-- include: upgrade_image_members.yml
+- import_playbook: upgrade_image_members.yml
vars:
etcd_upgrade_version: '3.2.7'
@@ -56,7 +56,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: upgrade_image
+ tasks_from: upgrade_image.yml
vars:
etcd_peer: "{{ openshift.common.hostname }}"
when:
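
A change repeated throughout these etcd hunks is spelling out the .yml suffix on tasks_from. A minimal sketch of the call shape the plays now use (the version value is an example taken from the upgrade steps above):

- include_role:
    name: etcd
    tasks_from: upgrade_rpm.yml  # explicit file name, matching the new convention
  vars:
    r_etcd_upgrade_version: '3.2'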
diff --git a/playbooks/openshift-etcd/redeploy-ca.yml b/playbooks/openshift-etcd/redeploy-ca.yml
index b1d23675d..769d694ba 100644
--- a/playbooks/openshift-etcd/redeploy-ca.yml
+++ b/playbooks/openshift-etcd/redeploy-ca.yml
@@ -1,4 +1,4 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-ca.yml
+- import_playbook: private/redeploy-ca.yml
diff --git a/playbooks/openshift-etcd/redeploy-certificates.yml b/playbooks/openshift-etcd/redeploy-certificates.yml
index 1bd302c03..753878d70 100644
--- a/playbooks/openshift-etcd/redeploy-certificates.yml
+++ b/playbooks/openshift-etcd/redeploy-certificates.yml
@@ -1,10 +1,10 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-certificates.yml
+- import_playbook: private/redeploy-certificates.yml
-- include: private/restart.yml
+- import_playbook: private/restart.yml
vars:
g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}"
-- include: ../openshift-master/private/restart.yml
+- import_playbook: ../openshift-master/private/restart.yml
diff --git a/playbooks/openshift-etcd/upgrade.yml b/playbooks/openshift-etcd/upgrade.yml
new file mode 100644
index 000000000..ccc797527
--- /dev/null
+++ b/playbooks/openshift-etcd/upgrade.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/evaluate_groups.yml
+
+- import_playbook: private/upgrade_main.yml
diff --git a/playbooks/openshift-glusterfs/README.md b/playbooks/openshift-glusterfs/README.md
index f62aea229..107bbfff6 100644
--- a/playbooks/openshift-glusterfs/README.md
+++ b/playbooks/openshift-glusterfs/README.md
@@ -26,6 +26,9 @@ file. The hosts in this group are the nodes of the GlusterFS cluster.
devices but you must specify the following variables in `[OSEv3:vars]`:
* `openshift_storage_glusterfs_is_missing=False`
* `openshift_storage_glusterfs_heketi_is_missing=False`
+ * If GlusterFS will be running natively, the target hosts must also be listed
+ in the `nodes` group. They must also already be configured as OpenShift
+ nodes before this playbook runs.
By default, pods for a native GlusterFS cluster will be created in the
`default` namespace. To change this, specify
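
A hedged illustration of the requirement added above, written as a YAML inventory fragment: hosts running GlusterFS natively appear in both the glusterfs and nodes groups (host names are hypothetical):

all:
  children:
    nodes:
      hosts:
        node1.example.com:
        node2.example.com:
    glusterfs:
      hosts:
        node1.example.com:
        node2.example.com: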
diff --git a/playbooks/openshift-glusterfs/private/registry.yml b/playbooks/openshift-glusterfs/private/registry.yml
index 75c1f0300..917b729f9 100644
--- a/playbooks/openshift-glusterfs/private/registry.yml
+++ b/playbooks/openshift-glusterfs/private/registry.yml
@@ -1,40 +1,11 @@
---
- import_playbook: config.yml
-- name: Initialize GlusterFS registry PV and PVC vars
- hosts: oo_first_master
- tags: hosted
- tasks:
- - set_fact:
- glusterfs_pv: []
- glusterfs_pvc: []
-
- - set_fact:
- glusterfs_pv:
- - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume"
- capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
- access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
- storage:
- glusterfs:
- endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}"
- path: "{{ openshift.hosted.registry.storage.glusterfs.path }}"
- readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}"
- glusterfs_pvc:
- - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
- capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
- access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
- when: openshift.hosted.registry.storage.glusterfs.swap
-
- name: Create persistent volumes
hosts: oo_first_master
- tags:
- - hosted
- vars:
- persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}"
- persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}"
roles:
- role: openshift_persistent_volumes
- when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0
+ when: openshift_hosted_registry_storage_glusterfs_swap | default(False)
- name: Create Hosted Resources
hosts: oo_first_master
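
The persistent-volume play above is now gated by a single flat inventory variable instead of the openshift.hosted.* fact tree. An illustrative setting (the value is an example):

# Illustrative: enables the registry storage swap path checked by the play above.
openshift_hosted_registry_storage_glusterfs_swap: True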
diff --git a/playbooks/openshift-hosted/private/create_persistent_volumes.yml b/playbooks/openshift-hosted/private/create_persistent_volumes.yml
index 8a60a30b8..41ae2eb69 100644
--- a/playbooks/openshift-hosted/private/create_persistent_volumes.yml
+++ b/playbooks/openshift-hosted/private/create_persistent_volumes.yml
@@ -1,9 +1,5 @@
---
- name: Create Hosted Resources - persistent volumes
hosts: oo_first_master
- vars:
- persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
- persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
roles:
- role: openshift_persistent_volumes
- when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
diff --git a/playbooks/openshift-hosted/redeploy-registry-certificates.yml b/playbooks/openshift-hosted/redeploy-registry-certificates.yml
index 65fb0abda..518a1d624 100644
--- a/playbooks/openshift-hosted/redeploy-registry-certificates.yml
+++ b/playbooks/openshift-hosted/redeploy-registry-certificates.yml
@@ -1,4 +1,4 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-registry-certificates.yml
+- import_playbook: private/redeploy-registry-certificates.yml
diff --git a/playbooks/openshift-hosted/redeploy-router-certificates.yml b/playbooks/openshift-hosted/redeploy-router-certificates.yml
index 8dc052751..a74dd8c79 100644
--- a/playbooks/openshift-hosted/redeploy-router-certificates.yml
+++ b/playbooks/openshift-hosted/redeploy-router-certificates.yml
@@ -1,4 +1,4 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-router-certificates.yml
+- import_playbook: private/redeploy-router-certificates.yml
diff --git a/playbooks/openshift-loadbalancer/private/config.yml b/playbooks/openshift-loadbalancer/private/config.yml
index 78fe663db..2636d857e 100644
--- a/playbooks/openshift-loadbalancer/private/config.yml
+++ b/playbooks/openshift-loadbalancer/private/config.yml
@@ -11,13 +11,6 @@
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
-- name: Configure firewall load balancers
- hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config
- vars:
- openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
- roles:
- - role: os_firewall
-
- name: Configure load balancers
hosts: oo_lb_to_config
vars:
diff --git a/playbooks/openshift-logging/config.yml b/playbooks/openshift-logging/config.yml
new file mode 100644
index 000000000..83d330284
--- /dev/null
+++ b/playbooks/openshift-logging/config.yml
@@ -0,0 +1,9 @@
+---
+#
+# This playbook is a preview of upcoming changes for installing
+# hosted logging. See inventory/hosts.example for the
+# currently supported method.
+#
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/openshift-logging/private/config.yml
index bc59bd95a..bc59bd95a 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/openshift-logging/private/config.yml
diff --git a/playbooks/openshift-logging/private/filter_plugins b/playbooks/openshift-logging/private/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/openshift-logging/private/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/openshift-logging/private/library b/playbooks/openshift-logging/private/library
new file mode 120000
index 000000000..ba40d2f56
--- /dev/null
+++ b/playbooks/openshift-logging/private/library
@@ -0,0 +1 @@
+../../../library \ No newline at end of file
diff --git a/playbooks/openshift-logging/private/lookup_plugins b/playbooks/openshift-logging/private/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/openshift-logging/private/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/openshift-logging/private/roles b/playbooks/openshift-logging/private/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/openshift-logging/private/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml
index b7cfbe4e4..a90cd6b22 100644
--- a/playbooks/openshift-master/private/additional_config.yml
+++ b/playbooks/openshift-master/private/additional_config.yml
@@ -19,8 +19,6 @@
openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
roles:
- - role: openshift_master_cluster
- when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
- role: openshift_project_request_template
when: openshift_project_request_template_manage
- role: openshift_examples
diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml
index afb8d6bd1..15d301ddb 100644
--- a/playbooks/openshift-master/private/config.yml
+++ b/playbooks/openshift-master/private/config.yml
@@ -19,7 +19,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Gather and set facts for master hosts
hosts: oo_masters_to_config
@@ -180,9 +179,7 @@
| oo_collect('openshift.common.ip') | default([]) | join(',')
}}"
roles:
- - role: os_firewall
- role: openshift_master_facts
- - role: openshift_hosted_facts
- role: openshift_clock
- role: openshift_cloud_provider
- role: openshift_builddefaults
@@ -228,6 +225,8 @@
- name: Configure API Aggregation on masters
hosts: oo_masters
serial: 1
+ roles:
+ - role: openshift_facts
tasks:
- include_tasks: tasks/wire_aggregator.yml
@@ -237,7 +236,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Master Install Checkpoint End
hosts: all
diff --git a/playbooks/openshift-master/private/redeploy-certificates.yml b/playbooks/openshift-master/private/redeploy-certificates.yml
index 3bd38a61d..c0f75ae80 100644
--- a/playbooks/openshift-master/private/redeploy-certificates.yml
+++ b/playbooks/openshift-master/private/redeploy-certificates.yml
@@ -1,6 +1,6 @@
---
-- include: certificates-backup.yml
+- import_playbook: certificates-backup.yml
-- include: certificates.yml
+- import_playbook: certificates.yml
vars:
openshift_certificates_redeploy: true
diff --git a/playbooks/openshift-master/private/redeploy-openshift-ca.yml b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
index 59657574a..9f5502141 100644
--- a/playbooks/openshift-master/private/redeploy-openshift-ca.yml
+++ b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
@@ -56,7 +56,7 @@
- groups.oo_etcd_to_config | default([]) | length == 0
- (g_master_config_output.content|b64decode|from_yaml).etcdConfig.servingInfo.clientCA != 'ca-bundle.crt'
# Set servingInfo.clientCA to client-ca-bundle.crt in order to roll the CA certificate.
- # This change will be reverted in playbooks/byo/openshift-cluster/redeploy-certificates.yml
+ # This change will be reverted in playbooks/redeploy-certificates.yml
- modify_yaml:
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: servingInfo.clientCA
@@ -207,7 +207,7 @@
group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
with_items: "{{ client_users }}"
-- include: restart.yml
+- import_playbook: restart.yml
# Do not restart masters when master or etcd certificates were previously expired.
when:
# masters
@@ -272,7 +272,7 @@
state: absent
changed_when: false
-- include: ../../openshift-node/private/restart.yml
+- import_playbook: ../../openshift-node/private/restart.yml
# Do not restart nodes when node, master or etcd certificates were previously expired.
when:
# nodes
diff --git a/playbooks/openshift-master/private/scaleup.yml b/playbooks/openshift-master/private/scaleup.yml
index 8229eccfa..007b23ea3 100644
--- a/playbooks/openshift-master/private/scaleup.yml
+++ b/playbooks/openshift-master/private/scaleup.yml
@@ -20,11 +20,11 @@
- restart master controllers
handlers:
- name: restart master api
- service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ service: name={{ openshift_service_type }}-master-controllers state=restarted
notify: verify api server
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
index 97acc5d5d..4f55d5c82 100644
--- a/playbooks/openshift-master/private/tasks/wire_aggregator.yml
+++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
@@ -180,21 +180,19 @@
#restart master serially here
- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
when:
- yedit_output.changed
- - openshift.master.cluster_method == 'native'
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
until: result.rc == 0
when:
- yedit_output.changed
- - openshift.master.cluster_method == 'native'
- name: Verify API Server
# Using curl here since the uri module requires python-httplib2 and
diff --git a/playbooks/openshift-master/private/validate_restart.yml b/playbooks/openshift-master/private/validate_restart.yml
index 5dbb21502..1077d0b9c 100644
--- a/playbooks/openshift-master/private/validate_restart.yml
+++ b/playbooks/openshift-master/private/validate_restart.yml
@@ -14,9 +14,6 @@
- role: common
local_facts:
rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
- - role: master
- local_facts:
- cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
# Creating a temp file on localhost, we then check each system that will
# be rebooted to see if that file exists, if so we know we're running
diff --git a/playbooks/openshift-master/redeploy-certificates.yml b/playbooks/openshift-master/redeploy-certificates.yml
index df727247b..8b7272485 100644
--- a/playbooks/openshift-master/redeploy-certificates.yml
+++ b/playbooks/openshift-master/redeploy-certificates.yml
@@ -1,6 +1,6 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-certificates.yml
+- import_playbook: private/redeploy-certificates.yml
-- include: private/restart.yml
+- import_playbook: private/restart.yml
diff --git a/playbooks/openshift-master/redeploy-openshift-ca.yml b/playbooks/openshift-master/redeploy-openshift-ca.yml
index 3ae74c7a0..27f4e6b7d 100644
--- a/playbooks/openshift-master/redeploy-openshift-ca.yml
+++ b/playbooks/openshift-master/redeploy-openshift-ca.yml
@@ -1,4 +1,4 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-openshift-ca.yml
+- import_playbook: private/redeploy-openshift-ca.yml
diff --git a/playbooks/openshift-nfs/private/config.yml b/playbooks/openshift-nfs/private/config.yml
index 6ea77e00b..3625efcc6 100644
--- a/playbooks/openshift-nfs/private/config.yml
+++ b/playbooks/openshift-nfs/private/config.yml
@@ -14,7 +14,6 @@
- name: Configure nfs
hosts: oo_nfs_to_config
roles:
- - role: os_firewall
- role: openshift_storage_nfs
- name: NFS Install Checkpoint End
diff --git a/playbooks/openshift-node/private/additional_config.yml b/playbooks/openshift-node/private/additional_config.yml
index 261e2048f..b86cb3cc2 100644
--- a/playbooks/openshift-node/private/additional_config.yml
+++ b/playbooks/openshift-node/private/additional_config.yml
@@ -33,7 +33,6 @@
roles:
- role: flannel
etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
when: openshift_use_flannel | default(false) | bool
- name: Additional node config
diff --git a/playbooks/openshift-node/private/configure_nodes.yml b/playbooks/openshift-node/private/configure_nodes.yml
index dc5d7a57e..32b288c8b 100644
--- a/playbooks/openshift-node/private/configure_nodes.yml
+++ b/playbooks/openshift-node/private/configure_nodes.yml
@@ -10,7 +10,6 @@
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
roles:
- - role: os_firewall
- role: openshift_clock
- role: openshift_node
- role: tuned
diff --git a/playbooks/openshift-node/private/containerized_nodes.yml b/playbooks/openshift-node/private/containerized_nodes.yml
index 5afa83be7..ef07669cb 100644
--- a/playbooks/openshift-node/private/containerized_nodes.yml
+++ b/playbooks/openshift-node/private/containerized_nodes.yml
@@ -12,7 +12,6 @@
}}"
roles:
- - role: os_firewall
- role: openshift_clock
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
diff --git a/playbooks/openshift-node/private/enable_excluders.yml b/playbooks/openshift-node/private/enable_excluders.yml
index 5288b14f9..30713e694 100644
--- a/playbooks/openshift-node/private/enable_excluders.yml
+++ b/playbooks/openshift-node/private/enable_excluders.yml
@@ -5,4 +5,3 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/openshift-node/private/image_prep.yml b/playbooks/openshift-node/private/image_prep.yml
index b7ac27bda..6b517197d 100644
--- a/playbooks/openshift-node/private/image_prep.yml
+++ b/playbooks/openshift-node/private/image_prep.yml
@@ -1,12 +1,10 @@
---
- name: normalize groups
- import_playbook: ../../init/evaluate_groups.yml
-
-- name: initialize the facts
- import_playbook: ../../init/facts.yml
-
-- name: initialize the repositories
- import_playbook: ../../init/repos.yml
+ import_playbook: ../../prerequisites.yml
+ vars:
+ skip_version: True
+ skip_sanity_checks: True
+ skip_validate_hostnames: True
- name: run node config setup
import_playbook: setup.yml
diff --git a/playbooks/openshift-node/private/network_manager.yml b/playbooks/openshift-node/private/network_manager.yml
index 7211787be..39640345f 100644
--- a/playbooks/openshift-node/private/network_manager.yml
+++ b/playbooks/openshift-node/private/network_manager.yml
@@ -1,6 +1,4 @@
---
-- import_playbook: ../../init/evaluate_groups.yml
-
- name: Install and configure NetworkManager
hosts: oo_all_hosts
become: yes
diff --git a/playbooks/openshift-node/private/redeploy-certificates.yml b/playbooks/openshift-node/private/redeploy-certificates.yml
index 3bd38a61d..c0f75ae80 100644
--- a/playbooks/openshift-node/private/redeploy-certificates.yml
+++ b/playbooks/openshift-node/private/redeploy-certificates.yml
@@ -1,6 +1,6 @@
---
-- include: certificates-backup.yml
+- import_playbook: certificates-backup.yml
-- include: certificates.yml
+- import_playbook: certificates.yml
vars:
openshift_certificates_redeploy: true
diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml
index 41eb00f99..0786bd7d3 100644
--- a/playbooks/openshift-node/private/restart.yml
+++ b/playbooks/openshift-node/private/restart.yml
@@ -23,9 +23,9 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
when: openshift.common.is_containerized | bool
@@ -40,7 +40,7 @@
- name: restart node
service:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
state: restarted
- name: Wait for node to be ready
diff --git a/playbooks/openshift-node/private/setup.yml b/playbooks/openshift-node/private/setup.yml
index 794c03a67..541913aef 100644
--- a/playbooks/openshift-node/private/setup.yml
+++ b/playbooks/openshift-node/private/setup.yml
@@ -5,7 +5,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Evaluate node groups
hosts: localhost
diff --git a/playbooks/openshift-node/redeploy-certificates.yml b/playbooks/openshift-node/redeploy-certificates.yml
index df727247b..8b7272485 100644
--- a/playbooks/openshift-node/redeploy-certificates.yml
+++ b/playbooks/openshift-node/redeploy-certificates.yml
@@ -1,6 +1,6 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
-- include: private/redeploy-certificates.yml
+- import_playbook: private/redeploy-certificates.yml
-- include: private/restart.yml
+- import_playbook: private/restart.yml
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
index ff0b7adc7..d361d6278 100644
--- a/playbooks/openstack/README.md
+++ b/playbooks/openstack/README.md
@@ -215,7 +215,6 @@ advanced configuration:
* [External Dns][external-dns]
* Multiple Clusters (TODO)
* [Cinder Registry][cinder-registry]
-* [Bastion Node][bastion]
[ansible]: https://www.ansible.com/
@@ -227,11 +226,10 @@ advanced configuration:
[hardware-requirements]: https://docs.openshift.org/latest/install_config/install/prerequisites.html#hardware
[origin]: https://www.openshift.org/
[centos7]: https://www.centos.org/
-[sample-openshift-inventory]: https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.example
+[sample-openshift-inventory]: https://github.com/openshift/openshift-ansible/blob/master/inventory/hosts.example
[advanced-configuration]: ./advanced-configuration.md
[accessing-openshift]: ./advanced-configuration.md#accessing-the-openshift-cluster
[uninstall-openshift]: ./advanced-configuration.md#removing-the-openshift-cluster
[loadbalancer]: ./advanced-configuration.md#multi-master-configuration
[external-dns]: ./advanced-configuration.md#dns-configuration-variables
[cinder-registry]: ./advanced-configuration.md#creating-and-using-a-cinder-volume-for-the-openshift-registry
-[bastion]: ./advanced-configuration.md#configure-static-inventory-and-access-via-a-bastion-node
diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md
index c0bdf5020..2c9b70b5f 100644
--- a/playbooks/openstack/advanced-configuration.md
+++ b/playbooks/openstack/advanced-configuration.md
@@ -23,35 +23,14 @@ There are no additional dependencies for the cluster nodes. Required
configuration steps are done by Heat given a specific user data config
that normally should not be changed.
-## Required galaxy modules
-
-In order to pull in external dependencies for DNS configuration steps,
-the following commads need to be executed:
-
- ansible-galaxy install \
- -r openshift-ansible-contrib/playbooks/provisioning/openstack/galaxy-requirements.yaml \
- -p openshift-ansible-contrib/roles
-
-Alternatively you can install directly from github:
-
- ansible-galaxy install git+https://github.com/redhat-cop/infra-ansible,master \
- -p openshift-ansible-contrib/roles
-
-Notes:
-* This assumes we're in the directory that contains the clonned
-openshift-ansible-contrib repo in its root path.
-* When trying to install a different version, the previous one must be removed first
-(`infra-ansible` directory from [roles](https://github.com/openshift/openshift-ansible-contrib/tree/master/roles)).
-Otherwise, even if there are differences between the two versions, installation of the newer version is skipped.
-
-
## Accessing the OpenShift Cluster
### Configure DNS
-OpenShift requires two DNS records to function fully. The first one points to
+OpenShift requires two public DNS records to function fully. The first one points to
the master/load balancer and provides the UI/API access. The other one is a
-wildcard domain that resolves app route requests to the infra node.
+wildcard domain that resolves app route requests to the infra node. A private DNS
+server and records are not required and not managed here.
If you followed the default installation from the README section, there is no
DNS configured. You should add two entries to the `/etc/hosts` file on the
@@ -180,15 +159,26 @@ So the provisioned cluster nodes will start using those natively as
default nameservers. Technically, this allows to deploy OpenShift clusters
without dnsmasq proxies.
-The `openshift_openstack_clusterid` and `openshift_openstack_public_dns_domain` will form the cluster's DNS domain all
-your servers will be under. With the default values, this will be
-`openshift.example.com`. For workloads, the default subdomain is 'apps'.
-That sudomain can be set as well by the `openshift_openstack_app_subdomain` variable in
-the inventory.
+The `openshift_openstack_clusterid` and `openshift_openstack_public_dns_domain`
+will form the cluster's public DNS domain all your servers will be under. With
+the default values, this will be `openshift.example.com`. For workloads, the
+default subdomain is 'apps'. That subdomain can be set as well by the
+`openshift_openstack_app_subdomain` variable in the inventory.
+
+If you want to use two sets of hostnames for public and private/prefixed DNS
+records for your externally managed public DNS server, you can specify
+`openshift_openstack_public_hostname_suffix` and/or
+`openshift_openstack_private_hostname_suffix`. The suffixes will be added
+to the nsupdate records sent to the external DNS server. Those are empty by default.
+
+**Note**: the real hostnames (the Nova server names, Ansible hostnames and inventory
+variables) will not be updated. The deployment may be done on arbitrarily named
+hosts with the hostnames managed by cloud-init. Inventory hostnames will ignore
+the suffixes.
The `openstack_<role name>_hostname` is a set of variables used for customising
-hostnames of servers with a given role. When such a variable stays commented,
-default hostname (usually the role name) is used.
+public names of Nova servers provisioned with a given role. When such a variable is left commented,
+the default value (usually the role name) is used.
The `openshift_openstack_dns_nameservers` is a list of DNS servers accessible from all
the created Nova servers. These will provide the internal name resolution for
@@ -203,7 +193,7 @@ When Network Manager is enabled for provisioned cluster nodes, which is
normally the case, you should not change the defaults and always deploy dnsmasq.
`openshift_openstack_external_nsupdate_keys` describes an external authoritative DNS server(s)
-processing dynamic records updates in the public and private cluster views:
+processing dynamic record updates in the public-only cluster view:
openshift_openstack_external_nsupdate_keys:
public:
@@ -211,10 +201,6 @@ processing dynamic records updates in the public and private cluster views:
key_algorithm: 'hmac-md5'
key_name: 'update-key'
server: <public DNS server IP>
- private:
- key_secret: <some nsupdate key 2>
- key_algorithm: 'hmac-sha256'
- server: <public or private DNS server IP>
Here, for the public view section, we specified another key algorithm and
optional `key_name`, which normally defaults to the cluster's DNS domain.
@@ -222,24 +208,6 @@ This just illustrates a compatibility mode with a DNS service deployed
by OpenShift on OSP10 reference architecture, and used in a mixed mode with
another external DNS server.
-Another example defines an external DNS server for the public view
-additionally to the in-stack DNS server used for the private view only:
-
- openshift_openstack_external_nsupdate_keys:
- public:
- key_secret: <some nsupdate key>
- key_algorithm: 'hmac-sha256'
- server: <public DNS server IP>
-
-Here, updates matching the public view will be hitting the given public
-server IP. While updates matching the private view will be sent to the
-auto evaluated in-stack DNS server's **public** IP.
-
-Note, for the in-stack DNS server, private view updates may be sent only
-via the public IP of the server. You can not send updates via the private
-IP yet. This forces the in-stack private server to have a floating IP.
-See also the [security notes](#security-notes)
-
## Flannel networking
In order to configure the
@@ -328,14 +296,6 @@ The `openshift_openstack_required_packages` variable also provides a list of the
prerequisite packages to be installed before to deploy an OpenShift cluster.
Those are ignored though, if the `manage_packages: False`.
-The `openstack_inventory` controls either a static inventory will be created after the
-cluster nodes provisioned on OpenStack cloud. Note, the fully dynamic inventory
-is yet to be supported, so the static inventory will be created anyway.
-
-The `openstack_inventory_path` points the directory to host the generated static inventory.
-It should point to the copied example inventory directory, otherwise ti creates
-a new one for you.
-
## Multi-master configuration
Please refer to the official documentation for the
@@ -345,7 +305,6 @@ variables](https://docs.openshift.com/container-platform/3.6/install_config/inst
in `inventory/group_vars/OSEv3.yml`. For example, given a load balancer node
under the ansible group named `ext_lb`:
- openshift_master_cluster_method: native
openshift_master_cluster_hostname: "{{ groups.ext_lb.0 }}"
openshift_master_cluster_public_hostname: "{{ groups.ext_lb.0 }}"
@@ -384,18 +343,6 @@ be the case for development environments. When turned off, the servers will
be provisioned omitting the ``yum update`` command. This brings security
implications though, and is not recommended for production deployments.
-### DNS servers security options
-
-Aside from `openshift_openstack_node_ingress_cidr` restricting public access to in-stack DNS
-servers, there are following (bind/named specific) DNS security
-options available:
-
- named_public_recursion: 'no'
- named_private_recursion: 'yes'
-
-External DNS servers, which is not included in the 'dns' hosts group,
-are not managed. It is up to you to configure such ones.
-
## Configure the OpenShift parameters
Finally, you need to update the DNS entry in
@@ -407,7 +354,7 @@ installation for example by specifying the authentication.
The full list of options is available in this sample inventory:
-https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.ose.example
+https://github.com/openshift/openshift-ansible/blob/master/inventory/hosts.example
Note, that in order to deploy OpenShift origin, you should update the following
variables for the `inventory/group_vars/OSEv3.yml`, `all.yml`:
@@ -538,43 +485,6 @@ You can also run the registry setup playbook directly:
-## Configure static inventory and access via a bastion node
-
-Example inventory variables:
-
- openshift_openstack_use_bastion: true
- openshift_openstack_bastion_ingress_cidr: "{{openshift_openstack_subnet_prefix}}.0/24"
- openstack_private_ssh_key: ~/.ssh/id_rsa
- openstack_inventory: static
- openstack_inventory_path: ../../../../inventory
- openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.openshift.example.com
-
-The `openshift_openstack_subnet_prefix` is the openstack private network for your cluster.
-And the `openshift_openstack_bastion_ingress_cidr` defines accepted range for SSH connections to nodes
-additionally to the `openshift_openstack_ssh_ingress_cidr`` (see the security notes above).
-
-The SSH config will be stored on the ansible control node by the
-gitven path. Ansible uses it automatically. To access the cluster nodes with
-that ssh config, use the `-F` prefix, f.e.:
-
- ssh -F /tmp/ssh.config.openshift.ansible.openshift.example.com master-0.openshift.example.com echo OK
-
-Note, relative paths will not work for the `openstack_ssh_config_path`, but it
-works for the `openstack_private_ssh_key` and `openstack_inventory_path`. In this
-guide, the latter points to the current directory, where you run ansible commands
-from.
-
-To verify nodes connectivity, use the command:
-
- ansible -v -i inventory/hosts -m ping all
-
-If something is broken, double-check the inventory variables, paths and the
-generated `<openstack_inventory_path>/hosts` and `openstack_ssh_config_path` files.
-
-The `inventory: dynamic` can be used instead to access cluster nodes directly via
-floating IPs. In this mode you can not use a bastion node and should specify
-the dynamic inventory file in your ansible commands , like `-i openstack.py`.
-
## Using Docker on the Ansible host
If you don't want to worry about the dependencies, you can use the
@@ -604,28 +514,6 @@ the playbooks:
ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml
-### Run the playbook
-
-Assuming your OpenStack (Keystone) credentials are in the `keystonerc`
-this is how you stat the provisioning process from your ansible control node:
-
- . keystonerc
- ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml
-
-Note, here you start with an empty inventory. The static inventory will be populated
-with data so you can omit providing additional arguments for future ansible commands.
-
-If bastion enabled, the generates SSH config must be applied for ansible.
-Otherwise, it is auto included by the previous step. In order to execute it
-as a separate playbook, use the following command:
-
- ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/post-provision-openstack.yml
-
-The first infra node then becomes a bastion node as well and proxies access
-for future ansible commands. The post-provision step also configures Satellite,
-if requested, and DNS server, and ensures other OpenShift requirements to be met.
-
-
## Running Custom Post-Provision Actions
A custom playbook can be run like this:
@@ -727,27 +615,12 @@ A library of custom post-provision actions exists in `openshift-ansible-contrib/
Once it succeeds, you can install openshift by running:
- ansible-playbook openshift-ansible/playbooks/byo/config.yml
+ ansible-playbook openshift-ansible/playbooks/deploy_cluster.yml
## Access UI
OpenShift UI may be accessed via the 1st master node FQDN, port 8443.
-When using a bastion, you may want to make an SSH tunnel from your control node
-to access UI on the `https://localhost:8443`, with this inventory variable:
-
- openshift_openstack_ui_ssh_tunnel: True
-
-Note, this requires sudo rights on the ansible control node and an absolute path
-for the `openstack_private_ssh_key`. You should also update the control node's
-`/etc/hosts`:
-
- 127.0.0.1 master-0.openshift.example.com
-
-In order to access UI, the ssh-tunnel service will be created and started on the
-control node. Make sure to remove these changes and the service manually, when not
-needed anymore.
-
## Scale Deployment up/down
### Scaling up
@@ -766,5 +639,3 @@ Usage:
```
ansible-playbook -i <path to inventory> openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml` [-e increment_by=<number>] [-e openshift_ansible_dir=<path to openshift-ansible>]
```
-
-Note: This playbook works only without a bastion node (`openshift_openstack_use_bastion: False`).
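For illustration, a minimal `inventory/group_vars/all.yml` sketch for the hostname suffix variables documented above; both default to empty strings, and the values here are hypothetical:

```
# Suffixes appended to the nsupdate records sent to the external public DNS server.
# Nova server names, Ansible hostnames and inventory hostnames are not affected.
openshift_openstack_public_hostname_suffix: "-public"
openshift_openstack_private_hostname_suffix: "-private"
```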
diff --git a/playbooks/openstack/openshift-cluster/install.yml b/playbooks/openstack/openshift-cluster/install.yml
index 1c4f609e3..3211f619a 100644
--- a/playbooks/openstack/openshift-cluster/install.yml
+++ b/playbooks/openstack/openshift-cluster/install.yml
@@ -8,8 +8,5 @@
# values here. We do it in the OSEv3 group vars. Do we need to add
# some logic here?
-- name: run the initialization
- include: ../../init/main.yml
-
-- name: run the config
- include: ../../common/openshift-cluster/config.yml
+- name: run the cluster deploy
+ import_playbook: ../../deploy_cluster.yml
diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml
index 36d8c8215..583e72b51 100644
--- a/playbooks/openstack/openshift-cluster/provision.yml
+++ b/playbooks/openstack/openshift-cluster/provision.yml
@@ -10,7 +10,7 @@
# NOTE(shadower): Bring in the host groups:
- name: evaluate groups
- include: ../../init/evaluate_groups.yml
+ import_playbook: ../../init/evaluate_groups.yml
- name: Wait for the nodes and gather their facts
@@ -26,9 +26,9 @@
- name: Gather facts for the new nodes
setup:
+- name: set common facts
+ import_playbook: ../../init/facts.yml
-# NOTE(shadower): the (internal) DNS must be functional at this point!!
-# That will have happened in provision.yml if nsupdate was configured.
# TODO(shadower): consider splitting this up so people can stop here
# and configure their DNS if they have to.
@@ -47,6 +47,13 @@
hosts: oo_all_hosts
become: yes
gather_facts: yes
+ roles:
+ - role: rhel_subscribe
+ when:
+ - ansible_distribution == "RedHat"
+ - rhsub_user | default(False)
+ - rhsub_pass | default(False)
+
tasks:
- name: Install dependencies
include_role:
diff --git a/playbooks/openstack/openshift-cluster/provision_install.yml b/playbooks/openstack/openshift-cluster/provision_install.yml
index 5d88c105f..fc2854605 100644
--- a/playbooks/openstack/openshift-cluster/provision_install.yml
+++ b/playbooks/openstack/openshift-cluster/provision_install.yml
@@ -1,9 +1,9 @@
---
- name: Check the prerequisites for cluster provisioning in OpenStack
- include: prerequisites.yml
+ import_playbook: prerequisites.yml
- name: Include the provision.yml playbook to create cluster
- include: provision.yml
+ import_playbook: provision.yml
- name: Include the install.yml playbook to install cluster
- include: install.yml
+ import_playbook: install.yml
diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
index 68d898d9a..933117127 100644
--- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
@@ -6,7 +6,6 @@ openshift_deployment_type: origin
#openshift_release: v3.5
openshift_master_default_subdomain: "apps.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}"
-openshift_master_cluster_method: native
openshift_master_cluster_public_hostname: "console.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}"
osm_default_node_selector: 'region=primary'
diff --git a/playbooks/openstack/sample-inventory/group_vars/all.yml b/playbooks/openstack/sample-inventory/group_vars/all.yml
index ae1528123..c7afe9a24 100644
--- a/playbooks/openstack/sample-inventory/group_vars/all.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/all.yml
@@ -82,27 +82,10 @@ openshift_openstack_docker_volume_size: "15"
openshift_openstack_subnet_prefix: "192.168.99"
-## Red Hat subscription defaults to false which means we will not attempt to
-## subscribe the nodes
-#rhsm_register: False
-
-# # Using Red Hat Satellite:
-#rhsm_register: True
-#rhsm_satellite: 'sat-6.example.com'
-#rhsm_org: 'OPENSHIFT_ORG'
-#rhsm_activationkey: '<activation-key>'
-
-# # Or using RHN username, password and optionally pool:
-#rhsm_register: True
-#rhsm_username: '<username>'
-#rhsm_password: '<password>'
-#rhsm_pool: '<pool id>'
-
-#rhsm_repos:
-# - "rhel-7-server-rpms"
-# - "rhel-7-server-ose-3.5-rpms"
-# - "rhel-7-server-extras-rpms"
-# - "rhel-7-fast-datapath-rpms"
+## Red Hat subscription:
+#rhsub_user: '<username>'
+#rhsub_pass: '<password>'
+#rhsub_pool: '<pool name>'
# # Roll-your-own DNS
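As a usage sketch for the simplified subscription variables above: the `rhel_subscribe` role added in `provision.yml` only runs on RHEL hosts when `rhsub_user` and `rhsub_pass` are set, so enabling it amounts to uncommenting them with real values (the ones below are placeholders):

```
rhsub_user: 'rhn-username'
rhsub_pass: 'rhn-password'
#rhsub_pool: '<pool name>'
```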
diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml
index 7dd59c5d8..7b7868cfe 100644
--- a/playbooks/prerequisites.yml
+++ b/playbooks/prerequisites.yml
@@ -1,12 +1,12 @@
---
-- include: init/main.yml
+- import_playbook: init/main.yml
vars:
skip_verison: True
-- hosts: "{{ l_containerized_host_groups }}"
- vars:
- l_chg_temp: "{{ openshift_containerized_host_groups | default([]) }}"
- l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
- tasks:
- - include_role:
- name: container_runtime
+# This is required by the container runtime (crio) and only needs to run once.
+- name: Configure os_firewall
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config
+ roles:
+ - role: os_firewall
+
+- import_playbook: container-runtime/private/config.yml
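Taken together with the `deploy_cluster.yml` entry point referenced elsewhere in this change, the usual two-step invocation stays the same; a sketch with illustrative paths:

```
ansible-playbook -i <path to inventory> openshift-ansible/playbooks/prerequisites.yml
ansible-playbook -i <path to inventory> openshift-ansible/playbooks/deploy_cluster.yml
```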
diff --git a/playbooks/redeploy-certificates.yml b/playbooks/redeploy-certificates.yml
index 45135c10e..b5fcb951d 100644
--- a/playbooks/redeploy-certificates.yml
+++ b/playbooks/redeploy-certificates.yml
@@ -1,26 +1,26 @@
---
-- include: init/main.yml
+- import_playbook: init/main.yml
-- include: openshift-etcd/private/redeploy-certificates.yml
+- import_playbook: openshift-etcd/private/redeploy-certificates.yml
-- include: openshift-master/private/redeploy-certificates.yml
+- import_playbook: openshift-master/private/redeploy-certificates.yml
-- include: openshift-node/private/redeploy-certificates.yml
+- import_playbook: openshift-node/private/redeploy-certificates.yml
-- include: openshift-etcd/private/restart.yml
+- import_playbook: openshift-etcd/private/restart.yml
vars:
g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}"
-- include: openshift-master/private/restart.yml
+- import_playbook: openshift-master/private/restart.yml
-- include: openshift-node/private/restart.yml
+- import_playbook: openshift-node/private/restart.yml
-- include: openshift-hosted/private/redeploy-router-certificates.yml
+- import_playbook: openshift-hosted/private/redeploy-router-certificates.yml
when: openshift_hosted_manage_router | default(true) | bool
-- include: openshift-hosted/private/redeploy-registry-certificates.yml
+- import_playbook: openshift-hosted/private/redeploy-registry-certificates.yml
when: openshift_hosted_manage_registry | default(true) | bool
-- include: openshift-master/private/revert-client-ca.yml
+- import_playbook: openshift-master/private/revert-client-ca.yml
-- include: openshift-master/private/restart.yml
+- import_playbook: openshift-master/private/restart.yml
diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml
index 0e3863304..bbc6edd48 100644
--- a/roles/calico/tasks/main.yml
+++ b/roles/calico/tasks/main.yml
@@ -14,7 +14,6 @@
vars:
etcd_cert_prefix: calico.etcd-
etcd_cert_config_dir: "{{ openshift.common.config_base }}/calico"
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-calico-{{ openshift.common.hostname }}"
diff --git a/roles/container_runtime/README.md b/roles/container_runtime/README.md
index e363c1714..51f469aaf 100644
--- a/roles/container_runtime/README.md
+++ b/roles/container_runtime/README.md
@@ -1,18 +1,23 @@
-Docker
+Container Runtime
=========
Ensures docker package or system container is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes.
container-daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
-Requirements
-------------
+This role is designed to be used with include_role and tasks_from.
-Ansible 2.2
+Entry points
+------------
+* package_docker.yml - install and set up the docker container runtime.
+* systemcontainer_docker.yml - run docker as a system container.
+* systemcontainer_crio.yml - run crio as a system container.
+* registry_auth.yml - place docker login credentials.
-Mandator Role Variables
---------------
+Requirements
+------------
+Ansible 2.4
Dependencies
@@ -24,9 +29,9 @@ Example Playbook
----------------
- hosts: servers
- roles:
- - role: container_runtime
- docker_udev_workaround: "true"
+ tasks:
+  - include_role:
+      name: container_runtime
+      tasks_from: package_docker.yml
License
-------
@@ -36,4 +41,4 @@ ASL 2.0
Author Information
------------------
-OpenShift operations, Red Hat, Inc
+Red Hat, Inc
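Assuming the entry points listed in the README above, a comparable sketch for the CRI-O path (the host group is illustrative):

```
- hosts: nodes
  tasks:
  - include_role:
      name: container_runtime
      tasks_from: systemcontainer_crio.yml
```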
diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml
index 62b3e141a..d7eb8663f 100644
--- a/roles/container_runtime/defaults/main.yml
+++ b/roles/container_runtime/defaults/main.yml
@@ -59,6 +59,7 @@ docker_default_storage_path: /var/lib/docker
# Set local versions of facts that must be in json format for container-daemon.json
# NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson
l_docker_log_options: "{{ l2_docker_log_options | to_json }}"
+l_docker_log_options_dict: "{{ l2_docker_log_options | oo_list_to_dict | to_json }}"
l_docker_additional_registries: "{{ l2_docker_additional_registries | to_json }}"
l_docker_blocked_registries: "{{ l2_docker_blocked_registries | to_json }}"
l_docker_insecure_registries: "{{ l2_docker_insecure_registries | to_json }}"
@@ -71,10 +72,62 @@ docker_no_proxy: "{{ openshift.common.no_proxy | default('') }}"
openshift_use_crio: False
openshift_use_crio_only: False
+l_openshift_image_tag_default: "{{ openshift_release | default('latest') }}"
+l_openshift_image_tag: "{{ openshift_image_tag | default(l_openshift_image_tag_default) | string}}"
+# --------------------- #
+# systemcontainers_crio #
+# --------------------- #
l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}"
l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}"
l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}"
-l_openshift_image_tag_default: "{{ openshift_release }}"
-l_openshift_image_tag: "{{ openshift_image_tag | default(l_openshift_image_tag_default) | string}}"
+
+openshift_crio_image_tag_default: "latest"
+
+l_crt_crio_image_tag_dict:
+ openshift-enterprise: "{{ l_openshift_image_tag }}"
+ origin: "{{ openshift_crio_image_tag | default(openshift_crio_image_tag_default) }}"
+
+l_crt_crio_image_prepend_dict:
+ openshift-enterprise: "registry.access.redhat.com/openshift3"
+ origin: "docker.io/gscrivano"
+
+l_crt_crio_image_dict:
+ Fedora:
+ crio_image_name: "cri-o-fedora"
+ crio_image_tag: "latest"
+ CentOS:
+ crio_image_name: "cri-o-centos"
+ crio_image_tag: "latest"
+ RedHat:
+ crio_image_name: "cri-o"
+ crio_image_tag: "{{ openshift_crio_image_tag | default(l_crt_crio_image_tag_dict[openshift_deployment_type]) }}"
+
+l_crio_image_prepend: "{{ l_crt_crio_image_prepend_dict[openshift_deployment_type] }}"
+l_crio_image_name: "{{ l_crt_crio_image_dict[ansible_distribution]['crio_image_name'] }}"
+l_crio_image_tag: "{{ l_crt_crio_image_dict[ansible_distribution]['crio_image_tag'] }}"
+
+l_crio_image_default: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}"
+l_crio_image: "{{ openshift_crio_systemcontainer_image_override | default(l_crio_image_default) }}"
+
+# ----------------------- #
+# systemcontainers_docker #
+# ----------------------- #
+l_crt_docker_image_prepend_dict:
+ Fedora: "registry.fedoraproject.org/f25"
+ CentOS: "docker.io/gscrivano"
+ RedHat: "registry.access.redhat.com/openshift3"
+
+openshift_docker_image_tag_default: "latest"
+l_crt_docker_image_tag_dict:
+ openshift-enterprise: "{{ l_openshift_image_tag }}"
+ origin: "{{ openshift_docker_image_tag | default(openshift_docker_image_tag_default) }}"
+
+l_docker_image_prepend: "{{ l_crt_docker_image_prepend_dict[ansible_distribution] }}"
+l_docker_image_tag: "{{ l_crt_docker_image_tag_dict[openshift_deployment_type] }}"
+
+l_docker_image_default: "{{ l_docker_image_prepend }}/{{ openshift_docker_service_name }}:{{ l_docker_image_tag }}"
+l_docker_image: "{{ openshift_docker_systemcontainer_image_override | default(l_docker_image_default) }}"
+
+l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
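As a sanity check on the new lookup dictionaries, this is how the defaults above would resolve on a hypothetical CentOS host running origin with no overrides set:

```
# l_crio_image_prepend -> "docker.io/gscrivano"   (l_crt_crio_image_prepend_dict['origin'])
# l_crio_image_name    -> "cri-o-centos"          (l_crt_crio_image_dict['CentOS'])
# l_crio_image_tag     -> "latest"
# l_crio_image         -> "docker.io/gscrivano/cri-o-centos:latest"
```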
diff --git a/roles/openshift_atomic/tasks/proxy.yml b/roles/container_runtime/tasks/common/atomic_proxy.yml
index dde099984..dde099984 100644
--- a/roles/openshift_atomic/tasks/proxy.yml
+++ b/roles/container_runtime/tasks/common/atomic_proxy.yml
diff --git a/roles/container_runtime/tasks/common/post.yml b/roles/container_runtime/tasks/common/post.yml
new file mode 100644
index 000000000..d790eb2c0
--- /dev/null
+++ b/roles/container_runtime/tasks/common/post.yml
@@ -0,0 +1,26 @@
+---
+- name: Ensure /var/lib/containers exists
+ file:
+ path: /var/lib/containers
+ state: directory
+
+- name: Fix SELinux Permissions on /var/lib/containers
+ command: "restorecon -R /var/lib/containers/"
+ changed_when: false
+
+- meta: flush_handlers
+
+# This needs to run after docker is restarted to account for proxy settings.
+# registry_auth is called directly with include_role in some places, so we
+# have to put it in the root of the tasks/ directory.
+- include_tasks: ../registry_auth.yml
+
+- name: stat the docker data dir
+ stat:
+ path: "{{ docker_default_storage_path }}"
+ register: dockerstat
+
+- include_tasks: setup_docker_symlink.yml
+ when:
+ - openshift_use_crio
+ - dockerstat.stat.islnk is defined and not (dockerstat.stat.islnk | bool)
diff --git a/roles/container_runtime/tasks/common/pre.yml b/roles/container_runtime/tasks/common/pre.yml
new file mode 100644
index 000000000..990fe66da
--- /dev/null
+++ b/roles/container_runtime/tasks/common/pre.yml
@@ -0,0 +1,12 @@
+---
+- include_tasks: udev_workaround.yml
+ when: docker_udev_workaround | default(False) | bool
+
+- name: Add enterprise registry, if necessary
+ set_fact:
+ l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}"
+ when:
+ - openshift.common.deployment_type == 'openshift-enterprise'
+ - openshift_docker_ent_reg != ''
+ - openshift_docker_ent_reg not in l2_docker_additional_registries
+ - not openshift_use_crio_only | bool
diff --git a/roles/container_runtime/tasks/common/setup_docker_symlink.yml b/roles/container_runtime/tasks/common/setup_docker_symlink.yml
new file mode 100644
index 000000000..d7aeb192e
--- /dev/null
+++ b/roles/container_runtime/tasks/common/setup_docker_symlink.yml
@@ -0,0 +1,38 @@
+---
+- block:
+ - name: stop the current running docker
+ systemd:
+ state: stopped
+ name: "{{ openshift_docker_service_name }}"
+
+ - name: copy "{{ docker_default_storage_path }}" to "{{ docker_alt_storage_path }}"
+ command: "cp -r {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
+ register: results
+ failed_when:
+ - results.rc != 0
+
+ - name: "Set the selinux context on {{ docker_alt_storage_path }}"
+ command: "semanage fcontext -a -e {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
+ register: results
+ failed_when:
+ - results.rc == 1
+ - "'already exists' not in results.stderr"
+
+ - name: "restorecon the {{ docker_alt_storage_path }}"
+ command: "restorecon -r {{ docker_alt_storage_path }}"
+
+ - name: Remove the old docker location
+ file:
+ state: absent
+ path: "{{ docker_default_storage_path }}"
+
+ - name: Setup the link
+ file:
+ state: link
+ src: "{{ docker_alt_storage_path }}"
+ path: "{{ docker_default_storage_path }}"
+
+ - name: start docker
+ systemd:
+ state: started
+ name: "{{ openshift_docker_service_name }}"
diff --git a/roles/container_runtime/tasks/common/syscontainer_packages.yml b/roles/container_runtime/tasks/common/syscontainer_packages.yml
new file mode 100644
index 000000000..715ed492d
--- /dev/null
+++ b/roles/container_runtime/tasks/common/syscontainer_packages.yml
@@ -0,0 +1,28 @@
+---
+
+- name: Ensure container-selinux is installed
+ package:
+ name: container-selinux
+ state: present
+ when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
+
+# Used to pull and install the system container
+- name: Ensure atomic is installed
+ package:
+ name: atomic
+ state: present
+ when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
+
+# At the time of writing the atomic command requires runc for its own use. This
+# task is here in the event that the atomic package ever removes the dependency.
+- name: Ensure runc is installed
+ package:
+ name: runc
+ state: present
+ when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
diff --git a/roles/container_runtime/tasks/udev_workaround.yml b/roles/container_runtime/tasks/common/udev_workaround.yml
index 257c3123d..257c3123d 100644
--- a/roles/container_runtime/tasks/udev_workaround.yml
+++ b/roles/container_runtime/tasks/common/udev_workaround.yml
diff --git a/roles/container_runtime/tasks/docker_sanity.yml b/roles/container_runtime/tasks/docker_sanity.yml
new file mode 100644
index 000000000..e62cf5505
--- /dev/null
+++ b/roles/container_runtime/tasks/docker_sanity.yml
@@ -0,0 +1,27 @@
+---
+# Sanity checks to ensure the role will complete and provide helpful error
+# messages for common problems.
+
+- name: Error out if Docker pre-installed but too old
+ fail:
+ msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined
+
+- name: Error out if requested Docker is too old
+ fail:
+ msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required."
+ when: docker_version is defined and docker_version | version_compare('1.9.1', '<')
+
+# If a docker_version was requested, sanity check that we can install or upgrade to it, and
+# no downgrade is required.
+- name: Fail if Docker version requested but downgrade is required
+ fail:
+ msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>')
+
+# This involves an extremely slow migration process, users should instead run the
+# Docker 1.10 upgrade playbook to accomplish this.
+- name: Error out if attempting to upgrade Docker across the 1.10 boundary
+ fail:
+ msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=')
diff --git a/roles/container_runtime/tasks/docker_upgrade_check.yml b/roles/container_runtime/tasks/docker_upgrade_check.yml
new file mode 100644
index 000000000..f29619f42
--- /dev/null
+++ b/roles/container_runtime/tasks/docker_upgrade_check.yml
@@ -0,0 +1,67 @@
+---
+
+# This snippet determines if a Docker upgrade is required by checking the inventory
+# variables, the available packages, and sets l_docker_upgrade to True if so.
+
+- set_fact:
+ docker_upgrade: True
+ when: docker_upgrade is not defined
+
+- name: Check if Docker is installed
+ command: rpm -q docker
+ args:
+ warn: no
+ register: pkg_check
+ failed_when: pkg_check.rc > 1
+ changed_when: no
+
+- name: Get current version of Docker
+ command: "{{ repoquery_installed }} --qf '%{version}' docker"
+ register: curr_docker_version
+ retries: 4
+ until: curr_docker_version | succeeded
+ changed_when: false
+
+- name: Get latest available version of Docker
+ command: >
+ {{ repoquery_cmd }} --qf '%{version}' "docker"
+ register: avail_docker_version
+ retries: 4
+ until: avail_docker_version | succeeded
+ # Don't expect docker rpm to be available on hosts that don't already have it installed:
+ when: pkg_check.rc == 0
+ failed_when: false
+ changed_when: false
+
+- fail:
+ msg: This playbook requires access to Docker 1.12 or later
+ # Disable the 1.12 requirement if the user set a specific Docker version
+ when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.12','<')))
+
+# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
+- set_fact:
+ l_docker_upgrade: False
+
+# Make sure a docker_version is set if none was requested:
+- set_fact:
+ docker_version: "{{ avail_docker_version.stdout }}"
+ when: pkg_check.rc == 0 and docker_version is not defined
+
+- name: Flag for Docker upgrade if necessary
+ set_fact:
+ l_docker_upgrade: True
+ when: pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(docker_version,'<')
+
+# Additional checks for Atomic hosts:
+- name: Determine available Docker
+ shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
+ register: g_atomic_docker_version_result
+ when: openshift.common.is_atomic | bool
+
+- set_fact:
+ l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+ when: openshift.common.is_atomic | bool
+
+- fail:
+ msg: This playbook requires access to Docker 1.12 or later
+ when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
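A minimal sketch of how these checks are meant to be consumed, given the role's include_role/tasks_from design (the play target is illustrative):

```
- hosts: oo_nodes_to_config
  tasks:
  - include_role:
      name: container_runtime
      tasks_from: docker_upgrade_check.yml
```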
diff --git a/roles/container_runtime/tasks/main.yml b/roles/container_runtime/tasks/main.yml
index 6d68082b1..96d8606c6 100644
--- a/roles/container_runtime/tasks/main.yml
+++ b/roles/container_runtime/tasks/main.yml
@@ -1,85 +1,2 @@
---
-- include_tasks: udev_workaround.yml
- when: docker_udev_workaround | default(False) | bool
-
-- name: Add enterprise registry, if necessary
- set_fact:
- l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}"
- when:
- - openshift.common.deployment_type == 'openshift-enterprise'
- - openshift_docker_ent_reg != ''
- - openshift_docker_ent_reg not in l2_docker_additional_registries
- - not openshift_use_crio_only | bool
-
-- name: Use Package Docker if Requested
- include_tasks: package_docker.yml
- when:
- - not openshift_docker_use_system_container
- - not openshift_use_crio_only
-
-- name: Ensure /var/lib/containers exists
- file:
- path: /var/lib/containers
- state: directory
-
-- name: Fix SELinux Permissions on /var/lib/containers
- command: "restorecon -R /var/lib/containers/"
- changed_when: false
-
-- name: Use System Container Docker if Requested
- include_tasks: systemcontainer_docker.yml
- when:
- - openshift_docker_use_system_container
- - not openshift_use_crio_only
-
-- name: Add CRI-O usage Requested
- include_tasks: systemcontainer_crio.yml
- when:
- - openshift_use_crio
- - openshift_docker_is_node_or_master | bool
-
-- name: stat the docker data dir
- stat:
- path: "{{ docker_default_storage_path }}"
- register: dockerstat
-
-- when:
- - openshift_use_crio
- - dockerstat.stat.islnk is defined and not (dockerstat.stat.islnk | bool)
- block:
- - name: stop the current running docker
- systemd:
- state: stopped
- name: "{{ openshift_docker_service_name }}"
-
- - name: copy "{{ docker_default_storage_path }}" to "{{ docker_alt_storage_path }}"
- command: "cp -r {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
- register: results
- failed_when:
- - results.rc != 0
-
- - name: "Set the selinux context on {{ docker_alt_storage_path }}"
- command: "semanage fcontext -a -e {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
- register: results
- failed_when:
- - results.rc == 1
- - "'already exists' not in results.stderr"
-
- - name: "restorecon the {{ docker_alt_storage_path }}"
- command: "restorecon -r {{ docker_alt_storage_path }}"
-
- - name: Remove the old docker location
- file:
- state: absent
- path: "{{ docker_default_storage_path }}"
-
- - name: Setup the link
- file:
- state: link
- src: "{{ docker_alt_storage_path }}"
- path: "{{ docker_default_storage_path }}"
-
- - name: start docker
- systemd:
- state: started
- name: "{{ openshift_docker_service_name }}"
+# This role is meant to be used with include_role and tasks_from.
diff --git a/roles/container_runtime/tasks/package_docker.yml b/roles/container_runtime/tasks/package_docker.yml
index 40ab75a25..89899c9cf 100644
--- a/roles/container_runtime/tasks/package_docker.yml
+++ b/roles/container_runtime/tasks/package_docker.yml
@@ -1,4 +1,6 @@
---
+- include_tasks: common/pre.yml
+
- name: Get current installed Docker version
command: "{{ repoquery_installed }} --qf '%{version}' docker"
when: not openshift.common.is_atomic | bool
@@ -7,35 +9,16 @@
until: curr_docker_version | succeeded
changed_when: false
-- name: Error out if Docker pre-installed but too old
- fail:
- msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined
-
-- name: Error out if requested Docker is too old
- fail:
- msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required."
- when: docker_version is defined and docker_version | version_compare('1.9.1', '<')
-
-# If a docker_version was requested, sanity check that we can install or upgrade to it, and
-# no downgrade is required.
-- name: Fail if Docker version requested but downgrade is required
- fail:
- msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>')
-
-# This involves an extremely slow migration process, users should instead run the
-# Docker 1.10 upgrade playbook to accomplish this.
-- name: Error out if attempting to upgrade Docker across the 1.10 boundary
- fail:
- msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=')
+# Some basic checks to ensure the role will complete
+- include_tasks: docker_sanity.yml
# Make sure Docker is installed, but does not update a running version.
# Docker upgrades are handled by a separate playbook.
# Note: The curr_docker_version.stdout check can be removed when https://github.com/ansible/ansible/issues/33187 gets fixed.
- name: Install Docker
- package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
+ package:
+ name: "docker{{ '-' + docker_version if docker_version is defined else '' }}"
+ state: present
when: not openshift.common.is_atomic | bool and not curr_docker_version | skipped and not curr_docker_version.stdout != ''
register: result
until: result | success
@@ -161,7 +144,4 @@
- set_fact:
docker_service_status_changed: "{{ (r_docker_package_docker_start_result | changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}"
-- meta: flush_handlers
-
-# This needs to run after docker is restarted to account for proxy settings.
-- include_tasks: registry_auth.yml
+- include_tasks: common/post.yml
diff --git a/roles/container_runtime/tasks/systemcontainer_crio.yml b/roles/container_runtime/tasks/systemcontainer_crio.yml
index 8dcfe60ef..61f122f3c 100644
--- a/roles/container_runtime/tasks/systemcontainer_crio.yml
+++ b/roles/container_runtime/tasks/systemcontainer_crio.yml
@@ -1,39 +1,14 @@
---
# TODO: Much of this file is shared with container engine tasks
-
-- name: Ensure container-selinux is installed
- package:
- name: container-selinux
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
-
- name: Check we are not using node as a Docker container with CRI-O
fail: msg='Cannot use CRI-O with node configured as a Docker container'
when:
- openshift.common.is_containerized | bool
- - not openshift.common.is_node_system_container | bool
-
-# Used to pull and install the system container
-- name: Ensure atomic is installed
- package:
- name: atomic
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
-
-# At the time of writing the atomic command requires runc for it's own use. This
-# task is here in the even that the atomic package ever removes the dependency.
-- name: Ensure runc is installed
- package:
- name: runc
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
+ - not l_is_node_system_container | bool
+
+- include_tasks: common/pre.yml
+- include_tasks: common/syscontainer_packages.yml
- name: Check that overlay is in the kernel
shell: lsmod | grep overlay
@@ -60,50 +35,11 @@
state: restarted
- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
-
-- block:
-
- - name: Set CRI-O image defaults
- set_fact:
- l_crio_image_prepend: "docker.io/gscrivano"
- l_crio_image_name: "cri-o-fedora"
- l_crio_image_tag: "latest"
-
- - name: Use Centos based image when distribution is CentOS
- set_fact:
- l_crio_image_name: "cri-o-centos"
- when: ansible_distribution == "CentOS"
-
- - name: Set CRI-O image tag
- set_fact:
- l_crio_image_tag: "{{ l_openshift_image_tag }}"
- when:
- - openshift_deployment_type == 'openshift-enterprise'
-
- - name: Use RHEL based image when distribution is Red Hat
- set_fact:
- l_crio_image_prepend: "registry.access.redhat.com/openshift3"
- l_crio_image_name: "cri-o"
- when: ansible_distribution == "RedHat"
-
- - name: Set the full image name
- set_fact:
- l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}"
-
- # For https://github.com/openshift/aos-cd-jobs/pull/624#pullrequestreview-61816548
- - name: Use a specific image if requested
- set_fact:
- l_crio_image: "{{ openshift_crio_systemcontainer_image_override }}"
- when:
- - openshift_crio_systemcontainer_image_override is defined
- - openshift_crio_systemcontainer_image_override != ""
-
- # Be nice and let the user see the variable result
- - debug:
- var: l_crio_image
+ include_tasks: common/atomic_proxy.yml
+
+# Be nice and let the user see the variable result
+- debug:
+ var: l_crio_image
# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
- name: Pre-pull CRI-O System Container image
@@ -112,7 +48,6 @@
environment:
NO_PROXY: "{{ openshift.common.no_proxy | default('') }}"
-
- name: Install CRI-O System Container
oc_atomic_container:
name: "cri-o"
@@ -139,8 +74,7 @@
state: directory
- name: setup firewall for CRI-O
- include_tasks: crio_firewall.yml
- static: yes
+ import_tasks: crio_firewall.yml
- name: Configure the CNI network
template:
@@ -155,10 +89,8 @@
daemon_reload: yes
register: start_result
-- meta: flush_handlers
-
# If we are using crio only, docker.service might not be available for
# 'docker login'
-- include_tasks: registry_auth.yml
+- include_tasks: common/post.yml
vars:
openshift_docker_alternative_creds: "{{ openshift_use_crio_only }}"
diff --git a/roles/container_runtime/tasks/systemcontainer_docker.yml b/roles/container_runtime/tasks/systemcontainer_docker.yml
index 84217e50c..10570fe34 100644
--- a/roles/container_runtime/tasks/systemcontainer_docker.yml
+++ b/roles/container_runtime/tasks/systemcontainer_docker.yml
@@ -11,32 +11,9 @@
traditional docker package install. Otherwise, comment out openshift_docker_options
in your inventory file.
-- name: Ensure container-selinux is installed
- package:
- name: container-selinux
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
-
-# Used to pull and install the system container
-- name: Ensure atomic is installed
- package:
- name: atomic
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
+- include_tasks: common/pre.yml
-# At the time of writing the atomic command requires runc for it's own use. This
-# task is here in the even that the atomic package ever removes the dependency.
-- name: Ensure runc is installed
- package:
- name: runc
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
+- include_tasks: common/syscontainer_packages.yml
# Make sure Docker is installed so we are able to use the client
- name: Install Docker so we can use the client
@@ -59,48 +36,11 @@
delay: 30
- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
-
-- block:
-
- - name: Set to default prepend
- set_fact:
- l_docker_image_prepend: "gscrivano"
- l_docker_image_tag: "latest"
-
- - name: Set container engine image tag
- set_fact:
- l_docker_image_tag: "{{ l_openshift_image_tag }}"
- when:
- - openshift_deployment_type == 'openshift-enterprise'
-
- - name: Use Red Hat Registry for image when distribution is Red Hat
- set_fact:
- l_docker_image_prepend: "registry.access.redhat.com/openshift3"
- when: ansible_distribution == 'RedHat'
-
- - name: Use Fedora Registry for image when distribution is Fedora
- set_fact:
- l_docker_image_prepend: "registry.fedoraproject.org/f25"
- when: ansible_distribution == 'Fedora'
-
- - name: Set the full image name
- set_fact:
- l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift_docker_service_name }}:{{ l_docker_image_tag }}"
-
- # For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959
- - name: Use a specific image if requested
- set_fact:
- l_docker_image: "{{ openshift_docker_systemcontainer_image_override }}"
- when:
- - openshift_docker_systemcontainer_image_override is defined
- - openshift_docker_systemcontainer_image_override != ""
-
- # Be nice and let the user see the variable result
- - debug:
- var: l_docker_image
+ include_tasks: common/atomic_proxy.yml
+
+# Be nice and let the user see the variable result
+- debug:
+ var: l_docker_image
# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
- name: Pre-pull Container Engine System Container image
@@ -154,10 +94,8 @@
- set_fact:
docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}"
-- meta: flush_handlers
-
# Since docker is running as a system container, docker login will fail to create
# credentials. Use alternate method if requiring authenticated registries.
-- include_tasks: registry_auth.yml
+- include_tasks: common/post.yml
vars:
openshift_docker_alternative_creds: True
diff --git a/roles/container_runtime/templates/daemon.json b/roles/container_runtime/templates/daemon.json
index 383963bd3..1a72d812a 100644
--- a/roles/container_runtime/templates/daemon.json
+++ b/roles/container_runtime/templates/daemon.json
@@ -5,10 +5,10 @@
"disable-legacy-registry": false,
"exec-opts": ["native.cgroupdriver=systemd"],
"insecure-registries": {{ l_docker_insecure_registries }},
-{% if openshift_docker_log_driver is defined %}
+{% if openshift_docker_log_driver %}
"log-driver": "{{ openshift_docker_log_driver }}",
{%- endif %}
- "log-opts": {{ l_docker_log_options }},
+ "log-opts": {{ l_docker_log_options_dict }},
"runtimes": {
"oci": {
"path": "/usr/libexec/docker/docker-runc-current"
diff --git a/roles/contiv/meta/main.yml b/roles/contiv/meta/main.yml
index a2c2f98a7..52b9d09dd 100644
--- a/roles/contiv/meta/main.yml
+++ b/roles/contiv/meta/main.yml
@@ -21,7 +21,7 @@ dependencies:
etcd_client_port: 22379
etcd_conf_dir: /etc/contiv-etcd/
etcd_data_dir: /var/lib/contiv-etcd/
- etcd_ca_host: "{{ inventory_hostname }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_config_dir: /etc/contiv-etcd/
etcd_url_scheme: http
etcd_peer_url_scheme: http
diff --git a/roles/contiv/tasks/default_network.yml b/roles/contiv/tasks/default_network.yml
index f679443e0..8a928ea54 100644
--- a/roles/contiv/tasks/default_network.yml
+++ b/roles/contiv/tasks/default_network.yml
@@ -8,51 +8,64 @@
- name: Contiv | Set globals
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --fabric-mode {{ contiv_fabric_mode }} --vlan-range {{ contiv_vlan_range }} --fwd-mode {{ netplugin_fwd_mode }} --private-subnet {{ contiv_private_ext_subnet }}'
+ run_once: true
- name: Contiv | Set arp mode to flood if ACI
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --arp-mode flood'
when: contiv_fabric_mode == "aci"
+ run_once: true
- name: Contiv | Check if default-net exists
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net ls'
register: net_result
+ run_once: true
- name: Contiv | Create default-net
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway {{ contiv_default_gw }} default-net'
when: net_result.stdout.find("default-net") == -1
+ run_once: true
- name: Contiv | Create host access infra network for VxLan routing case
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_h1_subnet_default }} --gateway={{ contiv_h1_gw_default }} --nw-type="infra" contivh1'
when: (contiv_encap_mode == "vxlan") and (netplugin_fwd_mode == "routing")
+ run_once: true
#- name: Contiv | Create an allow-all policy for the default-group
# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy create ose-allow-all-policy'
# when: contiv_fabric_mode == "aci"
+# run_once: true
- name: Contiv | Set up aci external contract to consume default external contract
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -c -a {{ apic_default_external_contract }} oseExtToConsume'
when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
+ run_once: true
- name: Contiv | Set up aci external contract to provide default external contract
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -p -a {{ apic_default_external_contract }} oseExtToProvide'
when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
+ run_once: true
- name: Contiv | Create aci default-group
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create default-net default-group'
when: contiv_fabric_mode == "aci"
+ run_once: true
- name: Contiv | Add external contracts to the default-group
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create -e oseExtToConsume -e oseExtToProvide default-net default-group'
when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
+ run_once: true
#- name: Contiv | Add policy rule 1 for allow-all policy
# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d in --action allow ose-allow-all-policy 1'
# when: contiv_fabric_mode == "aci"
+# run_once: true
#- name: Contiv | Add policy rule 2 for allow-all policy
# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d out --action allow ose-allow-all-policy 2'
# when: contiv_fabric_mode == "aci"
+# run_once: true
- name: Contiv | Create default aci app profile
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" app-profile create -g default-group {{ apic_default_app_profile }}'
when: contiv_fabric_mode == "aci"
+ run_once: true
diff --git a/roles/contiv/tasks/netmaster_iptables.yml b/roles/contiv/tasks/netmaster_iptables.yml
index 07bb16ea7..c98e7b6a5 100644
--- a/roles/contiv/tasks/netmaster_iptables.yml
+++ b/roles/contiv/tasks/netmaster_iptables.yml
@@ -13,9 +13,15 @@
- name: Netmaster IPtables | Open Netmaster with iptables
command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "contiv"
with_items:
- - "{{ netmaster_port }}"
- "{{ contiv_rpc_port1 }}"
- "{{ contiv_rpc_port2 }}"
- "{{ contiv_rpc_port3 }}"
when: iptablesrules.stdout.find("contiv") == -1
notify: Save iptables rules
+
+- name: Netmaster IPtables | Open netmaster main port
+ command: /sbin/iptables -I INPUT 1 -p tcp -s {{ item }} --dport {{ netmaster_port }} -j ACCEPT -m comment --comment "contiv"
+ with_items:
+ - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + netmaster_interface].ipv4.address)|list }}"
+ when: iptablesrules.stdout.find("contiv") == -1
+ notify: Save iptables rules
diff --git a/roles/contiv_facts/tasks/rpm.yml b/roles/contiv_facts/tasks/rpm.yml
index 07401a6dd..d12436f96 100644
--- a/roles/contiv_facts/tasks/rpm.yml
+++ b/roles/contiv_facts/tasks/rpm.yml
@@ -6,10 +6,17 @@
failed_when: false
check_mode: no
+- name: RPM | Determine if firewalld enabled
+ command: "systemctl status firewalld.service"
+ register: ss
+ changed_when: false
+ failed_when: false
+ check_mode: no
+
- name: Set the has_firewalld fact
set_fact:
has_firewalld: true
- when: s.rc == 0
+ when: s.rc == 0 and ss.rc == 0
- name: Determine if iptables-services installed
command: "rpm -q iptables-services"
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index 879ca4f4e..f2e1fc310 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -19,3 +19,4 @@ dependencies:
- role: lib_openshift
- role: lib_os_firewall
- role: lib_utils
+- role: openshift_facts
diff --git a/roles/etcd/tasks/migration/add_ttls.yml b/roles/etcd/tasks/migration/add_ttls.yml
index 4bdc6bcc3..a4b0ff31d 100644
--- a/roles/etcd/tasks/migration/add_ttls.yml
+++ b/roles/etcd/tasks/migration/add_ttls.yml
@@ -11,7 +11,7 @@
- name: Re-introduce leases (as a replacement for key TTLs)
command: >
- oadm migrate etcd-ttl \
+ {{ openshift.common.client_binary }} adm migrate etcd-ttl \
--cert {{ r_etcd_common_master_peer_cert_file }} \
--key {{ r_etcd_common_master_peer_key_file }} \
--cacert {{ r_etcd_common_master_peer_ca_file }} \
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index 82ac4fc84..ca8b6a707 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -1,9 +1,4 @@
---
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
-
- name: Pull etcd system container
command: atomic pull --storage=ostree {{ etcd_image }}
register: pull_result
diff --git a/roles/flannel/defaults/main.yaml b/roles/flannel/defaults/main.yaml
index 488b6b0bc..2e4a0dc39 100644
--- a/roles/flannel/defaults/main.yaml
+++ b/roles/flannel/defaults/main.yaml
@@ -2,8 +2,8 @@
flannel_interface: "{{ ansible_default_ipv4.interface }}"
flannel_etcd_key: /openshift.com/network
etcd_hosts: "{{ etcd_urls }}"
-etcd_peer_ca_file: "{{ openshift.common.config_base }}/node/{{ 'ca' if (embedded_etcd | bool) else 'flannel.etcd-ca' }}.crt"
-etcd_peer_cert_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.crt"
-etcd_peer_key_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.key"
+etcd_peer_ca_file: "{{ openshift.common.config_base }}/node/flannel.etcd-ca.crt"
+etcd_peer_cert_file: "{{ openshift.common.config_base }}/node/flannel.etcd-client.crt"
+etcd_peer_key_file: "{{ openshift.common.config_base }}/node/flannel.etcd-client.key"
openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
index 80e4d391d..705d39f9a 100644
--- a/roles/flannel/handlers/main.yml
+++ b/roles/flannel/handlers/main.yml
@@ -15,7 +15,7 @@
- name: restart node
systemd:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
state: restarted
register: l_restart_node_result
until: not l_restart_node_result | failed
diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml
index 1d0f5df6a..cd11fd9ff 100644
--- a/roles/flannel_register/defaults/main.yaml
+++ b/roles/flannel_register/defaults/main.yaml
@@ -4,6 +4,6 @@ flannel_subnet_len: "{{ 32 - (openshift.master.sdn_host_subnet_length | int) }}"
flannel_etcd_key: /openshift.com/network
etcd_hosts: "{{ etcd_urls }}"
etcd_conf_dir: "{{ openshift.common.config_base }}/master"
-etcd_peer_ca_file: "{{ etcd_conf_dir + '/ca.crt' if (openshift.master.embedded_etcd | bool) else etcd_conf_dir + '/master.etcd-ca.crt' }}"
+etcd_peer_ca_file: "{{ etcd_conf_dir + '/master.etcd-ca.crt' }}"
etcd_peer_cert_file: "{{ etcd_conf_dir }}/master.etcd-client.crt"
etcd_peer_key_file: "{{ etcd_conf_dir }}/master.etcd-client.key"
diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
index 3cb1fa8d0..83ca83350 100644
--- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
+++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
@@ -86,7 +86,7 @@ class CallbackModule(CallbackBase):
},
'installer_phase_logging': {
'title': 'Logging Install',
- 'playbook': 'playbooks/byo/openshift-cluster/openshift-logging.yml'
+ 'playbook': 'playbooks/openshift-logging/config.yml'
},
'installer_phase_prometheus': {
'title': 'Prometheus Install',
diff --git a/roles/kuryr/tasks/node.yaml b/roles/kuryr/tasks/node.yaml
index ffe814713..08f2d5adc 100644
--- a/roles/kuryr/tasks/node.yaml
+++ b/roles/kuryr/tasks/node.yaml
@@ -36,7 +36,7 @@
- name: Configure OpenShift node with disabled service proxy
lineinfile:
- dest: "/etc/sysconfig/{{ openshift.common.service_type }}-node"
+ dest: "/etc/sysconfig/{{ openshift_service_type }}-node"
regexp: '^OPTIONS="?(.*?)"?$'
backrefs: yes
backup: yes
@@ -44,5 +44,5 @@
- name: force node restart to disable the proxy
service:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
state: restarted
diff --git a/roles/kuryr/templates/configmap.yaml.j2 b/roles/kuryr/templates/configmap.yaml.j2
index 6bf6c1db2..96c215f00 100644
--- a/roles/kuryr/templates/configmap.yaml.j2
+++ b/roles/kuryr/templates/configmap.yaml.j2
@@ -229,6 +229,7 @@ data:
# TODO (apuimedo): Remove the duplicated line just after this one once the
# RDO packaging contains the upstream patch
worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+ external_svc_subnet = {{ kuryr_openstack_external_svc_subnet_id }}
[pod_vif_nested]
worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
diff --git a/roles/lib_utils/library/oo_ec2_group.py b/roles/lib_utils/library/oo_ec2_group.py
new file mode 100644
index 000000000..615021ac5
--- /dev/null
+++ b/roles/lib_utils/library/oo_ec2_group.py
@@ -0,0 +1,903 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# pylint: skip-file
+# flake8: noqa
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = '''
+---
+module: ec2_group
+author: "Andrew de Quincey (@adq)"
+version_added: "1.3"
+requirements: [ boto3 ]
+short_description: maintain an ec2 VPC security group.
+description:
+ - maintains ec2 security groups. This module has a dependency on boto3.
+options:
+ name:
+ description:
+ - Name of the security group.
+ - One of and only one of I(name) or I(group_id) is required.
+ - Required if I(state=present).
+ required: false
+ group_id:
+ description:
+ - Id of group to delete (works only with absent).
+ - One of and only one of I(name) or I(group_id) is required.
+ required: false
+ version_added: "2.4"
+ description:
+ description:
+ - Description of the security group. Required when C(state) is C(present).
+ required: false
+ vpc_id:
+ description:
+ - ID of the VPC to create the group in.
+ required: false
+ rules:
+ description:
+ - List of firewall inbound rules to enforce in this group (see example). If none are supplied,
+ no inbound rules will be enabled. The rules list may include the group's own name in `group_name`.
+ This allows idempotent loopback additions (e.g. allowing the group to access itself).
+ Rule sources list support was added in version 2.4. This allows defining multiple sources per
+ source type, as well as multiple source types per rule. Prior to 2.4, only an individual source was allowed.
+ required: false
+ rules_egress:
+ description:
+ - List of firewall outbound rules to enforce in this group (see example). If none are supplied,
+ a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
+ Rule Egress sources list support was added in version 2.4.
+ required: false
+ version_added: "1.6"
+ state:
+ version_added: "1.4"
+ description:
+ - Create or delete a security group
+ required: false
+ default: 'present'
+ choices: [ "present", "absent" ]
+ aliases: []
+ purge_rules:
+ version_added: "1.8"
+ description:
+ - Purge existing rules on security group that are not found in rules
+ required: false
+ default: 'true'
+ aliases: []
+ purge_rules_egress:
+ version_added: "1.8"
+ description:
+ - Purge existing rules_egress on security group that are not found in rules_egress
+ required: false
+ default: 'true'
+ aliases: []
+ tags:
+ version_added: "2.4"
+ description:
+ - A dictionary of one or more tags to assign to the security group.
+ required: false
+ purge_tags:
+ version_added: "2.4"
+ description:
+ - If yes, existing tags will be purged from the resource to match exactly what is defined by the I(tags) parameter. If the I(tags) parameter is not set, then
+ tags will not be modified.
+ required: false
+ default: yes
+ choices: [ 'yes', 'no' ]
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+
+notes:
+ - If a rule declares a group_name and that group doesn't exist, it will be
+ automatically created. In that case, group_desc should be provided as well.
+ The module will refuse to create a depended-on group without a description.
+'''
+
+EXAMPLES = '''
+- name: example ec2 group
+ ec2_group:
+ name: example
+ description: an example EC2 group
+ vpc_id: 12345
+ region: eu-west-1
+ aws_secret_key: SECRET
+ aws_access_key: ACCESS
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 10.0.0.0/8
+ - proto: tcp
+ from_port: 443
+ to_port: 443
+ group_id: amazon-elb/sg-87654321/amazon-elb-sg
+ - proto: tcp
+ from_port: 3306
+ to_port: 3306
+ group_id: 123412341234/sg-87654321/exact-name-of-sg
+ - proto: udp
+ from_port: 10050
+ to_port: 10050
+ cidr_ip: 10.0.0.0/8
+ - proto: udp
+ from_port: 10051
+ to_port: 10051
+ group_id: sg-12345678
+ - proto: icmp
+ from_port: 8 # icmp type, -1 = any type
+ to_port: -1 # icmp subtype, -1 = any subtype
+ cidr_ip: 10.0.0.0/8
+ - proto: all
+ # the containing group name may be specified here
+ group_name: example
+ rules_egress:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ cidr_ipv6: 64:ff9b::/96
+ group_name: example-other
+ # description to use if example-other needs to be created
+ group_desc: other example EC2 group
+
+- name: example2 ec2 group
+ ec2_group:
+ name: example2
+ description: an example2 EC2 group
+ vpc_id: 12345
+ region: eu-west-1
+ rules:
+ # 'ports' rule keyword was introduced in version 2.4. It accepts a single port value or a list of values including ranges (from_port-to_port).
+ - proto: tcp
+ ports: 22
+ group_name: example-vpn
+ - proto: tcp
+ ports:
+ - 80
+ - 443
+ - 8080-8099
+ cidr_ip: 0.0.0.0/0
+ # Rule sources list support was added in version 2.4. This allows defining multiple sources per source type, as well as multiple source types per rule.
+ - proto: tcp
+ ports:
+ - 6379
+ - 26379
+ group_name:
+ - example-vpn
+ - example-redis
+ - proto: tcp
+ ports: 5665
+ group_name: example-vpn
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ cidr_ipv6:
+ - 2607:F8B0::/32
+ - 64:ff9b::/96
+ group_id:
+ - sg-edcd9784
+
+- name: "Delete group by its id"
+ ec2_group:
+ group_id: sg-33b4ee5b
+ state: absent
+'''
+
+RETURN = '''
+group_name:
+ description: Security group name
+ sample: My Security Group
+ type: string
+ returned: on create/update
+group_id:
+ description: Security group id
+ sample: sg-abcd1234
+ type: string
+ returned: on create/update
+description:
+ description: Description of security group
+ sample: My Security Group
+ type: string
+ returned: on create/update
+tags:
+ description: Tags associated with the security group
+ sample:
+ Name: My Security Group
+ Purpose: protecting stuff
+ type: dict
+ returned: on create/update
+vpc_id:
+ description: ID of VPC to which the security group belongs
+ sample: vpc-abcd1234
+ type: string
+ returned: on create/update
+ip_permissions:
+ description: Inbound rules associated with the security group.
+ sample:
+ - from_port: 8182
+ ip_protocol: tcp
+ ip_ranges:
+ - cidr_ip: "1.1.1.1/32"
+ ipv6_ranges: []
+ prefix_list_ids: []
+ to_port: 8182
+ user_id_group_pairs: []
+ type: list
+ returned: on create/update
+ip_permissions_egress:
+ description: Outbound rules associated with the security group.
+ sample:
+ - ip_protocol: -1
+ ip_ranges:
+ - cidr_ip: "0.0.0.0/0"
+ ipv6_ranges: []
+ prefix_list_ids: []
+ user_id_group_pairs: []
+ type: list
+ returned: on create/update
+owner_id:
+ description: AWS Account ID of the security group
+ sample: 123456789012
+ type: int
+ returned: on create/update
+'''
+
+import json
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import boto3_conn
+from ansible.module_utils.ec2 import get_aws_connection_info
+from ansible.module_utils.ec2 import ec2_argument_spec
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.ec2 import HAS_BOTO3
+from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags
+from ansible.module_utils.ec2 import AWSRetry
+import traceback
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by imported HAS_BOTO3
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def get_security_groups_with_backoff(connection, **kwargs):
+ return connection.describe_security_groups(**kwargs)
+
+
+def deduplicate_rules_args(rules):
+ """Returns unique rules"""
+ if rules is None:
+ return None
+ return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values())
+
+
+def make_rule_key(prefix, rule, group_id, cidr_ip):
+ if 'proto' in rule:
+ proto, from_port, to_port = [rule.get(x, None) for x in ('proto', 'from_port', 'to_port')]
+ elif 'IpProtocol' in rule:
+ proto, from_port, to_port = [rule.get(x, None) for x in ('IpProtocol', 'FromPort', 'ToPort')]
+ if proto not in ['icmp', 'tcp', 'udp'] and from_port == -1 and to_port == -1:
+ from_port = 'none'
+ to_port = 'none'
+ key = "%s-%s-%s-%s-%s-%s" % (prefix, proto, from_port, to_port, group_id, cidr_ip)
+ return key.lower().replace('-none', '-None')
+
+
+def add_rules_to_lookup(ipPermissions, group_id, prefix, dict):
+ for rule in ipPermissions:
+ for groupGrant in rule.get('UserIdGroupPairs', []):
+ dict[make_rule_key(prefix, rule, group_id, groupGrant.get('GroupId'))] = (rule, groupGrant)
+ for ipv4Grants in rule.get('IpRanges', []):
+ dict[make_rule_key(prefix, rule, group_id, ipv4Grants.get('CidrIp'))] = (rule, ipv4Grants)
+ for ipv6Grants in rule.get('Ipv6Ranges', []):
+ dict[make_rule_key(prefix, rule, group_id, ipv6Grants.get('CidrIpv6'))] = (rule, ipv6Grants)
+
+
+def validate_rule(module, rule):
+ VALID_PARAMS = ('cidr_ip', 'cidr_ipv6',
+ 'group_id', 'group_name', 'group_desc',
+ 'proto', 'from_port', 'to_port')
+ if not isinstance(rule, dict):
+ module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))
+ for k in rule:
+ if k not in VALID_PARAMS:
+ module.fail_json(msg='Invalid rule parameter \'{}\''.format(k))
+
+ if 'group_id' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg='Specify group_id OR cidr_ip, not both')
+ elif 'group_name' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg='Specify group_name OR cidr_ip, not both')
+ elif 'group_id' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
+ elif 'group_name' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
+ elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
+ elif 'group_id' in rule and 'group_name' in rule:
+ module.fail_json(msg='Specify group_id OR group_name, not both')
+
+
+def get_target_from_rule(module, client, rule, name, group, groups, vpc_id):
+ """
+ Returns tuple of (group_id, ip) after validating rule params.
+
+ rule: Dict describing a rule.
+ name: Name of the security group being managed.
+ groups: Dict of all available security groups.
+
+ AWS accepts an ip range or a security group as the target of a rule. This
+ function validates the rule specification and returns either a non-None
+ group_id or a non-None ip range.
+ """
+
+ FOREIGN_SECURITY_GROUP_REGEX = '^(\S+)/(sg-\S+)/(\S+)'
+ group_id = None
+ group_name = None
+ ip = None
+ ipv6 = None
+ target_group_created = False
+
+ if 'group_id' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg="Specify group_id OR cidr_ip, not both")
+ elif 'group_name' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg="Specify group_name OR cidr_ip, not both")
+ elif 'group_id' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
+ elif 'group_name' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
+ elif 'group_id' in rule and 'group_name' in rule:
+ module.fail_json(msg="Specify group_id OR group_name, not both")
+ elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
+ elif rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
+ # this is a foreign Security Group. Since you can't fetch it you must create an instance of it
+ owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
+ group_instance = dict(GroupId=group_id, GroupName=group_name)
+ groups[group_id] = group_instance
+ groups[group_name] = group_instance
+ elif 'group_id' in rule:
+ group_id = rule['group_id']
+ elif 'group_name' in rule:
+ group_name = rule['group_name']
+ if group_name == name:
+ group_id = group['GroupId']
+ groups[group_id] = group
+ groups[group_name] = group
+ elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'):
+ # both are VPC groups, this is ok
+ group_id = groups[group_name]['GroupId']
+ elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')):
+ # both are EC2 classic, this is ok
+ group_id = groups[group_name]['GroupId']
+ else:
+ # if we got here, either the target group does not exist, or there
+ # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC
+ # is bad, so we have to create a new SG because no compatible group
+ # exists
+ if not rule.get('group_desc', '').strip():
+ module.fail_json(msg="group %s will be automatically created by rule %s and "
+ "no description was provided" % (group_name, rule))
+ if not module.check_mode:
+ params = dict(GroupName=group_name, Description=rule['group_desc'])
+ if vpc_id:
+ params['VpcId'] = vpc_id
+ auto_group = client.create_security_group(**params)
+ group_id = auto_group['GroupId']
+ groups[group_id] = auto_group
+ groups[group_name] = auto_group
+ target_group_created = True
+ elif 'cidr_ip' in rule:
+ ip = rule['cidr_ip']
+ elif 'cidr_ipv6' in rule:
+ ipv6 = rule['cidr_ipv6']
+
+ return group_id, ip, ipv6, target_group_created
+
+
+def ports_expand(ports):
+ # takes a list of ports and returns a list of (port_from, port_to)
+ ports_expanded = []
+ for port in ports:
+ if not isinstance(port, str):
+ ports_expanded.append((port,) * 2)
+ elif '-' in port:
+ ports_expanded.append(tuple(p.strip() for p in port.split('-', 1)))
+ else:
+ ports_expanded.append((port.strip(),) * 2)
+
+ return ports_expanded
+
+
+def rule_expand_ports(rule):
+ # takes a rule dict and returns a list of expanded rule dicts
+ if 'ports' not in rule:
+ return [rule]
+
+ ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']]
+
+ rule_expanded = []
+ for from_to in ports_expand(ports):
+ temp_rule = rule.copy()
+ del temp_rule['ports']
+ temp_rule['from_port'], temp_rule['to_port'] = from_to
+ rule_expanded.append(temp_rule)
+
+ return rule_expanded
+
+
+def rules_expand_ports(rules):
+ # takes a list of rules and expands it based on 'ports'
+ if not rules:
+ return rules
+
+ return [rule for rule_complex in rules
+ for rule in rule_expand_ports(rule_complex)]
+
+
+def rule_expand_source(rule, source_type):
+ # takes a rule dict and returns a list of expanded rule dicts for specified source_type
+ sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]]
+ source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name')
+
+ rule_expanded = []
+ for source in sources:
+ temp_rule = rule.copy()
+ for s in source_types_all:
+ temp_rule.pop(s, None)
+ temp_rule[source_type] = source
+ rule_expanded.append(temp_rule)
+
+ return rule_expanded
+
+
+def rule_expand_sources(rule):
+ # takes a rule dict and returns a list of expanded rule dicts
+ source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name') if stype in rule)
+
+ return [r for stype in source_types
+ for r in rule_expand_source(rule, stype)]
+
+
+def rules_expand_sources(rules):
+ # takes a list of rules and expands it based on 'cidr_ip', 'group_id', 'group_name'
+ if not rules:
+ return rules
+
+ return [rule for rule_complex in rules
+ for rule in rule_expand_sources(rule_complex)]
+
+
+def authorize_ip(type, changed, client, group, groupRules,
+ ip, ip_permission, module, rule, ethertype):
+ # If rule already exists, don't later delete it
+ for thisip in ip:
+ rule_id = make_rule_key(type, rule, group['GroupId'], thisip)
+ if rule_id in groupRules:
+ del groupRules[rule_id]
+ else:
+ if not module.check_mode:
+ ip_permission = serialize_ip_grant(rule, thisip, ethertype)
+ if ip_permission:
+ try:
+ if type == "in":
+ client.authorize_security_group_ingress(GroupId=group['GroupId'],
+ IpPermissions=[ip_permission])
+ elif type == "out":
+ client.authorize_security_group_egress(GroupId=group['GroupId'],
+ IpPermissions=[ip_permission])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to authorize %s for ip %s security group '%s' - %s" %
+ (type, thisip, group['GroupName'], e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+ return changed, ip_permission
+
+
+def serialize_group_grant(group_id, rule):
+ permission = {'IpProtocol': rule['proto'],
+ 'FromPort': rule['from_port'],
+ 'ToPort': rule['to_port'],
+ 'UserIdGroupPairs': [{'GroupId': group_id}]}
+
+ return fix_port_and_protocol(permission)
+
+
+def serialize_revoke(grant, rule):
+ permission = dict()
+ fromPort = rule['FromPort'] if 'FromPort' in rule else None
+ toPort = rule['ToPort'] if 'ToPort' in rule else None
+ if 'GroupId' in grant:
+ permission = {'IpProtocol': rule['IpProtocol'],
+ 'FromPort': fromPort,
+ 'ToPort': toPort,
+ 'UserIdGroupPairs': [{'GroupId': grant['GroupId']}]
+ }
+ elif 'CidrIp' in grant:
+ permission = {'IpProtocol': rule['IpProtocol'],
+ 'FromPort': fromPort,
+ 'ToPort': toPort,
+ 'IpRanges': [grant]
+ }
+ elif 'CidrIpv6' in grant:
+ permission = {'IpProtocol': rule['IpProtocol'],
+ 'FromPort': fromPort,
+ 'ToPort': toPort,
+ 'Ipv6Ranges': [grant]
+ }
+ return fix_port_and_protocol(permission)
+
+
+def serialize_ip_grant(rule, thisip, ethertype):
+ permission = {'IpProtocol': rule['proto'],
+ 'FromPort': rule['from_port'],
+ 'ToPort': rule['to_port']}
+ if ethertype == "ipv4":
+ permission['IpRanges'] = [{'CidrIp': thisip}]
+ elif ethertype == "ipv6":
+ permission['Ipv6Ranges'] = [{'CidrIpv6': thisip}]
+
+ return fix_port_and_protocol(permission)
+
+
+def fix_port_and_protocol(permission):
+ for key in ['FromPort', 'ToPort']:
+ if key in permission:
+ if permission[key] is None:
+ del permission[key]
+ else:
+ permission[key] = int(permission[key])
+
+ permission['IpProtocol'] = str(permission['IpProtocol'])
+
+ return permission
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ name=dict(),
+ group_id=dict(),
+ description=dict(),
+ vpc_id=dict(),
+ rules=dict(type='list'),
+ rules_egress=dict(type='list'),
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ purge_rules=dict(default=True, required=False, type='bool'),
+ purge_rules_egress=dict(default=True, required=False, type='bool'),
+ tags=dict(required=False, type='dict', aliases=['resource_tags']),
+ purge_tags=dict(default=True, required=False, type='bool')
+ )
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['name', 'group_id']],
+ required_if=[['state', 'present', ['name']]],
+ )
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 required for this module')
+
+ name = module.params['name']
+ group_id = module.params['group_id']
+ description = module.params['description']
+ vpc_id = module.params['vpc_id']
+ rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules'])))
+ rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules_egress'])))
+ state = module.params.get('state')
+ purge_rules = module.params['purge_rules']
+ purge_rules_egress = module.params['purge_rules_egress']
+ tags = module.params['tags']
+ purge_tags = module.params['purge_tags']
+
+ if state == 'present' and not description:
+ module.fail_json(msg='Must provide description when state is present.')
+
+ changed = False
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+ if not region:
+ module.fail_json(msg="The AWS region must be specified as an "
+ "environment variable or in the AWS credentials "
+ "profile.")
+ client = boto3_conn(module, conn_type='client', resource='ec2', endpoint=ec2_url, region=region, **aws_connect_params)
+ group = None
+ groups = dict()
+ security_groups = []
+ # get all security groups
+ # find if the group is present
+ try:
+ response = get_security_groups_with_backoff(client)
+ security_groups = response.get('SecurityGroups', [])
+ except botocore.exceptions.NoCredentialsError as e:
+ module.fail_json(msg="Error in describe_security_groups: %s" % "Unable to locate credentials", exception=traceback.format_exc())
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Error in describe_security_groups: %s" % e, exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ for sg in security_groups:
+ groups[sg['GroupId']] = sg
+ groupName = sg['GroupName']
+ if groupName in groups:
+ # Prioritise groups from the current VPC
+ # even if current VPC is EC2-Classic
+ if groups[groupName].get('VpcId') == vpc_id:
+ # Group saved already matches current VPC, change nothing
+ pass
+ elif vpc_id is None and groups[groupName].get('VpcId') is None:
+ # We're in EC2 classic, and the group already saved is as well
+ # No VPC groups can be used alongside EC2 classic groups
+ pass
+ else:
+ # the current SG stored has no direct match, so we can replace it
+ groups[groupName] = sg
+ else:
+ groups[groupName] = sg
+
+ if group_id and sg['GroupId'] == group_id:
+ group = sg
+ elif groupName == name and (vpc_id is None or sg.get('VpcId') == vpc_id):
+ group = sg
+
+ # Ensure requested group is absent
+ if state == 'absent':
+ if group:
+ # found a match, delete it
+ try:
+ if not module.check_mode:
+ client.delete_security_group(GroupId=group['GroupId'])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ else:
+ group = None
+ changed = True
+ else:
+ # no match found, no changes required
+ pass
+
+ # Ensure requested group is present
+ elif state == 'present':
+ if group:
+ # existing group
+ if group['Description'] != description:
+ module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting "
+ "and re-creating the security group. Try using state=absent to delete, then rerunning this task.")
+
+ # if the group doesn't exist, create it now
+ else:
+ # no match found, create it
+ if not module.check_mode:
+ params = dict(GroupName=name, Description=description)
+ if vpc_id:
+ params['VpcId'] = vpc_id
+ group = client.create_security_group(**params)
+ # When a group is created, an egress_rule ALLOW ALL
+ # to 0.0.0.0/0 is added automatically but it's not
+ # reflected in the object returned by the AWS API
+ # call. We re-read the group to get an updated object.
+ # Amazon sometimes takes a couple of seconds to update the security group, so wait until it exists.
+ while True:
+ group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ if group.get('VpcId') and not group.get('IpPermissionsEgress'):
+ pass
+ else:
+ break
+
+ changed = True
+
+ if tags is not None:
+ current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
+ tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags)
+ if tags_to_delete:
+ try:
+ client.delete_tags(Resources=[group['GroupId']], Tags=[{'Key': tag} for tag in tags_to_delete])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+
+ # Add/update tags
+ if tags_need_modify:
+ try:
+ client.create_tags(Resources=[group['GroupId']], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+
+ else:
+ module.fail_json(msg="Unsupported state requested: %s" % state)
+
+ # create a lookup for all existing rules on the group
+ ip_permission = []
+ if group:
+ # Manage ingress rules
+ groupRules = {}
+ add_rules_to_lookup(group['IpPermissions'], group['GroupId'], 'in', groupRules)
+ # Now, go through all provided rules and ensure they are there.
+ if rules is not None:
+ for rule in rules:
+ validate_rule(module, rule)
+ group_id, ip, ipv6, target_group_created = get_target_from_rule(module, client, rule, name,
+ group, groups, vpc_id)
+ if target_group_created:
+ changed = True
+
+ if rule['proto'] in ('all', '-1', -1):
+ rule['proto'] = -1
+ rule['from_port'] = None
+ rule['to_port'] = None
+
+ if group_id:
+ rule_id = make_rule_key('in', rule, group['GroupId'], group_id)
+ if rule_id in groupRules:
+ del groupRules[rule_id]
+ else:
+ if not module.check_mode:
+ ip_permission = serialize_group_grant(group_id, rule)
+ if ip_permission:
+ ips = ip_permission
+ if vpc_id:
+ [useridpair.update({'VpcId': vpc_id}) for useridpair in
+ ip_permission.get('UserIdGroupPairs', [])]
+ try:
+ client.authorize_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[ips])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(
+ msg="Unable to authorize ingress for group %s security group '%s' - %s" %
+ (group_id, group['GroupName'], e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+ elif ip:
+ # Convert ip to list we can iterate over
+ if ip and not isinstance(ip, list):
+ ip = [ip]
+
+ changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ip, ip_permission,
+ module, rule, "ipv4")
+ elif ipv6:
+ # Convert ip to list we can iterate over
+ if not isinstance(ipv6, list):
+ ipv6 = [ipv6]
+ # If rule already exists, don't later delete it
+ changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ipv6, ip_permission,
+ module, rule, "ipv6")
+ # Finally, remove anything left in the groupRules -- these will be defunct rules
+ if purge_rules:
+ for (rule, grant) in groupRules.values():
+ ip_permission = serialize_revoke(grant, rule)
+ if not module.check_mode:
+ try:
+ client.revoke_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[ip_permission])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(
+ msg="Unable to revoke ingress for security group '%s' - %s" %
+ (group['GroupName'], e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+
+ # Manage egress rules
+ groupRules = {}
+ add_rules_to_lookup(group['IpPermissionsEgress'], group['GroupId'], 'out', groupRules)
+ # Now, go through all provided rules and ensure they are there.
+ if rules_egress is not None:
+ for rule in rules_egress:
+ validate_rule(module, rule)
+ group_id, ip, ipv6, target_group_created = get_target_from_rule(module, client, rule, name,
+ group, groups, vpc_id)
+ if target_group_created:
+ changed = True
+
+ if rule['proto'] in ('all', '-1', -1):
+ rule['proto'] = -1
+ rule['from_port'] = None
+ rule['to_port'] = None
+
+ if group_id:
+ rule_id = make_rule_key('out', rule, group['GroupId'], group_id)
+ if rule_id in groupRules:
+ del groupRules[rule_id]
+ else:
+ if not module.check_mode:
+ ip_permission = serialize_group_grant(group_id, rule)
+ if ip_permission:
+ ips = ip_permission
+ if vpc_id:
+ [useridpair.update({'VpcId': vpc_id}) for useridpair in
+ ip_permission.get('UserIdGroupPairs', [])]
+ try:
+ client.authorize_security_group_egress(GroupId=group['GroupId'], IpPermissions=[ips])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(
+ msg="Unable to authorize egress for group %s security group '%s' - %s" %
+ (group_id, group['GroupName'], e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+ elif ip:
+ # Convert ip to list we can iterate over
+ if not isinstance(ip, list):
+ ip = [ip]
+ changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ip,
+ ip_permission, module, rule, "ipv4")
+ elif ipv6:
+ # Convert ip to list we can iterate over
+ if not isinstance(ipv6, list):
+ ipv6 = [ipv6]
+ # If rule already exists, don't later delete it
+ changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ipv6,
+ ip_permission, module, rule, "ipv6")
+ elif vpc_id is not None:
+ # when no egress rules are specified and we're in a VPC,
+ # we add in a default allow all out rule, which was the
+ # default behavior before egress rules were added
+ default_egress_rule = 'out--1-None-None-' + group['GroupId'] + '-0.0.0.0/0'
+ if default_egress_rule not in groupRules:
+ if not module.check_mode:
+ ip_permission = [{'IpProtocol': '-1',
+ 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]
+ }
+ ]
+ try:
+ client.authorize_security_group_egress(GroupId=group['GroupId'], IpPermissions=ip_permission)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to authorize egress for ip %s security group '%s' - %s" %
+ ('0.0.0.0/0',
+ group['GroupName'],
+ e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+ else:
+ # make sure the default egress rule is not removed
+ del groupRules[default_egress_rule]
+
+ # Finally, remove anything left in the groupRules -- these will be defunct rules
+ if purge_rules_egress and vpc_id is not None:
+ for (rule, grant) in groupRules.values():
+ # we shouldn't be revoking 0.0.0.0 egress
+ if grant != '0.0.0.0/0':
+ ip_permission = serialize_revoke(grant, rule)
+ if not module.check_mode:
+ try:
+ client.revoke_security_group_egress(GroupId=group['GroupId'], IpPermissions=[ip_permission])
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Unable to revoke egress for ip %s security group '%s' - %s" %
+ (grant, group['GroupName'], e),
+ exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ changed = True
+
+ if group:
+ security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ security_group = camel_dict_to_snake_dict(security_group)
+ security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []),
+ tag_name_key_name='key', tag_value_key_name='value')
+ module.exit_json(changed=changed, **security_group)
+ else:
+ module.exit_json(changed=changed, group_id=None)
+
+
+if __name__ == '__main__':
+ main()
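The EXAMPLES block above uses the 'ports' rule keyword, which the module normalizes through its ports_expand and rule_expand_ports helpers before comparing rules against what already exists in AWS. As a rough illustration, here is a minimal standalone Python sketch that mirrors those helpers (it re-states the logic shown in the diff rather than importing the module, and uses made-up rule data):

```python
# Re-statement of the port-expansion helpers from oo_ec2_group.py (illustrative only).
def ports_expand(ports):
    # A bare port becomes (port, port); a "from-to" string becomes (from, to).
    expanded = []
    for port in ports:
        if not isinstance(port, str):
            expanded.append((port,) * 2)
        elif '-' in port:
            expanded.append(tuple(p.strip() for p in port.split('-', 1)))
        else:
            expanded.append((port.strip(),) * 2)
    return expanded

def rule_expand_ports(rule):
    # One rule carrying a 'ports' list becomes one rule per (from_port, to_port) pair.
    if 'ports' not in rule:
        return [rule]
    ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']]
    expanded = []
    for from_port, to_port in ports_expand(ports):
        new_rule = {k: v for k, v in rule.items() if k != 'ports'}
        new_rule['from_port'], new_rule['to_port'] = from_port, to_port
        expanded.append(new_rule)
    return expanded

# Hypothetical rule, as it might appear under 'rules:' in a playbook.
print(rule_expand_ports({'proto': 'tcp', 'ports': [80, 443, '8080-8099'], 'cidr_ip': '0.0.0.0/0'}))
# -> three rules: ports (80, 80), (443, 443) and ('8080', '8099')
```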
diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml
index 410b739e9..7b55dda56 100644
--- a/roles/nuage_master/handlers/main.yaml
+++ b/roles/nuage_master/handlers/main.yaml
@@ -1,21 +1,19 @@
---
- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
when: >
(openshift_master_ha | bool) and
- (not master_api_service_status_changed | default(false)) and
- openshift.master.cluster_method == 'native'
+ (not master_api_service_status_changed | default(false))
# TODO: need to fix up ignore_errors here
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
until: result.rc == 0
when: >
(openshift_master_ha | bool) and
- (not master_controllers_service_status_changed | default(false)) and
- openshift.master.cluster_method == 'native'
+ (not master_controllers_service_status_changed | default(false))
ignore_errors: yes
diff --git a/roles/nuage_node/handlers/main.yaml b/roles/nuage_node/handlers/main.yaml
index e68ae74bd..ede6f2125 100644
--- a/roles/nuage_node/handlers/main.yaml
+++ b/roles/nuage_node/handlers/main.yaml
@@ -1,7 +1,7 @@
---
- name: restart node
become: yes
- systemd: name={{ openshift.common.service_type }}-node daemon-reload=yes state=restarted
+ systemd: name={{ openshift_service_type }}-node daemon-reload=yes state=restarted
- name: save iptable rules
become: yes
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
index fdf01b7c2..88d62de49 100644
--- a/roles/nuage_node/vars/main.yaml
+++ b/roles/nuage_node/vars/main.yaml
@@ -23,5 +23,5 @@ cni_conf_dir: "/etc/cni/net.d/"
cni_bin_dir: "/opt/cni/bin/"
nuage_plugin_crt_dir: /usr/share/vsp-openshift
-openshift_atomic_node_config_file: /etc/sysconfig/{{ openshift.common.service_type }}-node
+openshift_atomic_node_config_file: /etc/sysconfig/{{ openshift_service_type }}-node
nuage_atomic_docker_additional_mounts: "NUAGE_ADDTL_BIND_MOUNTS=-v /var/usr/share/vsp-openshift:/var/usr/share/vsp-openshift -v /etc/default:/etc/default -v /var/run:/var/run -v /opt/cni/bin:/opt/cni/bin -v /etc/cni/net.d:/etc/cni/net.d"
diff --git a/roles/openshift_atomic/README.md b/roles/openshift_atomic/README.md
deleted file mode 100644
index 8c10c9991..000000000
--- a/roles/openshift_atomic/README.md
+++ /dev/null
@@ -1,28 +0,0 @@
-OpenShift Atomic
-================
-
-This role houses atomic specific tasks.
-
-Requirements
-------------
-
-Role Variables
---------------
-
-Dependencies
-------------
-
-Example Playbook
-----------------
-
-```
-- name: Ensure atomic proxies are defined
- hosts: localhost
- roles:
- - role: openshift_atomic
-```
-
-License
--------
-
-Apache License Version 2.0
diff --git a/roles/openshift_atomic/meta/main.yml b/roles/openshift_atomic/meta/main.yml
deleted file mode 100644
index ea129f514..000000000
--- a/roles/openshift_atomic/meta/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-galaxy_info:
- author: OpenShift
- description: Atomic related tasks
- company: Red Hat, Inc
- license: ASL 2.0
- min_ansible_version: 2.2
- platforms:
- - name: EL
- versions:
- - 7
-dependencies:
-- role: lib_openshift
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 42ef22846..74e5d1dde 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -6,9 +6,7 @@ openshift_aws_create_security_groups: True
openshift_aws_create_launch_config: True
openshift_aws_create_scale_group: True
-openshift_aws_current_version: ''
-openshift_aws_new_version: ''
-
+openshift_aws_node_group_upgrade: False
openshift_aws_wait_for_ssh: True
openshift_aws_clusterid: default
@@ -19,7 +17,6 @@ openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"
openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
openshift_aws_iam_cert_path: ''
openshift_aws_iam_cert_key_path: ''
-openshift_aws_scale_group_basename: "{{ openshift_aws_clusterid }} openshift"
openshift_aws_iam_role_name: openshift_node_describe_instances
openshift_aws_iam_role_policy_json: "{{ lookup('file', 'describeinstances.json') }}"
@@ -34,14 +31,12 @@ openshift_aws_ami_name: openshift-gi
openshift_aws_base_ami_name: ami_base
openshift_aws_launch_config_bootstrap_token: ''
-openshift_aws_launch_config_basename: "{{ openshift_aws_clusterid }}"
openshift_aws_users: []
openshift_aws_ami_tags:
bootstrap: "true"
openshift-created: "true"
- clusterid: "{{ openshift_aws_clusterid }}"
parent: "{{ openshift_aws_base_ami | default('unknown') }}"
openshift_aws_s3_mode: create
@@ -124,6 +119,20 @@ openshift_aws_ami_map:
infra: "{{ openshift_aws_ami }}"
compute: "{{ openshift_aws_ami }}"
+openshift_aws_master_group:
+- name: "{{ openshift_aws_clusterid }} master group"
+ group: master
+
+openshift_aws_node_groups:
+- name: "{{ openshift_aws_clusterid }} compute group"
+ group: compute
+- name: "{{ openshift_aws_clusterid }} infra group"
+ group: infra
+
+openshift_aws_created_asgs: []
+openshift_aws_current_asgs: []
+
+# these will be used during upgrade
openshift_aws_master_group_config:
# The 'master' key is always required here.
master:
@@ -139,7 +148,6 @@ openshift_aws_master_group_config:
host-type: master
sub-host-type: default
runtime: docker
- version: "{{ openshift_aws_new_version }}"
wait_for_instances: True
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
@@ -163,7 +171,6 @@ openshift_aws_node_group_config:
host-type: node
sub-host-type: compute
runtime: docker
- version: "{{ openshift_aws_new_version }}"
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
iam_role: "{{ openshift_aws_iam_role_name }}"
@@ -183,7 +190,6 @@ openshift_aws_node_group_config:
host-type: node
sub-host-type: infra
runtime: docker
- version: "{{ openshift_aws_new_version }}"
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
iam_role: "{{ openshift_aws_iam_role_name }}"
@@ -283,21 +289,4 @@ openshift_aws_node_run_bootstrap_startup: True
openshift_aws_node_user_data: ''
openshift_aws_node_config_namespace: openshift-node
-openshift_aws_node_groups: nodes
-
openshift_aws_masters_groups: masters,etcd,nodes
-
-# If creating extra node groups, you'll need to define all of the following
-
-# The format is the same as openshift_aws_node_group_config, but the top-level
-# key names should be different (ie, not == master or infra).
-# openshift_aws_node_group_config_extra: {}
-
-# This variable should look like openshift_aws_launch_config_security_groups
-# and contain a one-to-one mapping of top level keys that are defined in
-# openshift_aws_node_group_config_extra.
-# openshift_aws_launch_config_security_groups_extra: {}
-
-# openshift_aws_node_security_groups_extra: {}
-
-# openshift_aws_ami_map_extra: {}
diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
index e707abd3f..dfcb11da3 100644
--- a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
+++ b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
@@ -4,11 +4,43 @@
Custom filters for use in openshift_aws
'''
+from ansible import errors
+
class FilterModule(object):
''' Custom ansible filters for use by openshift_aws role'''
@staticmethod
+ def scale_groups_serial(scale_group_info, upgrade=False):
+ ''' This function will determine what the deployment serial should be and return it
+
+ Search through the tags and find the deployment_serial tag. Once found,
+ determine if an increment is needed during an upgrade.
+ If upgrade is true, increment the serial and return it;
+ otherwise return the serial unchanged.
+ '''
+ if scale_group_info == []:
+ return 1
+
+ scale_group_info = scale_group_info[0]
+
+ if not isinstance(scale_group_info, dict):
+ raise errors.AnsibleFilterError("|filter plugin failed: Expected scale_group_info to be a dict")
+
+ serial = None
+
+ for tag in scale_group_info['tags']:
+ if tag['key'] == 'deployment_serial':
+ serial = int(tag['value'])
+ if upgrade:
+ serial += 1
+ break
+ else:
+ raise errors.AnsibleFilterError("|filter plugin failed: deployment_serial tag was not found")
+
+ return serial
+
+ @staticmethod
def scale_groups_match_capacity(scale_group_info):
''' This function will verify that the scale group instance count matches
the scale group desired capacity
@@ -38,4 +70,5 @@ class FilterModule(object):
def filters(self):
''' returns a mapping of filters to methods '''
return {'build_instance_tags': self.build_instance_tags,
- 'scale_groups_match_capacity': self.scale_groups_match_capacity}
+ 'scale_groups_match_capacity': self.scale_groups_match_capacity,
+ 'scale_groups_serial': self.scale_groups_serial}
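The scale_groups_serial filter added above derives the deployment serial from existing scale group tags: an empty result set means a first deployment (serial 1); otherwise the deployment_serial tag of the first group is read and incremented only when upgrade is true. A minimal standalone sketch of that decision, using made-up tag data shaped like an ec2_asg_facts result (not the plugin file itself):

```python
# Illustrative re-statement of the serial logic; the real filter also validates
# the input type and raises AnsibleFilterError when the tag is missing.
def scale_groups_serial(scale_group_info, upgrade=False):
    if scale_group_info == []:
        return 1                      # no existing ASGs: first deployment
    for tag in scale_group_info[0]['tags']:
        if tag['key'] == 'deployment_serial':
            serial = int(tag['value'])
            return serial + 1 if upgrade else serial
    raise ValueError('deployment_serial tag was not found')

# Hypothetical ASG facts for a cluster already on serial 3.
asgs = [{'tags': [{'key': 'deployment_serial', 'value': '3'}]}]
print(scale_groups_serial([]))           # 1
print(scale_groups_serial(asgs))         # 3
print(scale_groups_serial(asgs, True))   # 4
```

In the role the filter is invoked as `asgs.results | scale_groups_serial(openshift_aws_node_group_upgrade)`, as shown in the build_node_group.yml change further down.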
diff --git a/roles/openshift_aws/tasks/accept_nodes.yml b/roles/openshift_aws/tasks/accept_nodes.yml
index ae320962f..c2a2cea30 100644
--- a/roles/openshift_aws/tasks/accept_nodes.yml
+++ b/roles/openshift_aws/tasks/accept_nodes.yml
@@ -1,6 +1,6 @@
---
- name: fetch masters
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
"{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
@@ -11,7 +11,7 @@
until: "'instances' in mastersout and mastersout.instances|length > 0"
- name: fetch new node instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
@@ -22,9 +22,14 @@
delay: 3
until: "'instances' in instancesout and instancesout.instances|length > 0"
-- debug:
+- name: Dump the private dns names
+ debug:
msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
+- name: Dump the master public ip address
+ debug:
+ msg: "{{ mastersout.instances[0].public_ip_address }}"
+
- name: approve nodes
oc_adm_csr:
#approve_all: True
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
index 7e8e9b679..7fb617dd5 100644
--- a/roles/openshift_aws/tasks/build_node_group.yml
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -1,6 +1,4 @@
---
-# This task file expects l_nodes_to_build to be passed in.
-
# When openshift_aws_use_custom_ami is '' then
# we retrieve the latest build AMI.
# Then set openshift_aws_ami to the ami.
@@ -26,12 +24,41 @@
# Need to set epoch time in one place to use for launch_config and scale_group
- set_fact:
l_epoch_time: "{{ ansible_date_time.epoch }}"
+#
+# Query the existing ASGs and determine whether the others need to be created.
+# If we find more than one ASG for a given type, exit.
+- name: query all asg's for this cluster
+ ec2_asg_facts:
+ region: "{{ openshift_aws_region }}"
+ tags: "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} | combine(l_node_group_config[openshift_aws_node_group.group].tags) }}"
+ register: asgs
+
+- fail:
+ msg: "Found more than 1 auto scaling group that matches the query for group: {{ openshift_aws_node_group }}"
+ when:
+ - asgs.results|length > 1
+
+- debug:
+ msg: "{{ asgs }}"
+
+- name: set the value for the deployment_serial and the current asgs
+ set_fact:
+ l_deployment_serial: "{{ openshift_aws_node_group_deployment_serial if openshift_aws_node_group_deployment_serial is defined else asgs.results | scale_groups_serial(openshift_aws_node_group_upgrade) }}"
+ openshift_aws_current_asgs: "{{ asgs.results | map(attribute='auto_scaling_group_name') | list | union(openshift_aws_current_asgs) }}"
+
+- name: dump deployment serial
+ debug:
+ msg: "Deployment serial: {{ l_deployment_serial }}"
+
+- name: dump current_asgs
+ debug:
+ msg: "openshift_aws_current_asgs: {{ openshift_aws_current_asgs }}"
- when: openshift_aws_create_iam_role
- include: iam_role.yml
+ include_tasks: iam_role.yml
- when: openshift_aws_create_launch_config
- include: launch_config.yml
+ include_tasks: launch_config.yml
- when: openshift_aws_create_scale_group
- include: scale_group.yml
+ include_tasks: scale_group.yml
diff --git a/roles/openshift_aws/tasks/iam_role.yml b/roles/openshift_aws/tasks/iam_role.yml
index d9910d938..cf3bb28fb 100644
--- a/roles/openshift_aws/tasks/iam_role.yml
+++ b/roles/openshift_aws/tasks/iam_role.yml
@@ -13,11 +13,10 @@
#####
- name: Create an iam role
iam_role:
- name: "{{ item.value.iam_role }}"
+ name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role }}"
assume_role_policy_document: "{{ lookup('file','trustpolicy.json') }}"
state: "{{ openshift_aws_iam_role_state | default('present') }}"
- when: item.value.iam_role is defined
- with_dict: "{{ l_nodes_to_build }}"
+ when: l_node_group_config[openshift_aws_node_group.group].iam_role is defined
#####
# The second part of this task file is linking the role to a policy
@@ -28,9 +27,8 @@
- name: create an iam policy
iam_policy:
iam_type: role
- iam_name: "{{ item.value.iam_role }}"
- policy_json: "{{ item.value.policy_json }}"
- policy_name: "{{ item.value.policy_name }}"
+ iam_name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role }}"
+ policy_json: "{{ l_node_group_config[openshift_aws_node_group.group].policy_json }}"
+ policy_name: "{{ l_node_group_config[openshift_aws_node_group.group].policy_name }}"
state: "{{ openshift_aws_iam_role_state | default('present') }}"
- when: item.value.iam_role is defined
- with_dict: "{{ l_nodes_to_build }}"
+ when: "'iam_role' in l_node_group_config[openshift_aws_node_group.group]"
diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml
index 0dbeba5a0..6f78ee00a 100644
--- a/roles/openshift_aws/tasks/launch_config.yml
+++ b/roles/openshift_aws/tasks/launch_config.yml
@@ -1,15 +1,26 @@
---
-- fail:
- msg: "Ensure that an AMI value is defined for openshift_aws_ami or openshift_aws_launch_config_custom_image."
- when:
- - openshift_aws_ami is undefined
+- name: fetch the security groups for launch config
+ ec2_group_facts:
+ filters:
+ group-name: "{{ openshift_aws_launch_config_security_groups[openshift_aws_node_group.group] }}"
+ vpc-id: "{{ vpcout.vpcs[0].id }}"
+ region: "{{ openshift_aws_region }}"
+ register: ec2sgs
-- fail:
- msg: "Ensure that openshift_deployment_type is defined."
- when:
- - openshift_deployment_type is undefined
-
-- include: launch_config_create.yml
- with_dict: "{{ l_nodes_to_build }}"
- loop_control:
- loop_var: launch_config_item
+# Create the scale group config
+- name: Create the node scale group launch config
+ ec2_lc:
+ name: "{{ openshift_aws_node_group.name }}-{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}-{{ l_epoch_time }}"
+ region: "{{ openshift_aws_region }}"
+ image_id: "{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}"
+ instance_type: "{{ l_node_group_config[openshift_aws_node_group.group].instance_type }}"
+ security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}"
+ instance_profile_name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role if l_node_group_config[openshift_aws_node_group.group].iam_role is defined and
+ l_node_group_config[openshift_aws_node_group.group].iam_role != '' and
+ openshift_aws_create_iam_role
+ else omit }}"
+ user_data: "{{ lookup('template', 'user_data.j2') }}"
+ key_name: "{{ openshift_aws_ssh_key_name }}"
+ ebs_optimized: False
+ volumes: "{{ l_node_group_config[openshift_aws_node_group.group].volumes }}"
+ assign_public_ip: True
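
The reworked launch_config.yml resolves the node group's security groups by name inside the cluster VPC and hands their ids to ec2_lc. A small sketch of that id extraction, assuming a registered ec2_group_facts result; the region, group name, and VPC id are placeholders:

    - name: fetch the security groups by name (placeholder values)
      ec2_group_facts:
        region: us-east-1
        filters:
          group-name: "mycluster"               # hypothetical security group name
          vpc-id: "vpc-0123456789abcdef0"       # hypothetical VPC id
      register: ec2sgs

    - name: show the group ids that would be passed to ec2_lc
      debug:
        msg: "{{ ec2sgs.security_groups | map(attribute='group_id') | list }}"
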
diff --git a/roles/openshift_aws/tasks/launch_config_create.yml b/roles/openshift_aws/tasks/launch_config_create.yml
deleted file mode 100644
index f7f0f0953..000000000
--- a/roles/openshift_aws/tasks/launch_config_create.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: fetch the security groups for launch config
- ec2_group_facts:
- filters:
- group-name: "{{ l_launch_config_security_groups[launch_config_item.key] }}"
- vpc-id: "{{ vpcout.vpcs[0].id }}"
- region: "{{ openshift_aws_region }}"
- register: ec2sgs
-
-# Create the scale group config
-- name: Create the node scale group launch config
- ec2_lc:
- name: "{{ openshift_aws_launch_config_basename }}-{{ launch_config_item.key }}{{'-' ~ openshift_aws_new_version if openshift_aws_new_version != '' else '' }}"
- region: "{{ openshift_aws_region }}"
- image_id: "{{ l_aws_ami_map[launch_config_item.key] | default(openshift_aws_ami) }}"
- instance_type: "{{ launch_config_item.value.instance_type }}"
- security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}"
- instance_profile_name: "{{ launch_config_item.value.iam_role if launch_config_item.value.iam_role is defined and
- launch_config_item.value.iam_role != '' and
- openshift_aws_create_iam_role
- else omit }}"
- user_data: "{{ lookup('template', 'user_data.j2') }}"
- key_name: "{{ openshift_aws_ssh_key_name }}"
- ebs_optimized: False
- volumes: "{{ launch_config_item.value.volumes }}"
- assign_public_ip: True
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
index 91538ed5c..786a2e4cf 100644
--- a/roles/openshift_aws/tasks/provision.yml
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -1,16 +1,16 @@
---
- when: openshift_aws_create_iam_cert | bool
name: create the iam_cert for elb certificate
- include: iam_cert.yml
+ include_tasks: iam_cert.yml
- when: openshift_aws_create_s3 | bool
name: create s3 bucket for registry
- include: s3.yml
+ include_tasks: s3.yml
-- include: vpc_and_subnet_id.yml
+- include_tasks: vpc_and_subnet_id.yml
- name: create elbs
- include: elb.yml
+ include_tasks: elb.yml
with_dict: "{{ openshift_aws_elb_dict }}"
vars:
l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
@@ -19,14 +19,15 @@
loop_var: l_elb_dict_item
- name: include scale group creation for master
- include: build_node_group.yml
+ include_tasks: build_node_group.yml
+ with_items: "{{ openshift_aws_master_group }}"
vars:
- l_nodes_to_build: "{{ openshift_aws_master_group_config }}"
- l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
- l_aws_ami_map: "{{ openshift_aws_ami_map }}"
+ l_node_group_config: "{{ openshift_aws_master_group_config }}"
+ loop_control:
+ loop_var: openshift_aws_node_group
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid }}"
diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml
index 3349acb7a..696b323c0 100644
--- a/roles/openshift_aws/tasks/provision_instance.yml
+++ b/roles/openshift_aws/tasks/provision_instance.yml
@@ -3,7 +3,7 @@
set_fact:
openshift_node_bootstrap: True
-- include: vpc_and_subnet_id.yml
+- include_tasks: vpc_and_subnet_id.yml
- name: create instance for ami creation
ec2:
@@ -27,7 +27,7 @@
Name: "{{ openshift_aws_base_ami_name }}"
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:Name": "{{ openshift_aws_base_ami_name }}"
diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml
index 3e84666a2..d82f18574 100644
--- a/roles/openshift_aws/tasks/provision_nodes.yml
+++ b/roles/openshift_aws/tasks/provision_nodes.yml
@@ -3,7 +3,7 @@
# bootstrap should be created on first master
# need to fetch it and shove it into cloud data
- name: fetch master instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid }}"
@@ -27,24 +27,19 @@
set_fact:
openshift_aws_launch_config_bootstrap_token: "{{ bootstrap['content'] | b64decode }}"
-- include: vpc_and_subnet_id.yml
+- include_tasks: vpc_and_subnet_id.yml
- name: include build compute and infra node groups
- include: build_node_group.yml
+ include_tasks: build_node_group.yml
+ with_items: "{{ openshift_aws_node_groups }}"
vars:
- l_nodes_to_build: "{{ openshift_aws_node_group_config }}"
- l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
- l_aws_ami_map: "{{ openshift_aws_ami_map }}"
-
-- name: include build node group for extra nodes
- include: build_node_group.yml
- when: openshift_aws_node_group_config_extra is defined
- vars:
- l_nodes_to_build: "{{ openshift_aws_node_group_config_extra | default({}) }}"
- l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups_extra }}"
- l_aws_ami_map: "{{ openshift_aws_ami_map_extra }}"
+ l_node_group_config: "{{ openshift_aws_node_group_config }}"
+ loop_control:
+ loop_var: openshift_aws_node_group
# instances aren't scaling fast enough here, we need to wait for them
- when: openshift_aws_wait_for_ssh | bool
name: wait for our new nodes to come up
- include: wait_for_groups.yml
+ include_tasks: wait_for_groups.yml
+ vars:
+ created_asgs: "{{ openshift_aws_created_asgs }}"
diff --git a/roles/openshift_aws/tasks/remove_scale_group.yml b/roles/openshift_aws/tasks/remove_scale_group.yml
index 55d1af2b5..a01cde294 100644
--- a/roles/openshift_aws/tasks/remove_scale_group.yml
+++ b/roles/openshift_aws/tasks/remove_scale_group.yml
@@ -1,10 +1,13 @@
---
+# FIGURE OUT HOW TO REMOVE SCALE GROUPS
+# use openshift_aws_current_asgs??
- name: fetch the scale groups
ec2_asg_facts:
region: "{{ openshift_aws_region }}"
+ name: "^{{ item }}$"
tags:
- "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
- 'version': openshift_aws_current_version} }}"
+ "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} }}"
+ with_items: "{{ openshift_aws_current_asgs if openshift_aws_current_asgs != [] else openshift_aws_asgs_to_remove }}"
register: qasg
- name: remove non-master scale groups
@@ -14,7 +17,7 @@
name: "{{ item.auto_scaling_group_name }}"
when: "'master' not in item.auto_scaling_group_name"
register: asg_results
- with_items: "{{ qasg.results }}"
+ with_items: "{{ qasg | json_query('results[*]') | sum(attribute='results', start=[]) }}"
async: 600
poll: 0
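
Because ec2_asg_facts now runs once per item, the registered qasg holds one entry per loop pass, each with its own results list, so the delete task flattens two levels before iterating. A sketch of that flattening; the l_all_asgs name exists only for this sketch and the registered structure is illustrative:

    - name: flatten the per-item ASG lookups into one list
      set_fact:
        l_all_asgs: "{{ qasg | json_query('results[*]') | sum(attribute='results', start=[]) }}"

    - name: iterate the flattened scale groups
      debug:
        msg: "{{ item.auto_scaling_group_name }}"
      with_items: "{{ l_all_asgs }}"
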
diff --git a/roles/openshift_aws/tasks/s3.yml b/roles/openshift_aws/tasks/s3.yml
index 9cf37c840..ba70bcff6 100644
--- a/roles/openshift_aws/tasks/s3.yml
+++ b/roles/openshift_aws/tasks/s3.yml
@@ -1,6 +1,6 @@
---
- name: Create an s3 bucket
- s3:
+ aws_s3:
bucket: "{{ openshift_aws_s3_bucket_name }}"
mode: "{{ openshift_aws_s3_mode }}"
region: "{{ openshift_aws_region }}"
diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml
index 30df7545d..3632f7ce9 100644
--- a/roles/openshift_aws/tasks/scale_group.yml
+++ b/roles/openshift_aws/tasks/scale_group.yml
@@ -1,20 +1,30 @@
---
+- name: set node group name
+ set_fact:
+ l_node_group_name: "{{ openshift_aws_node_group.name }} {{ l_deployment_serial }}"
+
- name: Create the scale group
ec2_asg:
- name: "{{ openshift_aws_scale_group_basename }} {{ item.key }}"
- launch_config_name: "{{ openshift_aws_launch_config_basename }}-{{ item.key }}{{ '-' ~ openshift_aws_new_version if openshift_aws_new_version != '' else '' }}"
- health_check_period: "{{ item.value.health_check.period }}"
- health_check_type: "{{ item.value.health_check.type }}"
- min_size: "{{ item.value.min_size }}"
- max_size: "{{ item.value.max_size }}"
- desired_capacity: "{{ item.value.desired_size }}"
+ name: "{{ l_node_group_name }}"
+ launch_config_name: "{{ openshift_aws_node_group.name }}-{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}-{{ l_epoch_time }}"
+ health_check_period: "{{ l_node_group_config[openshift_aws_node_group.group].health_check.period }}"
+ health_check_type: "{{ l_node_group_config[openshift_aws_node_group.group].health_check.type }}"
+ min_size: "{{ l_node_group_config[openshift_aws_node_group.group].min_size }}"
+ max_size: "{{ l_node_group_config[openshift_aws_node_group.group].max_size }}"
+ desired_capacity: "{{ l_node_group_config[openshift_aws_node_group.group].desired_size }}"
region: "{{ openshift_aws_region }}"
- termination_policies: "{{ item.value.termination_policy if 'termination_policy' in item.value else omit }}"
- load_balancers: "{{ item.value.elbs if 'elbs' in item.value else omit }}"
- wait_for_instances: "{{ item.value.wait_for_instances | default(False)}}"
+ termination_policies: "{{ l_node_group_config[openshift_aws_node_group.group].termination_policy if 'termination_policy' in l_node_group_config[openshift_aws_node_group.group] else omit }}"
+ load_balancers: "{{ l_node_group_config[openshift_aws_node_group.group].elbs if 'elbs' in l_node_group_config[openshift_aws_node_group.group] else omit }}"
+ wait_for_instances: "{{ l_node_group_config[openshift_aws_node_group.group].wait_for_instances | default(False)}}"
vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
replace_instances: "{{ openshift_aws_node_group_replace_instances if openshift_aws_node_group_replace_instances != [] else omit }}"
- replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != [] else (item.value.replace_all_instances | default(omit)) }}"
+ replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != []
+ else (l_node_group_config[openshift_aws_node_group.group].replace_all_instances | default(omit)) }}"
tags:
- - "{{ openshift_aws_node_group_config_tags | combine(item.value.tags) }}"
- with_dict: "{{ l_nodes_to_build }}"
+ - "{{ openshift_aws_node_group_config_tags
+ | combine(l_node_group_config[openshift_aws_node_group.group].tags)
+ | combine({'deployment_serial': l_deployment_serial, 'ami': openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami)}) }}"
+
+- name: append the asg name to the openshift_aws_created_asgs fact
+ set_fact:
+ openshift_aws_created_asgs: "{{ [l_node_group_name] | union(openshift_aws_created_asgs) | list }}"
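
Each pass through scale_group.yml unions the group it just created into the openshift_aws_created_asgs fact so later plays can tell new groups from current ones. The accumulation pattern in isolation, with placeholder ASG names:

    - hosts: localhost
      gather_facts: false
      vars:
        openshift_aws_created_asgs: []
      tasks:
      - name: record each created ASG name exactly once
        set_fact:
          openshift_aws_created_asgs: "{{ [item] | union(openshift_aws_created_asgs) | list }}"
        with_items:
        - "mycluster openshift compute 1"   # hypothetical ASG names
        - "mycluster openshift infra 1"

      - debug:
          var: openshift_aws_created_asgs   # -> both names, no duplicates
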
diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml
index 0cb749dcc..74877d5c7 100644
--- a/roles/openshift_aws/tasks/seal_ami.yml
+++ b/roles/openshift_aws/tasks/seal_ami.yml
@@ -1,6 +1,6 @@
---
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:Name": "{{ openshift_aws_base_ami_name }}"
@@ -12,7 +12,7 @@
- name: bundle ami
ec2_ami:
- instance_id: "{{ instancesout.instances.0.id }}"
+ instance_id: "{{ instancesout.instances.0.instance_id }}"
region: "{{ openshift_aws_region }}"
state: present
description: "This was provisioned {{ ansible_date_time.iso8601 }}"
@@ -31,7 +31,7 @@
source-ami: "{{ amioutput.image_id }}"
- name: copy the ami for encrypted disks
- include: ami_copy.yml
+ include_tasks: ami_copy.yml
vars:
openshift_aws_ami_copy_name: "{{ openshift_aws_ami_name }}-encrypted"
openshift_aws_ami_copy_src_ami: "{{ amioutput.image_id }}"
@@ -46,4 +46,4 @@
ec2:
state: absent
region: "{{ openshift_aws_region }}"
- instance_ids: "{{ instancesout.instances.0.id }}"
+ instance_ids: "{{ instancesout.instances.0.instance_id }}"
diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml
index 5cc7ae537..0568a0190 100644
--- a/roles/openshift_aws/tasks/security_group.yml
+++ b/roles/openshift_aws/tasks/security_group.yml
@@ -6,11 +6,27 @@
"tag:Name": "{{ openshift_aws_clusterid }}"
register: vpcout
-- include: security_group_create.yml
- vars:
- l_security_groups: "{{ openshift_aws_node_security_groups }}"
+- name: create the node group sgs
+ oo_ec2_group:
+ name: "{{ item.value.name}}"
+ description: "{{ item.value.desc }}"
+ rules: "{{ item.value.rules if 'rules' in item.value else [] }}"
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
+
+- name: create the k8s sgs for the node group
+ oo_ec2_group:
+ name: "{{ item.value.name }}_k8s"
+ description: "{{ item.value.desc }} for k8s"
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
+ register: k8s_sg_create
-- include: security_group_create.yml
- when: openshift_aws_node_security_groups_extra is defined
- vars:
- l_security_groups: "{{ openshift_aws_node_security_groups_extra | default({}) }}"
+- name: tag sg groups with proper tags
+ ec2_tag:
+ tags: "{{ openshift_aws_security_groups_tags }}"
+ resource: "{{ item.group_id }}"
+ region: "{{ openshift_aws_region }}"
+ with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws/tasks/security_group_create.yml b/roles/openshift_aws/tasks/security_group_create.yml
deleted file mode 100644
index ef6060555..000000000
--- a/roles/openshift_aws/tasks/security_group_create.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: create the node group sgs
- ec2_group:
- name: "{{ item.value.name}}"
- description: "{{ item.value.desc }}"
- rules: "{{ item.value.rules if 'rules' in item.value else [] }}"
- region: "{{ openshift_aws_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- with_dict: "{{ l_security_groups }}"
-
-- name: create the k8s sgs for the node group
- ec2_group:
- name: "{{ item.value.name }}_k8s"
- description: "{{ item.value.desc }} for k8s"
- region: "{{ openshift_aws_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- with_dict: "{{ l_security_groups }}"
- register: k8s_sg_create
-
-- name: tag sg groups with proper tags
- ec2_tag:
- tags: "{{ openshift_aws_security_groups_tags }}"
- resource: "{{ item.group_id }}"
- region: "{{ openshift_aws_region }}"
- with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws/tasks/setup_master_group.yml b/roles/openshift_aws/tasks/setup_master_group.yml
index 05b68f460..700917ef4 100644
--- a/roles/openshift_aws/tasks/setup_master_group.yml
+++ b/roles/openshift_aws/tasks/setup_master_group.yml
@@ -8,7 +8,7 @@
msg: "openshift_aws_region={{ openshift_aws_region }}"
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid }}"
@@ -19,11 +19,13 @@
delay: 3
until: instancesout.instances|length > 0
+- debug: var=instancesout
+
- name: add new master to masters group
add_host:
groups: "{{ openshift_aws_masters_groups }}"
name: "{{ item.public_dns_name }}"
- hostname: "{{ openshift_aws_clusterid }}-master-{{ item.id[:-5] }}"
+ hostname: "{{ openshift_aws_clusterid }}-master-{{ item.instance_id[:-5] }}"
with_items: "{{ instancesout.instances }}"
- name: wait for ssh to become available
diff --git a/roles/openshift_aws/tasks/setup_scale_group_facts.yml b/roles/openshift_aws/tasks/setup_scale_group_facts.yml
index d65fdc2de..14c5246c9 100644
--- a/roles/openshift_aws/tasks/setup_scale_group_facts.yml
+++ b/roles/openshift_aws/tasks/setup_scale_group_facts.yml
@@ -1,11 +1,15 @@
---
-- name: group scale group nodes
- ec2_remote_facts:
+- name: fetch all created instances
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
- "{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid }}}"
+ "{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
+ 'instance-state-name': 'running'} }}"
register: qinstances
+# Building the new and current host groups depends on having the lists of current and newly
+# created ASGs in openshift_aws_current_asgs and openshift_aws_created_asgs. If those are not
+# set, we cannot determine which hosts are new and which are current.
- name: Build new node group
add_host:
groups: oo_sg_new_nodes
@@ -13,10 +17,16 @@
name: "{{ item.public_dns_name }}"
hostname: "{{ item.public_dns_name }}"
when:
- - (item.tags.version | default(False)) == openshift_aws_new_version
+ - openshift_aws_created_asgs != []
+ - "'aws:autoscaling:groupName' in item.tags"
+ - item.tags['aws:autoscaling:groupName'] in openshift_aws_created_asgs
- "'node' in item.tags['host-type']"
with_items: "{{ qinstances.instances }}"
+- name: dump openshift_aws_current_asgs
+ debug:
+ msg: "{{ openshift_aws_current_asgs }}"
+
- name: Build current node group
add_host:
groups: oo_sg_current_nodes
@@ -24,7 +34,9 @@
name: "{{ item.public_dns_name }}"
hostname: "{{ item.public_dns_name }}"
when:
- - (item.tags.version | default('')) == openshift_aws_current_version
+ - openshift_aws_current_asgs != []
+ - "'aws:autoscaling:groupName' in item.tags"
+ - item.tags['aws:autoscaling:groupName'] in openshift_aws_current_asgs
- "'node' in item.tags['host-type']"
with_items: "{{ qinstances.instances }}"
diff --git a/roles/openshift_aws/tasks/upgrade_node_group.yml b/roles/openshift_aws/tasks/upgrade_node_group.yml
index d7851d887..4f4730dd6 100644
--- a/roles/openshift_aws/tasks/upgrade_node_group.yml
+++ b/roles/openshift_aws/tasks/upgrade_node_group.yml
@@ -1,16 +1,26 @@
---
-- fail:
- msg: 'Please ensure the current_version and new_version variables are not the same.'
+- include_tasks: provision_nodes.yml
+ vars:
+ openshift_aws_node_group_upgrade: True
when:
- - openshift_aws_current_version == openshift_aws_new_version
+ - openshift_aws_upgrade_provision_nodes | default(True)
+
+- debug: var=openshift_aws_current_asgs
+- debug: var=openshift_aws_created_asgs
-- include: provision_nodes.yml
+- name: fail if asg variables aren't set
+ fail:
+ msg: "Please ensure that openshift_aws_created_asgs and openshift_aws_current_asgs are defined."
+ when:
+ - openshift_aws_created_asgs == []
+ - openshift_aws_current_asgs == []
-- include: accept_nodes.yml
+- include_tasks: accept_nodes.yml
+ when: openshift_aws_upgrade_accept_nodes | default(True)
-- include: setup_scale_group_facts.yml
+- include_tasks: setup_scale_group_facts.yml
-- include: setup_master_group.yml
+- include_tasks: setup_master_group.yml
vars:
# we do not set etcd here as its limited to 1 or 3
openshift_aws_masters_groups: masters,nodes
diff --git a/roles/openshift_aws/tasks/wait_for_groups.yml b/roles/openshift_aws/tasks/wait_for_groups.yml
index 9f1a68a2a..1f4ef3e1c 100644
--- a/roles/openshift_aws/tasks/wait_for_groups.yml
+++ b/roles/openshift_aws/tasks/wait_for_groups.yml
@@ -1,31 +1,41 @@
---
# The idea here is to wait until all scale groups are at
# their desired capacity before continuing.
-- name: fetch the scale groups
+# This is accomplished with a custom filter_plugin and until clause
+- name: "fetch the scale groups"
ec2_asg_facts:
region: "{{ openshift_aws_region }}"
tags:
- "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} }}"
+ "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid } }}"
register: qasg
- until: qasg.results | scale_groups_match_capacity | bool
+ until: qasg | json_query('results[*]') | scale_groups_match_capacity | bool
delay: 10
retries: 60
+- debug: var=openshift_aws_created_asgs
+
+# how do we guarantee the instances are up?
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
- 'tag:version': openshift_aws_new_version} }}"
+ 'tag:aws:autoscaling:groupName': item,
+ 'instance-state-name': 'running'} }}"
+ with_items: "{{ openshift_aws_created_asgs if openshift_aws_created_asgs != [] else qasg | sum(attribute='results', start=[]) }}"
register: instancesout
until: instancesout.instances|length > 0
delay: 5
retries: 60
+- name: dump instances
+ debug:
+ msg: "{{ instancesout.results | sum(attribute='instances', start=[]) }}"
+
- name: wait for ssh to become available
wait_for:
port: 22
host: "{{ item.public_ip_address }}"
timeout: 300
search_regex: OpenSSH
- with_items: "{{ instancesout.instances }}"
+ with_items: "{{ instancesout.results | sum(attribute='instances', start=[]) }}"
diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2
index fe0fe83d4..bda1334cd 100644
--- a/roles/openshift_aws/templates/user_data.j2
+++ b/roles/openshift_aws/templates/user_data.j2
@@ -7,8 +7,8 @@ write_files:
owner: 'root:root'
permissions: '0640'
content: |
- openshift_group_type: {{ launch_config_item.key }}
-{% if launch_config_item.key != 'master' %}
+ openshift_group_type: {{ openshift_aws_node_group.group }}
+{% if openshift_aws_node_group.group != 'master' %}
- path: /etc/origin/node/bootstrap.kubeconfig
owner: 'root:root'
permissions: '0640'
@@ -19,7 +19,7 @@ runcmd:
{% if openshift_aws_node_run_bootstrap_startup %}
- [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml]
{% endif %}
-{% if launch_config_item.key != 'master' %}
+{% if openshift_aws_node_group.group != 'master' %}
- [ systemctl, restart, NetworkManager]
- [ systemctl, enable, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
- [ systemctl, start, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
diff --git a/roles/openshift_builddefaults/tasks/main.yml b/roles/openshift_builddefaults/tasks/main.yml
index e0b51eee0..612b6522d 100644
--- a/roles/openshift_builddefaults/tasks/main.yml
+++ b/roles/openshift_builddefaults/tasks/main.yml
@@ -4,11 +4,6 @@
role: builddefaults
# TODO: add ability to define builddefaults env vars sort of like this
# may need to move the config generation to a filter however.
- # openshift_env: "{{ hostvars
- # | oo_merge_hostvars(vars, inventory_hostname)
- # | oo_openshift_env }}"
- # openshift_env_structures:
- # - 'openshift.builddefaults.env.*'
local_facts:
http_proxy: "{{ openshift_builddefaults_http_proxy | default(None) }}"
https_proxy: "{{ openshift_builddefaults_https_proxy | default(None) }}"
diff --git a/roles/openshift_ca/meta/main.yml b/roles/openshift_ca/meta/main.yml
index f8b784a63..81b49ce60 100644
--- a/roles/openshift_ca/meta/main.yml
+++ b/roles/openshift_ca/meta/main.yml
@@ -14,3 +14,4 @@ galaxy_info:
- system
dependencies:
- role: openshift_cli
+- role: openshift_facts
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index 05e0a1352..eb00f13db 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -9,7 +9,7 @@
- name: Install the base package for admin tooling
package:
- name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when: not openshift.common.is_containerized | bool
register: install_result
diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py
index 08045794a..440b8ec28 100644
--- a/roles/openshift_cli/library/openshift_container_binary_sync.py
+++ b/roles/openshift_cli/library/openshift_container_binary_sync.py
@@ -27,7 +27,7 @@ class BinarySyncError(Exception):
# pylint: disable=too-few-public-methods,too-many-instance-attributes
class BinarySyncer(object):
"""
- Syncs the openshift, oc, oadm, and kubectl binaries/symlinks out of
+ Syncs the openshift, oc, and kubectl binaries/symlinks out of
a container onto the host system.
"""
@@ -108,7 +108,10 @@ class BinarySyncer(object):
# Ensure correct symlinks created:
self._sync_symlink('kubectl', 'openshift')
- self._sync_symlink('oadm', 'openshift')
+
+ # Remove old oadm binary
+ if os.path.exists(os.path.join(self.bin_dir, 'oadm')):
+ os.remove(os.path.join(self.bin_dir, 'oadm'))
def _sync_symlink(self, binary_name, link_to):
""" Ensure the given binary name exists and links to the expected binary. """
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 140c6ea26..888aa8f0c 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Install clients
- package: name={{ openshift.common.service_type }}-clients state=present
+ package: name={{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }} state=present
when: not openshift.common.is_containerized | bool
register: result
until: result | success
diff --git a/roles/openshift_cluster_autoscaler/tasks/main.yml b/roles/openshift_cluster_autoscaler/tasks/main.yml
index 173dcf044..ca7dfb885 100644
--- a/roles/openshift_cluster_autoscaler/tasks/main.yml
+++ b/roles/openshift_cluster_autoscaler/tasks/main.yml
@@ -31,7 +31,7 @@
type: role
name: "{{ openshift_cluster_autoscaler_name }}"
-- include: aws.yml
+- include_tasks: aws.yml
when: openshift_cluster_autoscaler_cloud_provider == 'aws'
- name: create the policies
diff --git a/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
index 53e8b448b..3d51abc52 100644
--- a/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
+++ b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
@@ -5,7 +5,7 @@ items:
kind: ServiceAccount
metadata:
name: dockergc
- # You must grant privileged via: oadm policy add-scc-to-user -z dockergc privileged
+ # You must grant privileged via: oc adm policy add-scc-to-user -z dockergc privileged
# in order for the dockergc to access the docker socket and root directory
- apiVersion: extensions/v1beta1
kind: DaemonSet
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index 1d5fba990..68a0e8857 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -5,7 +5,7 @@
#
# This script should be run from openshift-ansible/roles/openshift_examples
-XPAAS_VERSION=ose-v1.4.6
+XPAAS_VERSION=ose-v1.4.7
ORIGIN_VERSION=${1:-v3.7}
RHAMP_TAG=2.0.0.GA
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
diff --git a/roles/openshift_examples/files/examples/latest b/roles/openshift_examples/files/examples/latest
new file mode 120000
index 000000000..8cad94b63
--- /dev/null
+++ b/roles/openshift_examples/files/examples/latest
@@ -0,0 +1 @@
+v3.9
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v3.7/v3.8 b/roles/openshift_examples/files/examples/v3.7/v3.8
deleted file mode 120000
index 8ddcf661c..000000000
--- a/roles/openshift_examples/files/examples/v3.7/v3.8
+++ /dev/null
@@ -1 +0,0 @@
-v3.8
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json
index 217ef11dd..92be8f42e 100644
--- a/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "mariadb-persistent",
"annotations": {
- "openshift.io/display-name": "MariaDB (Persistent)",
+ "openshift.io/display-name": "MariaDB",
"description": "MariaDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mariadb",
"tags": "database,mariadb",
diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json
index 97e4128a4..4e3e64d48 100644
--- a/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "mongodb-persistent",
"annotations": {
- "openshift.io/display-name": "MongoDB (Persistent)",
+ "openshift.io/display-name": "MongoDB",
"description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mongodb",
"tags": "database,mongodb",
diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json
index 48ac114fd..6ac80f3a0 100644
--- a/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "mysql-persistent",
"annotations": {
- "openshift.io/display-name": "MySQL (Persistent)",
+ "openshift.io/display-name": "MySQL",
"description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mysql-database",
"tags": "database,mysql",
diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json
index 8a2d23907..190509112 100644
--- a/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "postgresql-persistent",
"annotations": {
- "openshift.io/display-name": "PostgreSQL (Persistent)",
+ "openshift.io/display-name": "PostgreSQL",
"description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-postgresql",
"tags": "database,postgresql",
diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json
index e0e0a88d5..d1103d3af 100644
--- a/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "redis-persistent",
"annotations": {
- "openshift.io/display-name": "Redis (Persistent)",
+ "openshift.io/display-name": "Redis",
"description": "Redis in-memory data structure store, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-redis",
"tags": "database,redis",
diff --git a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json
index e7af160d9..0aa586061 100644
--- a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json
@@ -407,7 +407,7 @@
"annotations": {
"openshift.io/display-name": "Python (Latest)",
"openshift.io/provider-display-name": "Red Hat, Inc.",
- "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
+ "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python",
@@ -415,7 +415,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "3.5"
+ "name": "3.6"
}
},
{
@@ -485,6 +485,23 @@
"kind": "DockerImage",
"name": "centos/python-35-centos7:latest"
}
+ },
+ {
+ "name": "3.6",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "description": "Build and run Python 3.6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.6,python",
+ "version": "3.6",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/python-36-centos7:latest"
+ }
}
]
}
diff --git a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json
index 2b082fc75..7ab9f4e59 100644
--- a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json
@@ -407,7 +407,7 @@
"annotations": {
"openshift.io/display-name": "Python (Latest)",
"openshift.io/provider-display-name": "Red Hat, Inc.",
- "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
+ "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python",
@@ -415,7 +415,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "3.5"
+ "name": "3.6"
}
},
{
@@ -485,6 +485,23 @@
"kind": "DockerImage",
"name": "registry.access.redhat.com/rhscl/python-35-rhel7:latest"
}
+ },
+ {
+ "name": "3.6",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "description": "Build and run Python 3.6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.6,python",
+ "version": "3.6",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/python-36-rhel7:latest"
+ }
}
]
}
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json
index 86ddc184a..40b4eaa81 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "cakephp-mysql-persistent",
"annotations": {
- "openshift.io/display-name": "CakePHP + MySQL (Persistent)",
+ "openshift.io/display-name": "CakePHP + MySQL",
"description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.",
"tags": "quickstart,php,cakephp",
"iconClass": "icon-php",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
"labels": {
- "template": "cakephp-mysql-persistent"
+ "template": "cakephp-mysql-persistent",
+ "app": "cakephp-mysql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json
index 3c964bd6a..ecd90e495 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
"labels": {
- "template": "cakephp-mysql-example"
+ "template": "cakephp-mysql-example",
+ "app": "cakephp-mysql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json
index 0a10c5fbc..17a155600 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "dancer-mysql-persistent",
"annotations": {
- "openshift.io/display-name": "Dancer + MySQL (Persistent)",
+ "openshift.io/display-name": "Dancer + MySQL",
"description": "An example Dancer application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"tags": "quickstart,perl,dancer",
"iconClass": "icon-perl",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"labels": {
- "template": "dancer-mysql-persistent"
+ "template": "dancer-mysql-persistent",
+ "app": "dancer-mysql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json
index 6122d5436..abf711535 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"labels": {
- "template": "dancer-mysql-example"
+ "template": "dancer-mysql-example",
+ "app": "dancer-mysql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json
index f3b5838fa..c8dab0b53 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "django-psql-persistent",
"annotations": {
- "openshift.io/display-name": "Django + PostgreSQL (Persistent)",
+ "openshift.io/display-name": "Django + PostgreSQL",
"description": "An example Django application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"tags": "quickstart,python,django",
"iconClass": "icon-python",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"labels": {
- "template": "django-psql-persistent"
+ "template": "django-psql-persistent",
+ "app": "django-psql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json
index b21295df2..6395defda 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"labels": {
- "template": "django-psql-example"
+ "template": "django-psql-example",
+ "app": "django-psql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json
index 3771280bf..d7cab4889 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.",
"labels": {
- "template": "httpd-example"
+ "template": "httpd-example",
+ "app": "httpd-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json
index 28b4b9d81..6186bba10 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json
@@ -15,6 +15,9 @@
}
},
"message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
+ "labels": {
+ "app": "jenkins-ephemeral"
+ },
"objects": [
{
"kind": "Route",
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json
index 4915bb12c..59dc0d22b 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "jenkins-persistent",
"annotations": {
- "openshift.io/display-name": "Jenkins (Persistent)",
+ "openshift.io/display-name": "Jenkins",
"description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-jenkins",
"tags": "instant-app,jenkins",
@@ -15,6 +15,9 @@
}
},
"message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
+ "labels": {
+ "app": "jenkins-persistent"
+ },
"objects": [
{
"kind": "Route",
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json
index 7f2a5d804..f04adaa67 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "nodejs-mongo-persistent",
"annotations": {
- "openshift.io/display-name": "Node.js + MongoDB (Persistent)",
+ "openshift.io/display-name": "Node.js + MongoDB",
"description": "An example Node.js application with a MongoDB database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"tags": "quickstart,nodejs",
"iconClass": "icon-nodejs",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"labels": {
- "template": "nodejs-mongo-persistent"
+ "template": "nodejs-mongo-persistent",
+ "app": "nodejs-mongo-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json
index b3afae46e..0ce36dba5 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"labels": {
- "template": "nodejs-mongodb-example"
+ "template": "nodejs-mongodb-example",
+ "app": "nodejs-mongodb-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json
index 1c03be28a..10e9382cc 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "rails-pgsql-persistent",
"annotations": {
- "openshift.io/display-name": "Rails + PostgreSQL (Persistent)",
+ "openshift.io/display-name": "Rails + PostgreSQL",
"description": "An example Rails application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"tags": "quickstart,ruby,rails",
"iconClass": "icon-ruby",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"labels": {
- "template": "rails-pgsql-persistent"
+ "template": "rails-pgsql-persistent",
+ "app": "rails-pgsql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json
index 240289d33..8ec2c8ea6 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"labels": {
- "template": "rails-postgresql-example"
+ "template": "rails-postgresql-example",
+ "app": "rails-postgresql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/latest b/roles/openshift_examples/files/examples/v3.9/latest
deleted file mode 120000
index b9bc2fdcb..000000000
--- a/roles/openshift_examples/files/examples/v3.9/latest
+++ /dev/null
@@ -1 +0,0 @@
-latest
\ No newline at end of file
diff --git a/roles/openshift_excluder/README.md b/roles/openshift_excluder/README.md
index 80cb88d45..7b43d5adf 100644
--- a/roles/openshift_excluder/README.md
+++ b/roles/openshift_excluder/README.md
@@ -28,7 +28,7 @@ Role Variables
| r_openshift_excluder_verify_upgrade | false | true, false | When upgrading, this variable should be set to true when calling the role |
| r_openshift_excluder_package_state | present | present, latest | Use 'latest' to upgrade openshift_excluder package |
| r_openshift_excluder_docker_package_state | present | present, latest | Use 'latest' to upgrade docker_excluder package |
-| r_openshift_excluder_service_type | None | | (Required) Defined as openshift.common.service_type e.g. atomic-openshift |
+| r_openshift_excluder_service_type | None | | (Required) Defined as openshift_service_type e.g. atomic-openshift |
| r_openshift_excluder_upgrade_target | None | | Required when r_openshift_excluder_verify_upgrade is true, defined as openshift_upgrade_target by Upgrade playbooks e.g. '3.6'|
Dependencies
@@ -46,15 +46,12 @@ Example Playbook
# Disable all excluders
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
# Enable all excluders
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
# Disable all excluders and verify appropriate excluder packages are available for upgrade
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
r_openshift_excluder_verify_upgrade: true
r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
r_openshift_excluder_package_state: latest
diff --git a/roles/openshift_excluder/defaults/main.yml b/roles/openshift_excluder/defaults/main.yml
index d4f151142..3a910e490 100644
--- a/roles/openshift_excluder/defaults/main.yml
+++ b/roles/openshift_excluder/defaults/main.yml
@@ -2,7 +2,7 @@
# keep the 'current' package or update to 'latest' if available?
r_openshift_excluder_package_state: present
r_openshift_excluder_docker_package_state: present
-
+r_openshift_excluder_service_type: "{{ openshift_service_type }}"
# Legacy variables are included for backwards compatibility with v3.5
# Inventory variables Legacy
# openshift_enable_excluders enable_excluders
diff --git a/roles/openshift_excluder/meta/main.yml b/roles/openshift_excluder/meta/main.yml
index 871081c19..a9653edda 100644
--- a/roles/openshift_excluder/meta/main.yml
+++ b/roles/openshift_excluder/meta/main.yml
@@ -12,4 +12,5 @@ galaxy_info:
categories:
- cloud
dependencies:
+- role: openshift_facts
- role: lib_utils
diff --git a/roles/openshift_excluder/tasks/main.yml b/roles/openshift_excluder/tasks/main.yml
index 93d6ef149..f0e87ba25 100644
--- a/roles/openshift_excluder/tasks/main.yml
+++ b/roles/openshift_excluder/tasks/main.yml
@@ -19,11 +19,6 @@
msg: "openshift_excluder role can only be called with 'enable' or 'disable'"
when: r_openshift_excluder_action not in ['enable', 'disable']
- - name: Fail if r_openshift_excluder_service_type is not defined
- fail:
- msg: "r_openshift_excluder_service_type must be specified for this role"
- when: r_openshift_excluder_service_type is not defined
-
- name: Fail if r_openshift_excluder_upgrade_target is not defined
fail:
msg: "r_openshift_excluder_upgrade_target must be provided when using this role for upgrades"
diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml
index 7064d727a..53a3bc87e 100644
--- a/roles/openshift_facts/defaults/main.yml
+++ b/roles/openshift_facts/defaults/main.yml
@@ -3,4 +3,104 @@ openshift_cli_image_dict:
origin: 'openshift/origin'
openshift-enterprise: 'openshift3/ose'
+openshift_hosted_images_dict:
+ origin: 'openshift/origin-${component}:${version}'
+ openshift-enterprise: 'openshift3/ose-${component}:${version}'
+
openshift_cli_image: "{{ osm_image | default(openshift_cli_image_dict[openshift_deployment_type]) }}"
+
+# osm_default_subdomain is an old migrated fact, can probably be removed.
+osm_default_subdomain: "router.default.svc.cluster.local"
+openshift_master_default_subdomain: "{{ osm_default_subdomain }}"
+
+openshift_hosted_etcd_storage_nfs_directory: '/exports'
+openshift_hosted_etcd_storage_nfs_options: '*(rw,root_squash)'
+openshift_hosted_etcd_storage_volume_name: 'etcd'
+openshift_hosted_etcd_storage_volume_size: '1Gi'
+openshift_hosted_etcd_storage_create_pv: True
+openshift_hosted_etcd_storage_create_pvc: False
+openshift_hosted_etcd_storage_access_modes:
+ - 'ReadWriteOnce'
+
+openshift_hosted_registry_namespace: 'default'
+openshift_hosted_registry_storage_volume_name: 'registry'
+openshift_hosted_registry_storage_volume_size: '5Gi'
+openshift_hosted_registry_storage_create_pv: True
+openshift_hosted_registry_storage_create_pvc: True
+openshift_hosted_registry_storage_nfs_directory: '/exports'
+openshift_hosted_registry_storage_nfs_options: '*(rw,root_squash)'
+openshift_hosted_registry_storage_glusterfs_endpoints: 'glusterfs-registry-endpoints'
+openshift_hosted_registry_storage_glusterfs_path: glusterfs-registry-volume
+openshift_hosted_registry_storage_glusterfs_readOnly: False
+openshift_hosted_registry_storage_glusterfs_swap: False
+openshift_hosted_registry_storage_glusterfs_swapcopy: True
+openshift_hosted_registry_storage_glusterfs_ips: []
+openshift_hosted_registry_storage_access_modes:
+ - 'ReadWriteMany'
+
+openshift_logging_storage_nfs_directory: '/exports'
+openshift_logging_storage_nfs_options: '*(rw,root_squash)'
+openshift_logging_storage_volume_name: 'logging-es'
+openshift_logging_storage_create_pv: True
+openshift_logging_storage_create_pvc: False
+openshift_logging_storage_access_modes:
+ - ['ReadWriteOnce']
+
+openshift_loggingops_storage_volume_name: 'logging-es-ops'
+openshift_loggingops_storage_volume_size: '10Gi'
+openshift_loggingops_storage_create_pv: True
+openshift_loggingops_storage_create_pvc: False
+openshift_loggingops_storage_nfs_directory: '/exports'
+openshift_loggingops_storage_nfs_options: '*(rw,root_squash)'
+openshift_loggingops_storage_access_modes:
+ - 'ReadWriteOnce'
+
+openshift_metrics_deploy: False
+openshift_metrics_duration: 7
+openshift_metrics_resolution: '10s'
+openshift_metrics_storage_volume_name: 'metrics'
+openshift_metrics_storage_volume_size: '10Gi'
+openshift_metrics_storage_create_pv: True
+openshift_metrics_storage_create_pvc: False
+openshift_metrics_storage_nfs_directory: '/exports'
+openshift_metrics_storage_nfs_options: '*(rw,root_squash)'
+openshift_metrics_storage_access_modes:
+ - 'ReadWriteOnce'
+
+openshift_prometheus_storage_volume_name: 'prometheus'
+openshift_prometheus_storage_volume_size: '10Gi'
+openshift_prometheus_storage_nfs_directory: '/exports'
+openshift_prometheus_storage_nfs_options: '*(rw,root_squash)'
+openshift_prometheus_storage_access_modes:
+ - 'ReadWriteOnce'
+openshift_prometheus_storage_create_pv: True
+openshift_prometheus_storage_create_pvc: False
+
+openshift_prometheus_alertmanager_storage_volume_name: 'prometheus-alertmanager'
+openshift_prometheus_alertmanager_storage_volume_size: '10Gi'
+openshift_prometheus_alertmanager_storage_nfs_directory: '/exports'
+openshift_prometheus_alertmanager_storage_nfs_options: '*(rw,root_squash)'
+openshift_prometheus_alertmanager_storage_access_modes:
+ - 'ReadWriteOnce'
+openshift_prometheus_alertmanager_storage_create_pv: True
+openshift_prometheus_alertmanager_storage_create_pvc: False
+
+openshift_prometheus_alertbuffer_storage_volume_name: 'prometheus-alertbuffer'
+openshift_prometheus_alertbuffer_storage_volume_size: '10Gi'
+openshift_prometheus_alertbuffer_storage_nfs_directory: '/exports'
+openshift_prometheus_alertbuffer_storage_nfs_options: '*(rw,root_squash)'
+openshift_prometheus_alertbuffer_storage_access_modes:
+ - 'ReadWriteOnce'
+openshift_prometheus_alertbuffer_storage_create_pv: True
+openshift_prometheus_alertbuffer_storage_create_pvc: False
+
+
+openshift_router_selector: "region=infra"
+openshift_hosted_router_selector: "{{ openshift_router_selector }}"
+openshift_hosted_registry_selector: "{{ openshift_router_selector }}"
+
+openshift_service_type_dict:
+ origin: origin
+ openshift-enterprise: atomic-openshift
+
+openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
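The new defaults replace the Python-generated `hosted`, `logging`, `metrics`, and `prometheus` fact trees with flat role variables and derive `openshift_service_type` from `openshift_deployment_type`. A minimal sketch of how an inventory might override a few of these defaults; the variable names come from the defaults above, the values shown are hypothetical:

```yaml
# Illustrative group_vars overrides (values are placeholders, not from the patch).
openshift_deployment_type: openshift-enterprise   # openshift_service_type then resolves to 'atomic-openshift'
openshift_hosted_registry_storage_volume_size: 20Gi
openshift_hosted_registry_storage_create_pvc: false
openshift_prometheus_storage_volume_size: 50Gi
```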
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 508228b2e..a10ba9310 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -11,14 +11,13 @@ import copy
import errno
import json
import re
-import io
import os
import yaml
import struct
import socket
from distutils.util import strtobool
from distutils.version import LooseVersion
-from ansible.module_utils.six import string_types, text_type
+from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves import configparser
# ignore pylint errors related to the module_utils import
@@ -86,24 +85,6 @@ def migrate_node_facts(facts):
return facts
-def migrate_hosted_facts(facts):
- """ Apply migrations for master facts """
- if 'master' in facts:
- if 'router_selector' in facts['master']:
- if 'hosted' not in facts:
- facts['hosted'] = {}
- if 'router' not in facts['hosted']:
- facts['hosted']['router'] = {}
- facts['hosted']['router']['selector'] = facts['master'].pop('router_selector')
- if 'registry_selector' in facts['master']:
- if 'hosted' not in facts:
- facts['hosted'] = {}
- if 'registry' not in facts['hosted']:
- facts['hosted']['registry'] = {}
- facts['hosted']['registry']['selector'] = facts['master'].pop('registry_selector')
- return facts
-
-
def migrate_admission_plugin_facts(facts):
""" Apply migrations for admission plugin facts """
if 'master' in facts:
@@ -113,8 +94,7 @@ def migrate_admission_plugin_facts(facts):
# Merge existing kube_admission_plugin_config with admission_plugin_config.
facts['master']['admission_plugin_config'] = merge_facts(facts['master']['admission_plugin_config'],
facts['master']['kube_admission_plugin_config'],
- additive_facts_to_overwrite=[],
- protected_facts_to_overwrite=[])
+ additive_facts_to_overwrite=[])
# Remove kube_admission_plugin_config fact
facts['master'].pop('kube_admission_plugin_config', None)
return facts
@@ -125,7 +105,6 @@ def migrate_local_facts(facts):
migrated_facts = copy.deepcopy(facts)
migrated_facts = migrate_common_facts(migrated_facts)
migrated_facts = migrate_node_facts(migrated_facts)
- migrated_facts = migrate_hosted_facts(migrated_facts)
migrated_facts = migrate_admission_plugin_facts(migrated_facts)
return migrated_facts
@@ -412,58 +391,6 @@ def normalize_provider_facts(provider, metadata):
return facts
-# pylint: disable=too-many-branches
-def set_selectors(facts):
- """ Set selectors facts if not already present in facts dict
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with the generated selectors
- facts if they were not already present
-
- """
- selector = "region=infra"
-
- if 'hosted' not in facts:
- facts['hosted'] = {}
- if 'router' not in facts['hosted']:
- facts['hosted']['router'] = {}
- if 'selector' not in facts['hosted']['router'] or facts['hosted']['router']['selector'] in [None, 'None']:
- facts['hosted']['router']['selector'] = selector
- if 'registry' not in facts['hosted']:
- facts['hosted']['registry'] = {}
- if 'selector' not in facts['hosted']['registry'] or facts['hosted']['registry']['selector'] in [None, 'None']:
- facts['hosted']['registry']['selector'] = selector
- if 'metrics' not in facts['hosted']:
- facts['hosted']['metrics'] = {}
- if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']:
- facts['hosted']['metrics']['selector'] = None
- if 'logging' not in facts or not isinstance(facts['logging'], dict):
- facts['logging'] = {}
- if 'selector' not in facts['logging'] or facts['logging']['selector'] in [None, 'None']:
- facts['logging']['selector'] = None
- if 'etcd' not in facts['hosted']:
- facts['hosted']['etcd'] = {}
- if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']:
- facts['hosted']['etcd']['selector'] = None
- if 'prometheus' not in facts:
- facts['prometheus'] = {}
- if 'selector' not in facts['prometheus'] or facts['prometheus']['selector'] in [None, 'None']:
- facts['prometheus']['selector'] = None
- if 'alertmanager' not in facts['prometheus']:
- facts['prometheus']['alertmanager'] = {}
- # pylint: disable=line-too-long
- if 'selector' not in facts['prometheus']['alertmanager'] or facts['prometheus']['alertmanager']['selector'] in [None, 'None']:
- facts['prometheus']['alertmanager']['selector'] = None
- if 'alertbuffer' not in facts['prometheus']:
- facts['prometheus']['alertbuffer'] = {}
- # pylint: disable=line-too-long
- if 'selector' not in facts['prometheus']['alertbuffer'] or facts['prometheus']['alertbuffer']['selector'] in [None, 'None']:
- facts['prometheus']['alertbuffer']['selector'] = None
-
- return facts
-
-
def set_identity_providers_if_unset(facts):
""" Set identity_providers fact if not already present in facts dict
@@ -531,7 +458,6 @@ def set_url_facts_if_unset(facts):
etcd_urls = []
if etcd_hosts != '':
facts['master']['etcd_port'] = ports['etcd']
- facts['master']['embedded_etcd'] = False
for host in etcd_hosts:
etcd_urls.append(format_url(use_ssl['etcd'], host,
ports['etcd']))
@@ -608,63 +534,9 @@ def set_aggregate_facts(facts):
return facts
-def set_etcd_facts_if_unset(facts):
- """
- If using embedded etcd, loads the data directory from master-config.yaml.
-
- If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf.
-
- If anything goes wrong parsing these, the fact will not be set.
- """
- if 'master' in facts and safe_get_bool(facts['master']['embedded_etcd']):
- etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
-
- if 'etcd_data_dir' not in etcd_facts:
- try:
- # Parse master config to find actual etcd data dir:
- master_cfg_path = os.path.join(facts['common']['config_base'],
- 'master/master-config.yaml')
- master_cfg_f = open(master_cfg_path, 'r')
- config = yaml.safe_load(master_cfg_f.read())
- master_cfg_f.close()
-
- etcd_facts['etcd_data_dir'] = \
- config['etcdConfig']['storageDirectory']
-
- facts['etcd'] = etcd_facts
-
- # We don't want exceptions bubbling up here:
- # pylint: disable=broad-except
- except Exception:
- pass
- else:
- etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
-
- # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
- try:
- # Add a fake section for parsing:
- ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
- ini_fp = io.StringIO(ini_str)
- config = configparser.RawConfigParser()
- config.readfp(ini_fp)
- etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
- if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
- etcd_data_dir = etcd_data_dir[1:-1]
-
- etcd_facts['etcd_data_dir'] = etcd_data_dir
- facts['etcd'] = etcd_facts
-
- # We don't want exceptions bubbling up here:
- # pylint: disable=broad-except
- except Exception:
- pass
-
- return facts
-
-
def set_deployment_facts_if_unset(facts):
""" Set Facts that vary based on deployment_type. This currently
- includes common.service_type, master.registry_url, node.registry_url,
+ includes master.registry_url, node.registry_url,
node.storage_plugin_deps
Args:
@@ -676,14 +548,6 @@ def set_deployment_facts_if_unset(facts):
# disabled to avoid breaking up facts related to deployment type into
# multiple methods for now.
# pylint: disable=too-many-statements, too-many-branches
- if 'common' in facts:
- deployment_type = facts['common']['deployment_type']
- if 'service_type' not in facts['common']:
- service_type = 'atomic-openshift'
- if deployment_type == 'origin':
- service_type = 'origin'
- facts['common']['service_type'] = service_type
-
for role in ('master', 'node'):
if role in facts:
deployment_type = facts['common']['deployment_type']
@@ -980,7 +844,7 @@ values provided as a list. Hence the gratuitous use of ['foo'] below.
# If we've added items to the kubelet_args dict then we need
# to merge the new items back into the main facts object.
if kubelet_args != {}:
- facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])
+ facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [])
return facts
@@ -1002,7 +866,7 @@ def build_controller_args(facts):
controller_args['cloud-provider'] = ['gce']
controller_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
if controller_args != {}:
- facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
+ facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [])
return facts
@@ -1024,7 +888,7 @@ def build_api_server_args(facts):
api_server_args['cloud-provider'] = ['gce']
api_server_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
if api_server_args != {}:
- facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
+ facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [])
return facts
@@ -1147,8 +1011,13 @@ def get_container_openshift_version(facts):
If containerized, see if we can determine the installed version via the
systemd environment files.
"""
+ deployment_type = facts['common']['deployment_type']
+ service_type_dict = {'origin': 'origin',
+ 'openshift-enterprise': 'atomic-openshift'}
+ service_type = service_type_dict[deployment_type]
+
for filename in ['/etc/sysconfig/%s-master-controllers', '/etc/sysconfig/%s-node']:
- env_path = filename % facts['common']['service_type']
+ env_path = filename % service_type
if not os.path.exists(env_path):
continue
@@ -1211,7 +1080,7 @@ def apply_provider_facts(facts, provider_facts):
# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches, too-many-nested-blocks
-def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
+def merge_facts(orig, new, additive_facts_to_overwrite):
""" Recursively merge facts dicts
Args:
@@ -1219,14 +1088,11 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
new (dict): facts to update
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
- protected_facts_to_overwrite (list): protected facts to overwrite in jinja
- '.' notation ex: ['master.master_count']
Returns:
dict: the merged facts
"""
additive_facts = ['named_certificates']
- protected_facts = ['ha']
# Facts we do not ever want to merge. These originate in inventory variables
# and contain JSON dicts. We don't ever want to trigger a merge
@@ -1258,14 +1124,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
if '.' in item and item.startswith(key + '.'):
relevant_additive_facts.append(item)
- # Collect the subset of protected facts to overwrite
- # if key matches. These will be passed to the
- # subsequent merge_facts call.
- relevant_protected_facts = []
- for item in protected_facts_to_overwrite:
- if '.' in item and item.startswith(key + '.'):
- relevant_protected_facts.append(item)
- facts[key] = merge_facts(value, new[key], relevant_additive_facts, relevant_protected_facts)
+ facts[key] = merge_facts(value, new[key], relevant_additive_facts)
# Key matches an additive fact and we are not overwriting
# it so we will append the new value to the existing value.
elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
@@ -1275,18 +1134,6 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
if item not in new_fact:
new_fact.append(item)
facts[key] = new_fact
- # Key matches a protected fact and we are not overwriting
- # it so we will determine if it is okay to change this
- # fact.
- elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
- # ha (bool) can not change unless it has been passed
- # as a protected fact to overwrite.
- if key == 'ha':
- if safe_get_bool(value) != safe_get_bool(new[key]):
- # pylint: disable=line-too-long
- module.fail_json(msg='openshift_facts received a different value for openshift.master.ha') # noqa: F405
- else:
- facts[key] = value
# No other condition has been met. Overwrite the old fact
# with the new value.
else:
@@ -1559,7 +1406,6 @@ def set_container_facts_if_unset(facts):
facts['node']['ovs_system_image'] = ovs_image
if safe_get_bool(facts['common']['is_containerized']):
- facts['common']['admin_binary'] = '/usr/local/bin/oadm'
facts['common']['client_binary'] = '/usr/local/bin/oc'
return facts
@@ -1620,8 +1466,6 @@ class OpenShiftFacts(object):
local_facts (dict): local facts to set
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
- protected_facts_to_overwrite (list): protected facts to overwrite in jinja
- '.' notation ex: ['master.master_count']
Raises:
OpenShiftFactsUnsupportedRoleError:
@@ -1631,21 +1475,13 @@ class OpenShiftFacts(object):
'cloudprovider',
'common',
'etcd',
- 'hosted',
'master',
- 'node',
- 'logging',
- 'loggingops',
- 'metrics',
- 'prometheus']
+ 'node']
# Disabling too-many-arguments, this should be cleaned up as a TODO item.
# pylint: disable=too-many-arguments,no-value-for-parameter
def __init__(self, role, filename, local_facts,
- additive_facts_to_overwrite=None,
- openshift_env=None,
- openshift_env_structures=None,
- protected_facts_to_overwrite=None):
+ additive_facts_to_overwrite=None):
self.changed = False
self.filename = filename
if role not in self.known_roles:
@@ -1667,34 +1503,23 @@ class OpenShiftFacts(object):
self.system_facts = get_all_facts(module)['ansible_facts'] # noqa: F405
self.facts = self.generate_facts(local_facts,
- additive_facts_to_overwrite,
- openshift_env,
- openshift_env_structures,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
def generate_facts(self,
local_facts,
- additive_facts_to_overwrite,
- openshift_env,
- openshift_env_structures,
- protected_facts_to_overwrite):
+ additive_facts_to_overwrite):
""" Generate facts
Args:
local_facts (dict): local_facts for overriding generated defaults
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
- openshift_env (dict): openshift_env facts for overriding generated defaults
- protected_facts_to_overwrite (list): protected facts to overwrite in jinja
- '.' notation ex: ['master.master_count']
Returns:
dict: The generated facts
"""
+
local_facts = self.init_local_facts(local_facts,
- additive_facts_to_overwrite,
- openshift_env,
- openshift_env_structures,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
roles = local_facts.keys()
if 'common' in local_facts and 'deployment_type' in local_facts['common']:
@@ -1712,12 +1537,10 @@ class OpenShiftFacts(object):
facts = apply_provider_facts(defaults, provider_facts)
facts = merge_facts(facts,
local_facts,
- additive_facts_to_overwrite,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
facts = migrate_oauth_template_facts(facts)
facts['current_config'] = get_current_config(facts)
facts = set_url_facts_if_unset(facts)
- facts = set_selectors(facts)
facts = set_identity_providers_if_unset(facts)
facts = set_deployment_facts_if_unset(facts)
facts = set_sdn_facts_if_unset(facts, self.system_facts)
@@ -1727,7 +1550,6 @@ class OpenShiftFacts(object):
facts = build_api_server_args(facts)
facts = set_version_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
- facts = set_etcd_facts_if_unset(facts)
facts = set_proxy_facts(facts)
facts = set_builddefaults_facts(facts)
facts = set_buildoverrides_facts(facts)
@@ -1760,7 +1582,7 @@ class OpenShiftFacts(object):
hostname=hostname,
public_hostname=hostname,
portal_net='172.30.0.0/16',
- client_binary='oc', admin_binary='oadm',
+ client_binary='oc',
dns_domain='cluster.local',
config_base='/etc/origin')
@@ -1772,7 +1594,7 @@ class OpenShiftFacts(object):
console_port='8443', etcd_use_ssl=True,
etcd_hosts='', etcd_port='4001',
portal_net='172.30.0.0/16',
- embedded_etcd=True, embedded_kube=True,
+ embedded_kube=True,
embedded_dns=True,
bind_addr='0.0.0.0',
session_max_seconds=3600,
@@ -1793,178 +1615,6 @@ class OpenShiftFacts(object):
if 'cloudprovider' in roles:
defaults['cloudprovider'] = dict(kind=None)
- if 'hosted' in roles or self.role == 'hosted':
- defaults['hosted'] = dict(
- etcd=dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='etcd',
- size='1Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- ),
- registry=dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='registry',
- size='5Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'),
- glusterfs=dict(
- endpoints='glusterfs-registry-endpoints',
- path='glusterfs-registry-volume',
- ips=[],
- readOnly=False,
- swap=False,
- swapcopy=True),
- host=None,
- access=dict(
- modes=['ReadWriteMany']
- ),
- create_pv=True,
- create_pvc=True
- )
- ),
- router=dict()
- )
-
- defaults['logging'] = dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='logging-es',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- )
-
- defaults['loggingops'] = dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='logging-es-ops',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- )
-
- defaults['metrics'] = dict(
- deploy=False,
- duration=7,
- resolution='10s',
- storage=dict(
- kind=None,
- volume=dict(
- name='metrics',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- )
-
- defaults['prometheus'] = dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='prometheus',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- )
-
- defaults['prometheus']['alertmanager'] = dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='prometheus-alertmanager',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- )
-
- defaults['prometheus']['alertbuffer'] = dict(
- storage=dict(
- kind=None,
- volume=dict(
- name='prometheus-alertbuffer',
- size='10Gi'
- ),
- nfs=dict(
- directory='/exports',
- options='*(rw,root_squash)'
- ),
- host=None,
- access=dict(
- modes=['ReadWriteOnce']
- ),
- create_pv=True,
- create_pvc=False
- )
- )
-
return defaults
def guess_host_provider(self):
@@ -2037,65 +1687,17 @@ class OpenShiftFacts(object):
)
return provider_facts
- @staticmethod
- def split_openshift_env_fact_keys(openshift_env_fact, openshift_env_structures):
- """ Split openshift_env facts based on openshift_env structures.
-
- Args:
- openshift_env_fact (string): the openshift_env fact to split
- ex: 'openshift_cloudprovider_openstack_auth_url'
- openshift_env_structures (list): a list of structures to determine fact keys
- ex: ['openshift.cloudprovider.openstack.*']
- Returns:
- list: a list of keys that represent the fact
- ex: ['openshift', 'cloudprovider', 'openstack', 'auth_url']
- """
- # By default, we'll split an openshift_env fact by underscores.
- fact_keys = openshift_env_fact.split('_')
-
- # Determine if any of the provided variable structures match the fact.
- matching_structure = None
- if openshift_env_structures is not None:
- for structure in openshift_env_structures:
- if re.match(structure, openshift_env_fact):
- matching_structure = structure
- # Fact didn't match any variable structures so return the default fact keys.
- if matching_structure is None:
- return fact_keys
-
- final_keys = []
- structure_keys = matching_structure.split('.')
- for structure_key in structure_keys:
- # Matched current key. Add to final keys.
- if structure_key == fact_keys[structure_keys.index(structure_key)]:
- final_keys.append(structure_key)
- # Wildcard means we will be taking everything from here to the end of the fact.
- elif structure_key == '*':
- final_keys.append('_'.join(fact_keys[structure_keys.index(structure_key):]))
- # Shouldn't have gotten here, return the fact keys.
- else:
- return fact_keys
- return final_keys
-
# Disabling too-many-branches and too-many-locals.
# This should be cleaned up as a TODO item.
# pylint: disable=too-many-branches, too-many-locals
def init_local_facts(self, facts=None,
- additive_facts_to_overwrite=None,
- openshift_env=None,
- openshift_env_structures=None,
- protected_facts_to_overwrite=None):
+ additive_facts_to_overwrite=None):
""" Initialize the local facts
Args:
facts (dict): local facts to set
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
- openshift_env (dict): openshift env facts to set
- protected_facts_to_overwrite (list): protected facts to overwrite in jinja
- '.' notation ex: ['master.master_count']
-
-
Returns:
dict: The result of merging the provided facts with existing
local facts
@@ -2107,45 +1709,13 @@ class OpenShiftFacts(object):
if facts is not None:
facts_to_set[self.role] = facts
- if openshift_env != {} and openshift_env is not None:
- for fact, value in iteritems(openshift_env):
- oo_env_facts = dict()
- current_level = oo_env_facts
- keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:]
-
- if len(keys) > 0 and keys[0] != self.role:
- continue
-
- # Build a dictionary from the split fact keys.
- # After this loop oo_env_facts is the resultant dictionary.
- # For example:
- # fact = "openshift_metrics_install_metrics"
- # value = 'true'
- # keys = ['metrics', 'install', 'metrics']
- # result = {'metrics': {'install': {'metrics': 'true'}}}
- for i, _ in enumerate(keys):
- # This is the last key. Set the value.
- if i == (len(keys) - 1):
- current_level[keys[i]] = value
- # This is a key other than the last key. Set as
- # dictionary and continue.
- else:
- current_level[keys[i]] = dict()
- current_level = current_level[keys[i]]
-
- facts_to_set = merge_facts(orig=facts_to_set,
- new=oo_env_facts,
- additive_facts_to_overwrite=[],
- protected_facts_to_overwrite=[])
-
local_facts = get_local_facts_from_file(self.filename)
migrated_facts = migrate_local_facts(local_facts)
new_local_facts = merge_facts(migrated_facts,
facts_to_set,
- additive_facts_to_overwrite,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
new_local_facts = self.remove_empty_facts(new_local_facts)
@@ -2253,9 +1823,6 @@ def main():
choices=OpenShiftFacts.known_roles),
local_facts=dict(default=None, type='dict', required=False),
additive_facts_to_overwrite=dict(default=[], type='list', required=False),
- openshift_env=dict(default={}, type='dict', required=False),
- openshift_env_structures=dict(default=[], type='list', required=False),
- protected_facts_to_overwrite=dict(default=[], type='list', required=False)
),
supports_check_mode=True,
add_file_common_args=True,
@@ -2271,19 +1838,13 @@ def main():
role = module.params['role'] # noqa: F405
local_facts = module.params['local_facts'] # noqa: F405
additive_facts_to_overwrite = module.params['additive_facts_to_overwrite'] # noqa: F405
- openshift_env = module.params['openshift_env'] # noqa: F405
- openshift_env_structures = module.params['openshift_env_structures'] # noqa: F405
- protected_facts_to_overwrite = module.params['protected_facts_to_overwrite'] # noqa: F405
fact_file = '/etc/ansible/facts.d/openshift.fact'
openshift_facts = OpenShiftFacts(role,
fact_file,
local_facts,
- additive_facts_to_overwrite,
- openshift_env,
- openshift_env_structures,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
file_params = module.params.copy() # noqa: F405
file_params['path'] = fact_file
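With `openshift_env`, `openshift_env_structures`, and `protected_facts_to_overwrite` removed, the module now accepts only `role`, `local_facts`, and `additive_facts_to_overwrite`. A minimal task sketch of the reduced call signature; the fact values are placeholders, not taken from the patch:

```yaml
# Hypothetical call showing the trimmed-down openshift_facts parameter set.
- name: Set common local facts
  openshift_facts:
    role: common
    local_facts:
      deployment_type: "{{ openshift_deployment_type }}"
    additive_facts_to_overwrite: []
```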
diff --git a/roles/openshift_health_checker/HOWTO_CHECKS.md b/roles/openshift_health_checker/HOWTO_CHECKS.md
index 6c5662a4e..94961f2d4 100644
--- a/roles/openshift_health_checker/HOWTO_CHECKS.md
+++ b/roles/openshift_health_checker/HOWTO_CHECKS.md
@@ -12,7 +12,7 @@ Checks are typically implemented as two parts:
The checks are called from Ansible playbooks via the `openshift_health_check`
action plugin. See
-[playbooks/byo/openshift-preflight/check.yml](../../playbooks/byo/openshift-preflight/check.yml)
+[playbooks/openshift-checks/pre-install.yml](../../playbooks/openshift-checks/pre-install.yml)
for an example.
The action plugin dynamically discovers all checks and executes only those
diff --git a/roles/openshift_health_checker/defaults/main.yml b/roles/openshift_health_checker/defaults/main.yml
new file mode 100644
index 000000000..f25a0dc79
--- /dev/null
+++ b/roles/openshift_health_checker/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+openshift_service_type_dict:
+ origin: origin
+ openshift-enterprise: atomic-openshift
+
+openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index 090e438ff..980e23f27 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -15,7 +15,9 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
return super(PackageAvailability, self).is_active() and self.get_var("ansible_pkg_mgr") == "yum"
def run(self):
- rpm_prefix = self.get_var("openshift", "common", "service_type")
+ rpm_prefix = self.get_var("openshift_service_type")
+ if self._templar is not None:
+ rpm_prefix = self._templar.template(rpm_prefix)
group_names = self.get_var("group_names", default=[])
packages = set()
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 13a91dadf..f3a628e28 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -41,7 +41,9 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
return super(PackageVersion, self).is_active() and master_or_node
def run(self):
- rpm_prefix = self.get_var("openshift", "common", "service_type")
+ rpm_prefix = self.get_var("openshift_service_type")
+ if self._templar is not None:
+ rpm_prefix = self._templar.template(rpm_prefix)
openshift_release = self.get_var("openshift_release", default='')
deployment_type = self.get_var("openshift_deployment_type")
check_multi_minor_release = deployment_type in ['openshift-enterprise']
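Both checks now read the flat `openshift_service_type` variable, templating it if it is still an unresolved Jinja expression, instead of `openshift.common.service_type`. A sketch of a play that supplies the variable via the role defaults and runs the two affected checks; the check names match those above, but the play wiring itself is an assumption, not part of the patch:

```yaml
# Hypothetical play; assumes the openshift_health_checker role is on the role path,
# so its new defaults derive openshift_service_type from openshift_deployment_type.
- hosts: oo_masters_to_config:oo_nodes_to_config
  roles:
    - openshift_health_checker
  tasks:
    - name: Run the package checks with the flat service type variable
      action: openshift_health_check
      args:
        checks:
          - package_availability
          - package_version
```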
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index ec46c3b4b..fc333dfd4 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -8,12 +8,12 @@ def task_vars():
return dict(
openshift=dict(
common=dict(
- service_type='origin',
is_containerized=False,
is_atomic=False,
),
docker=dict(),
),
+ openshift_service_type='origin',
openshift_deployment_type='origin',
openshift_image_tag='',
group_names=['oo_nodes_to_config', 'oo_masters_to_config'],
diff --git a/roles/openshift_health_checker/test/etcd_traffic_test.py b/roles/openshift_health_checker/test/etcd_traffic_test.py
index dd6f4ad81..a29dc166b 100644
--- a/roles/openshift_health_checker/test/etcd_traffic_test.py
+++ b/roles/openshift_health_checker/test/etcd_traffic_test.py
@@ -37,8 +37,9 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words)
task_vars = dict(
group_names=group_names,
openshift=dict(
- common=dict(service_type="origin", is_containerized=False),
- )
+ common=dict(is_containerized=False),
+ ),
+ openshift_service_type="origin"
)
result = EtcdTraffic(execute_module, task_vars).run()
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
index 6f0457549..dd98ff4d8 100644
--- a/roles/openshift_health_checker/test/ovs_version_test.py
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -10,10 +10,11 @@ def test_openshift_version_not_supported():
openshift_release = '111.7.0'
task_vars = dict(
- openshift=dict(common=dict(service_type='origin')),
+ openshift=dict(common=dict()),
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
openshift_deployment_type='origin',
+ openshift_service_type='origin'
)
with pytest.raises(OpenShiftCheckException) as excinfo:
@@ -27,9 +28,10 @@ def test_invalid_openshift_release_format():
return {}
task_vars = dict(
- openshift=dict(common=dict(service_type='origin')),
+ openshift=dict(common=dict()),
openshift_image_tag='v0',
openshift_deployment_type='origin',
+ openshift_service_type='origin'
)
with pytest.raises(OpenShiftCheckException) as excinfo:
@@ -47,9 +49,10 @@ def test_invalid_openshift_release_format():
])
def test_ovs_package_version(openshift_release, expected_ovs_version):
task_vars = dict(
- openshift=dict(common=dict(service_type='origin')),
+ openshift=dict(common=dict()),
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
+ openshift_service_type='origin'
)
return_value = {} # note: check.execute_module modifies return hash contents
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index 9815acb38..a1e6e0879 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -19,13 +19,13 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
@pytest.mark.parametrize('task_vars,must_have_packages,must_not_have_packages', [
(
- dict(openshift=dict(common=dict(service_type='openshift'))),
+ dict(openshift_service_type='origin'),
set(),
set(['openshift-master', 'openshift-node']),
),
(
dict(
- openshift=dict(common=dict(service_type='origin')),
+ openshift_service_type='origin',
group_names=['oo_masters_to_config'],
),
set(['origin-master']),
@@ -33,7 +33,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
),
(
dict(
- openshift=dict(common=dict(service_type='atomic-openshift')),
+ openshift_service_type='atomic-openshift',
group_names=['oo_nodes_to_config'],
),
set(['atomic-openshift-node']),
@@ -41,7 +41,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
),
(
dict(
- openshift=dict(common=dict(service_type='atomic-openshift')),
+ openshift_service_type='atomic-openshift',
group_names=['oo_masters_to_config', 'oo_nodes_to_config'],
),
set(['atomic-openshift-master', 'atomic-openshift-node']),
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index 3cf4ce033..ea8e02b97 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -4,9 +4,12 @@ from openshift_checks.package_version import PackageVersion, OpenShiftCheckExcep
def task_vars_for(openshift_release, deployment_type):
+ service_type_dict = {'origin': 'origin',
+ 'openshift-enterprise': 'atomic-openshift'}
+ service_type = service_type_dict[deployment_type]
return dict(
ansible_pkg_mgr='yum',
- openshift=dict(common=dict(service_type=deployment_type)),
+ openshift_service_type=service_type,
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
openshift_deployment_type=deployment_type,
@@ -29,7 +32,7 @@ def test_openshift_version_not_supported():
def test_invalid_openshift_release_format():
task_vars = dict(
ansible_pkg_mgr='yum',
- openshift=dict(common=dict(service_type='origin')),
+ openshift_service_type='origin',
openshift_image_tag='v0',
openshift_deployment_type='origin',
)
diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md
index a1c2c3956..3cf5d97c5 100644
--- a/roles/openshift_hosted/README.md
+++ b/roles/openshift_hosted/README.md
@@ -43,9 +43,9 @@ variables also control configuration behavior:
**NOTE:** Configuring a value for
`openshift_hosted_registry_storage_glusterfs_ips` with a `glusterfs_registry`
-host group is not allowed. Specifying a `glusterfs_registry` host group
-indicates that a new GlusterFS cluster should be configured, whereas
-specifying `openshift_hosted_registry_storage_glusterfs_ips` indicates wanting
+host group is not allowed. Specifying a `glusterfs_registry` host group
+indicates that a new GlusterFS cluster should be configured, whereas
+specifying `openshift_hosted_registry_storage_glusterfs_ips` indicates wanting
to use a pre-configured GlusterFS cluster for the registry storage.
_
@@ -53,7 +53,6 @@ _
Dependencies
------------
-* openshift_hosted_facts
* openshift_persistent_volumes
Example Playbook
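The note above distinguishes a new GlusterFS cluster (declared via a `glusterfs_registry` host group) from a pre-configured one (declared via `openshift_hosted_registry_storage_glusterfs_ips`). A minimal sketch of the pre-configured case, written as group_vars YAML with hypothetical addresses:

```yaml
# Hypothetical group_vars: an external, pre-configured GlusterFS cluster backs the
# registry, so no glusterfs_registry host group is declared in the inventory.
openshift_hosted_registry_storage_kind: glusterfs
openshift_hosted_registry_storage_glusterfs_ips:
  - 192.0.2.10
  - 192.0.2.11
  - 192.0.2.12
```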
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index e70c0c420..b6501d288 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -27,6 +27,9 @@ openshift_cluster_domain: 'cluster.local'
r_openshift_hosted_router_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+openshift_hosted_router_selector: "{{ openshift_router_selector | default(openshift_hosted_infra_selector) }}"
+openshift_hosted_router_namespace: 'default'
+
openshift_hosted_router_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
openshift_hosted_router_edits:
@@ -40,13 +43,14 @@ openshift_hosted_router_edits:
value: 21600
action: put
+openshift_hosted_router_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}"
openshift_hosted_routers:
- name: router
replicas: "{{ replicas | default(1) }}"
namespace: default
serviceaccount: router
selector: "{{ openshift_hosted_router_selector | default(None) }}"
- images: "{{ openshift_hosted_router_image | default(None) }}"
+ images: "{{ openshift_hosted_router_registryurl }}"
edits: "{{ openshift_hosted_router_edits }}"
stats_port: 1936
ports:
@@ -64,6 +68,11 @@ r_openshift_hosted_router_os_firewall_allow: []
# Registry #
############
+openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}"
+penshift_hosted_registry_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}"
+openshift_hosted_registry_routecertificates: {}
+openshift_hosted_registry_routetermination: "passthrough"
+
r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
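`openshift_hosted_router_registryurl` and `openshift_hosted_registry_registryurl` now default to the deployment-type entry of `openshift_hosted_images_dict`, whose `${component}` and `${version}` placeholders are filled in by the router and registry deployment logic. A sketch of what the defaults resolve to and how an override might look; the mirror URL is hypothetical:

```yaml
# With openshift_deployment_type: origin the defaults resolve to:
#   openshift_hosted_router_registryurl:   'openshift/origin-${component}:${version}'
#   openshift_hosted_registry_registryurl: 'openshift/origin-${component}:${version}'
# Hypothetical inventory override pointing at a private mirror:
openshift_hosted_router_registryurl: 'registry.example.com/openshift/origin-${component}:${version}'
```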
diff --git a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py
index 7f41529ac..003ce5f9e 100644
--- a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py
+++ b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py
@@ -12,7 +12,7 @@ class FilterModule(object):
def get_router_replicas(replicas=None, router_nodes=None):
''' This function will return the number of replicas
based on the results from the defined
- openshift.hosted.router.replicas OR
+ openshift_hosted_router_replicas OR
the query from oc_obj on openshift nodes with a selector OR
default to 1
diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml
index 1d70ef7eb..ac9e241a5 100644
--- a/roles/openshift_hosted/meta/main.yml
+++ b/roles/openshift_hosted/meta/main.yml
@@ -12,6 +12,6 @@ galaxy_info:
categories:
- cloud
dependencies:
-- role: openshift_hosted_facts
+- role: openshift_facts
- role: lib_openshift
- role: lib_os_firewall
diff --git a/roles/openshift_hosted/tasks/registry.yml b/roles/openshift_hosted/tasks/registry.yml
index e2e06594b..429f0c514 100644
--- a/roles/openshift_hosted/tasks/registry.yml
+++ b/roles/openshift_hosted/tasks/registry.yml
@@ -6,20 +6,20 @@
check_mode: no
- name: setup firewall
- include: firewall.yml
+ import_tasks: firewall.yml
vars:
l_openshift_hosted_firewall_enabled: "{{ r_openshift_hosted_registry_firewall_enabled }}"
l_openshift_hosted_use_firewalld: "{{ r_openshift_hosted_registry_use_firewalld }}"
l_openshift_hosted_fw_allow: "{{ r_openshift_hosted_registry_os_firewall_allow }}"
l_openshift_hosted_fw_deny: "{{ r_openshift_hosted_registry_os_firewall_deny }}"
-- when: openshift.hosted.registry.replicas | default(none) is none
+- when: openshift_hosted_registry_replicas | default(none) is none
block:
- name: Retrieve list of openshift nodes matching registry selector
oc_obj:
state: list
kind: node
- selector: "{{ openshift.hosted.registry.selector | default(omit) }}"
+ selector: "{{ openshift_hosted_registry_selector }}"
register: registry_nodes
- name: set_fact l_node_count to number of nodes matching registry selector
@@ -39,16 +39,13 @@
# just 1:
- name: set_fact l_default_replicas when l_node_count > 0
set_fact:
- l_default_replicas: "{{ l_node_count if openshift.hosted.registry.storage.kind | default(none) is not none else 1 }}"
+ l_default_replicas: "{{ l_node_count if openshift_hosted_registry_storage_kind | default(none) is not none else 1 }}"
when: l_node_count | int > 0
- name: set openshift_hosted facts
set_fact:
- openshift_hosted_registry_replicas: "{{ openshift.hosted.registry.replicas | default(l_default_replicas) }}"
- openshift_hosted_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
- openshift_hosted_registry_selector: "{{ openshift.hosted.registry.selector }}"
- openshift_hosted_registry_images: "{{ openshift.hosted.registry.registryurl | default('openshift3/ose-${component}:${version}')}}"
- openshift_hosted_registry_storage_glusterfs_ips: "{%- set gluster_ips = [] %}{% if groups.glusterfs_registry is defined %}{% for node in groups.glusterfs_registry %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% elif groups.glusterfs is defined %}{% for node in groups.glusterfs %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% else %}{{ openshift.hosted.registry.storage.glusterfs.ips }}{% endif %}"
+ # This determines the gluster_ips to use for the registry by looping over the glusterfs_registry group
+ openshift_hosted_registry_storage_glusterfs_ips: "{%- set gluster_ips = [] %}{% if groups.glusterfs_registry is defined %}{% for node in groups.glusterfs_registry %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% elif groups.glusterfs is defined %}{% for node in groups.glusterfs %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% else %}{{ openshift_hosted_registry_storage_glusterfs_ips }}{% endif %}"
- name: Update registry environment variables when pushing via dns
set_fact:
@@ -97,16 +94,14 @@
service_type: ClusterIP
clusterip: '{{ openshift_hosted_registry_clusterip | default(omit) }}'
-- include: secure.yml
- static: no
+- include_tasks: secure.yml
run_once: true
when:
- not (openshift_docker_hosted_registry_insecure | default(False)) | bool
-- include: storage/object_storage.yml
- static: no
+- include_tasks: storage/object_storage.yml
when:
- - openshift.hosted.registry.storage.kind | default(none) == 'object'
+ - openshift_hosted_registry_storage_kind | default(none) == 'object'
- name: Update openshift_hosted facts for persistent volumes
set_fact:
@@ -115,23 +110,23 @@
pvc_volume_mounts:
- name: registry-storage
type: persistentVolumeClaim
- claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
+ claim_name: "{{ openshift_hosted_registry_storage_volume_name }}-claim"
when:
- - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack', 'glusterfs']
+ - openshift_hosted_registry_storage_kind | default(none) in ['nfs', 'openstack', 'glusterfs']
-- include: storage/glusterfs_endpoints.yml
+- include_tasks: storage/glusterfs_endpoints.yml
when:
- openshift_hosted_registry_storage_glusterfs_ips|length > 0
- - openshift.hosted.registry.storage.kind | default(none) in ['glusterfs']
+ - openshift_hosted_registry_storage_kind | default(none) in ['glusterfs']
- name: Create OpenShift registry
oc_adm_registry:
name: "{{ openshift_hosted_registry_name }}"
namespace: "{{ openshift_hosted_registry_namespace }}"
selector: "{{ openshift_hosted_registry_selector }}"
- replicas: "{{ openshift_hosted_registry_replicas }}"
+ replicas: "{{ openshift_hosted_registry_replicas | default(l_default_replicas) }}"
service_account: "{{ openshift_hosted_registry_serviceaccount }}"
- images: "{{ openshift_hosted_registry_images }}"
+ images: "{{ openshift_hosted_registry_registryurl }}"
env_vars: "{{ openshift_hosted_registry_env_vars }}"
volume_mounts: "{{ openshift_hosted_registry_volumes }}"
edits: "{{ openshift_hosted_registry_edits }}"
@@ -144,14 +139,14 @@
namespace: "{{ openshift_hosted_registry_namespace }}"
- name: Wait for pod (Registry)
- include: wait_for_pod.yml
+ include_tasks: wait_for_pod.yml
vars:
l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_registry_wait }}"
l_openshift_hosted_wfp_items: "{{ r_openshift_hosted_registry_list }}"
-- include: storage/glusterfs.yml
+- include_tasks: storage/glusterfs.yml
when:
- - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap
+ - openshift_hosted_registry_storage_kind | default(none) == 'glusterfs' or openshift_hosted_registry_storage_glusterfs_swap
- name: Delete temp directory
file:
diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml
index dd7053656..4e9219477 100644
--- a/roles/openshift_hosted/tasks/router.yml
+++ b/roles/openshift_hosted/tasks/router.yml
@@ -1,6 +1,6 @@
---
- name: setup firewall
- include: firewall.yml
+ import_tasks: firewall.yml
vars:
l_openshift_hosted_firewall_enabled: "{{ r_openshift_hosted_router_firewall_enabled }}"
l_openshift_hosted_use_firewalld: "{{ r_openshift_hosted_router_use_firewalld }}"
@@ -11,16 +11,14 @@
oc_obj:
state: list
kind: node
- namespace: "{{ openshift.hosted.router.namespace | default('default') }}"
- selector: "{{ openshift.hosted.router.selector | default(omit) }}"
+ namespace: "{{ openshift_hosted_router_namespace }}"
+ selector: "{{ openshift_hosted_router_selector }}"
register: router_nodes
- when: openshift.hosted.router.replicas | default(none) is none
+ when: openshift_hosted_router_replicas | default(none) is none
- name: set_fact replicas
set_fact:
- replicas: "{{ openshift.hosted.router.replicas|default(None) | get_router_replicas(router_nodes) }}"
- openshift_hosted_router_selector: "{{ openshift.hosted.router.selector | default(None) }}"
- openshift_hosted_router_image: "{{ openshift.hosted.router.registryurl }}"
+ replicas: "{{ openshift_hosted_router_replicas | default(None) | get_router_replicas(router_nodes) }}"
- name: Get the certificate contents for router
copy:
@@ -42,8 +40,8 @@
signer_key: "{{ openshift_master_config_dir }}/ca.key"
signer_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
hostnames:
- - "{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}"
- - "*.{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}"
+ - "{{ openshift_master_default_subdomain }}"
+ - "*.{{ openshift_master_default_subdomain }}"
cert: "{{ openshift_master_config_dir ~ '/openshift-router.crt' }}"
key: "{{ openshift_master_config_dir ~ '/openshift-router.key' }}"
with_items: "{{ openshift_hosted_routers }}"
@@ -102,7 +100,7 @@
with_items: "{{ openshift_hosted_routers }}"
- name: Wait for pod (Routers)
- include: wait_for_pod.yml
+ include_tasks: wait_for_pod.yml
vars:
l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_router_wait }}"
l_openshift_hosted_wfp_items: "{{ openshift_hosted_routers }}"
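The router tasks now consume `openshift_hosted_router_replicas` and `openshift_master_default_subdomain` directly instead of the `openshift.hosted.router.*` fact tree. A minimal inventory sketch with hypothetical values that pins both, so the node query above is skipped and the router certificate uses the given wildcard subdomain:

```yaml
# Hypothetical overrides; setting the replica count bypasses the
# "Retrieve list of openshift nodes matching router selector" query.
openshift_hosted_router_replicas: 2
openshift_master_default_subdomain: apps.example.com
```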
diff --git a/roles/openshift_hosted/tasks/secure.yml b/roles/openshift_hosted/tasks/secure.yml
index 174bc39a4..378ae32dc 100644
--- a/roles/openshift_hosted/tasks/secure.yml
+++ b/roles/openshift_hosted/tasks/secure.yml
@@ -1,18 +1,10 @@
---
-- name: Configure facts for docker-registry
- set_fact:
- openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift_hosted_registry_routecertificates, {}) }}"
- openshift_hosted_registry_routehost: "{{ ('routehost' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routehost, False) }}"
- openshift_hosted_registry_routetermination: "{{ ('routetermination' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routetermination, 'passthrough') }}"
-
- name: Include reencrypt route configuration
- include: secure/reencrypt.yml
- static: no
+ include_tasks: secure/reencrypt.yml
when: openshift_hosted_registry_routetermination == 'reencrypt'
- name: Include passthrough route configuration
- include: secure/passthrough.yml
- static: no
+ include_tasks: secure/passthrough.yml
when: openshift_hosted_registry_routetermination == 'passthrough'
- name: Fetch the docker-registry route
@@ -39,7 +31,7 @@
- "{{ docker_registry_route.results[0].spec.host }}"
- "{{ openshift_hosted_registry_name }}.default.svc"
- "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift_cluster_domain }}"
- - "{{ openshift_hosted_registry_routehost }}"
+ - "{{ openshift_hosted_registry_routehost | default(omit) }}"
cert: "{{ docker_registry_cert_path }}"
key: "{{ docker_registry_key_path }}"
expire_days: "{{ openshift_hosted_registry_cert_expire_days }}"
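With the `set_fact` shim removed, the route-termination variables are ordinary defaults: `passthrough` termination, an empty `routecertificates` dict, and an omitted `routehost`. A hedged sketch of a re-encrypt configuration; the variable names match the defaults above, while the hostname, paths, and certfile/keyfile/cafile keys follow the example inventory's format and are placeholders:

```yaml
# Hypothetical inventory values that enable the reencrypt branch of secure.yml.
openshift_hosted_registry_routetermination: reencrypt
openshift_hosted_registry_routehost: registry.apps.example.com
openshift_hosted_registry_routecertificates:
  certfile: /etc/origin/master/named_certificates/registry.crt
  keyfile: /etc/origin/master/named_certificates/registry.key
  cafile: /etc/origin/master/named_certificates/registry-ca.crt
```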
diff --git a/roles/openshift_hosted/tasks/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml
index 7cae67baa..18b2edcc6 100644
--- a/roles/openshift_hosted/tasks/storage/glusterfs.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml
@@ -17,7 +17,7 @@
until:
- "registry_pods.results.results[0]['items'] | count > 0"
# There must be as many matching pods with 'Ready' status True as there are expected replicas
- - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | int"
+ - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | default(l_default_replicas) | int"
delay: 10
retries: "{{ (600 / 10) | int }}"
@@ -35,7 +35,7 @@
mount:
state: mounted
fstype: glusterfs
- src: "{% if 'glusterfs_registry' in groups %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift.hosted.registry.storage.glusterfs.path }}"
+ src: "{% if 'glusterfs_registry' in groups %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift_hosted_registry_storage_glusterfs_path }}"
name: "{{ mktemp.stdout }}"
- name: Set registry volume permissions
@@ -60,7 +60,7 @@
- name: Copy current registry contents to new GlusterFS volume
command: "oc rsync {{ registry_pod_name }}:/registry/ {{ mktemp.stdout }}/"
- when: openshift.hosted.registry.storage.glusterfs.swapcopy
+ when: openshift_hosted_registry_storage_glusterfs_swapcopy
- name: Swap new GlusterFS registry volume
oc_volume:
@@ -68,7 +68,7 @@
name: "{{ openshift_hosted_registry_name }}"
vol_name: registry-storage
mount_type: pvc
- claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
+ claim_name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-claim"
- name: Deactivate registry maintenance mode
oc_env:
@@ -77,7 +77,7 @@
state: absent
env_vars:
- REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true'
- when: openshift.hosted.registry.storage.glusterfs.swap
+ when: openshift_hosted_registry_storage_glusterfs_swap
- name: Unmount registry volume and clean up mount point/fstab
mount:
diff --git a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
index 0f4381748..bd7181c17 100644
--- a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
@@ -10,7 +10,7 @@
dest: "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
- name: Create GlusterFS registry service and endpoint
- command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift.hosted.registry.namespace | default('default') }}"
+ command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}"
with_items:
- "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
- "{{ mktempHosted.stdout }}/glusterfs-registry-endpoints.yml"
diff --git a/roles/openshift_hosted/tasks/storage/object_storage.yml b/roles/openshift_hosted/tasks/storage/object_storage.yml
index 8553a8098..a8c26fb51 100644
--- a/roles/openshift_hosted/tasks/storage/object_storage.yml
+++ b/roles/openshift_hosted/tasks/storage/object_storage.yml
@@ -1,6 +1,6 @@
---
-- include: s3.yml
- when: openshift.hosted.registry.storage.provider == 's3'
+- include_tasks: s3.yml
+ when: openshift_hosted_registry_storage_provider == 's3'
- name: Ensure the registry secret exists
oc_secret:
diff --git a/roles/openshift_hosted/tasks/storage/s3.yml b/roles/openshift_hosted/tasks/storage/s3.yml
index 8e905d905..4c100ee4e 100644
--- a/roles/openshift_hosted/tasks/storage/s3.yml
+++ b/roles/openshift_hosted/tasks/storage/s3.yml
@@ -2,8 +2,8 @@
- name: Assert that S3 variables are provided for registry_config template
assert:
that:
- - openshift.hosted.registry.storage.s3.bucket | default(none) is not none
- - openshift.hosted.registry.storage.s3.bucket | default(none) is not none
+ - openshift_hosted_registry_storage_s3_bucket | default(none) is not none
+ - openshift_hosted_registry_storage_s3_region | default(none) is not none
msg: |
When using S3 storage, the following variables are required:
openshift_hosted_registry_storage_s3_bucket
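The assert now checks both the bucket and the region; the second condition previously duplicated the bucket check. A minimal sketch of the S3 variables the assertion expects, with hypothetical values:

```yaml
# Hypothetical inventory values satisfying the S3 assertion above.
openshift_hosted_registry_storage_kind: object
openshift_hosted_registry_storage_provider: s3
openshift_hosted_registry_storage_s3_bucket: my-registry-bucket
openshift_hosted_registry_storage_s3_region: us-east-1
```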
diff --git a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2
index 607d25533..3c874d910 100644
--- a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2
+++ b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2
@@ -2,7 +2,7 @@
apiVersion: v1
kind: Endpoints
metadata:
- name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }}
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
subsets:
- addresses:
{% for ip in openshift_hosted_registry_storage_glusterfs_ips %}
diff --git a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2
index 452c7c3e1..f18c94a4f 100644
--- a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2
+++ b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2
@@ -2,7 +2,7 @@
apiVersion: v1
kind: Service
metadata:
- name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }}
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
spec:
ports:
- port: 1
diff --git a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2
index 607d25533..3c874d910 100644
--- a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2
+++ b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2
@@ -2,7 +2,7 @@
apiVersion: v1
kind: Endpoints
metadata:
- name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }}
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
subsets:
- addresses:
{% for ip in openshift_hosted_registry_storage_glusterfs_ips %}
diff --git a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2
index 452c7c3e1..f18c94a4f 100644
--- a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2
+++ b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2
@@ -2,7 +2,7 @@
apiVersion: v1
kind: Service
metadata:
- name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }}
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
spec:
ports:
- port: 1
diff --git a/roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..3c874d910
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
+subsets:
+- addresses:
+{% for ip in openshift_hosted_registry_storage_glusterfs_ips %}
+ - ip: {{ ip }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..f18c94a4f
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..3c874d910
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
+subsets:
+- addresses:
+{% for ip in openshift_hosted_registry_storage_glusterfs_ips %}
+ - ip: {{ ip }}
+{% endfor %}
+ ports:
+ - port: 1
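
For illustration, the endpoints template above renders to the following object, assuming `openshift_hosted_registry_storage_glusterfs_endpoints` is `glusterfs-registry-endpoints` and `openshift_hosted_registry_storage_glusterfs_ips` is `['192.168.10.11', '192.168.10.12']` (both hypothetical values):

```
---
apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs-registry-endpoints
subsets:
- addresses:
  - ip: 192.168.10.11
  - ip: 192.168.10.12
  ports:
  - port: 1
```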
diff --git a/roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..f18c94a4f
--- /dev/null
+++ b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_hosted_facts/meta/main.yml b/roles/openshift_hosted_facts/meta/main.yml
deleted file mode 100644
index dd2de07bc..000000000
--- a/roles/openshift_hosted_facts/meta/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-galaxy_info:
- author: Andrew Butcher
- description: OpenShift Hosted Facts
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 1.9
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
-dependencies:
-- role: openshift_facts
diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml
deleted file mode 100644
index 8fc70cecb..000000000
--- a/roles/openshift_hosted_facts/tasks/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-# openshift_*_selector variables have been deprecated in favor of
-# openshift_hosted_*_selector variables.
-- set_fact:
- openshift_hosted_router_selector: "{{ openshift_router_selector | default(openshift_hosted_infra_selector) }}"
- when: openshift_hosted_router_selector is not defined and openshift_hosted_infra_selector is defined
-- set_fact:
- openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}"
- when: openshift_hosted_registry_selector is not defined and openshift_hosted_infra_selector is defined
-
-- name: Set hosted facts
- openshift_facts:
- role: "{{ item }}"
- openshift_env: "{{ hostvars
- | oo_merge_hostvars(vars, inventory_hostname)
- | oo_openshift_env }}"
- openshift_env_structures:
- - 'openshift.hosted.router.*'
- with_items: [hosted, logging, loggingops, metrics, prometheus]
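
With this shim removed, the non-deprecated selector variables are expected to be set directly in the inventory; a minimal sketch (selector values are placeholders):

```
# group_vars sketch: set the openshift_hosted_* selectors directly instead of
# relying on the removed openshift_*_selector translation above
openshift_hosted_router_selector: "region=infra"
openshift_hosted_registry_selector: "region=infra"
```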
diff --git a/roles/openshift_hosted_metrics/README.md b/roles/openshift_hosted_metrics/README.md
deleted file mode 100644
index c2af3c494..000000000
--- a/roles/openshift_hosted_metrics/README.md
+++ /dev/null
@@ -1,54 +0,0 @@
-OpenShift Metrics with Hawkular
-====================
-
-OpenShift Metrics Installation
-
-Requirements
-------------
-
-* Ansible 2.2
-* It requires subdomain fqdn to be set.
-* If persistence is enabled, then it also requires NFS.
-
-Role Variables
---------------
-
-From this role:
-
-| Name | Default value | |
-|-------------------------------------------------|-----------------------|-------------------------------------------------------------|
-| openshift_hosted_metrics_deploy | `False` | If metrics should be deployed |
-| openshift_hosted_metrics_public_url | null | Hawkular metrics public url |
-| openshift_hosted_metrics_storage_nfs_directory | `/exports` | Root export directory. |
-| openshift_hosted_metrics_storage_volume_name | `metrics` | Metrics volume within openshift_hosted_metrics_volume_dir |
-| openshift_hosted_metrics_storage_volume_size | `10Gi` | Metrics volume size |
-| openshift_hosted_metrics_storage_nfs_options | `*(rw,root_squash)` | NFS options for configured exports. |
-| openshift_hosted_metrics_duration | `7` | Metrics query duration |
-| openshift_hosted_metrics_resolution | `10s` | Metrics resolution |
-
-
-Dependencies
-------------
-openshift_facts
-openshift_examples
-openshift_master_facts
-
-Example Playbook
-----------------
-
-```
-- name: Configure openshift-metrics
- hosts: oo_first_master
- roles:
- - role: openshift_hosted_metrics
-```
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-Jose David Martín (j.david.nieto@gmail.com)
diff --git a/roles/openshift_hosted_metrics/defaults/main.yml b/roles/openshift_hosted_metrics/defaults/main.yml
deleted file mode 100644
index a01f24df8..000000000
--- a/roles/openshift_hosted_metrics/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted"
diff --git a/roles/openshift_hosted_metrics/handlers/main.yml b/roles/openshift_hosted_metrics/handlers/main.yml
deleted file mode 100644
index 074b72942..000000000
--- a/roles/openshift_hosted_metrics/handlers/main.yml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
- when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
- notify: Verify API Server
-
-# We retry the controllers because the API may not be 100% initialized yet.
-- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
- retries: 3
- delay: 5
- register: result
- until: result.rc == 0
- when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
-
-- name: Verify API Server
- # Using curl here since the uri module requires python-httplib2 and
- # wait_for port doesn't provide health information.
- command: >
- curl --silent --tlsv1.2
- --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {{ openshift.master.api_url }}/healthz/ready
- args:
- # Disables the following warning:
- # Consider using get_url or uri module rather than running curl
- warn: no
- register: api_available_output
- until: api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- changed_when: false
diff --git a/roles/openshift_hosted_metrics/meta/main.yaml b/roles/openshift_hosted_metrics/meta/main.yaml
deleted file mode 100644
index debca3ca6..000000000
--- a/roles/openshift_hosted_metrics/meta/main.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-galaxy_info:
- author: David Martín
- description:
- company:
- license: Apache License, Version 2.0
- min_ansible_version: 2.2
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
- - system
-dependencies:
-- { role: openshift_examples }
-- { role: openshift_facts }
-- { role: openshift_master_facts }
diff --git a/roles/openshift_hosted_metrics/tasks/install.yml b/roles/openshift_hosted_metrics/tasks/install.yml
deleted file mode 100644
index 15dd1bd54..000000000
--- a/roles/openshift_hosted_metrics/tasks/install.yml
+++ /dev/null
@@ -1,132 +0,0 @@
----
-
-- name: Test if metrics-deployer service account exists
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace=openshift-infra
- get serviceaccount metrics-deployer -o json
- register: serviceaccount
- changed_when: false
- failed_when: false
-
-- name: Create metrics-deployer Service Account
- shell: >
- echo {{ metrics_deployer_sa | to_json | quote }} |
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- create -f -
- when: serviceaccount.rc == 1
-
-- name: Test edit permissions
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- get rolebindings -o jsonpath='{.items[?(@.metadata.name == "edit")].userNames}'
- register: edit_rolebindings
- changed_when: false
-
-- name: Add edit permission to the openshift-infra project to metrics-deployer SA
- command: >
- {{ openshift.common.client_binary }} adm
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- policy add-role-to-user edit
- system:serviceaccount:openshift-infra:metrics-deployer
- when: "'system:serviceaccount:openshift-infra:metrics-deployer' not in edit_rolebindings.stdout"
-
-- name: Test hawkular view permissions
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- get rolebindings -o jsonpath='{.items[?(@.metadata.name == "view")].userNames}'
- register: view_rolebindings
- changed_when: false
-
-- name: Add view permissions to hawkular SA
- command: >
- {{ openshift.common.client_binary }} adm
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- policy add-role-to-user view
- system:serviceaccount:openshift-infra:hawkular
- when: "'system:serviceaccount:openshift-infra:hawkular' not in view_rolebindings"
-
-- name: Test cluster-reader permissions
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- get clusterrolebindings -o jsonpath='{.items[?(@.metadata.name == "cluster-reader")].userNames}'
- register: cluster_reader_clusterrolebindings
- changed_when: false
-
-- name: Add cluster-reader permission to the openshift-infra project to heapster SA
- command: >
- {{ openshift.common.client_binary }} adm
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- policy add-cluster-role-to-user cluster-reader
- system:serviceaccount:openshift-infra:heapster
- when: "'system:serviceaccount:openshift-infra:heapster' not in cluster_reader_clusterrolebindings.stdout"
-
-- name: Create metrics-deployer secret
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- secrets new metrics-deployer nothing=/dev/null
- register: metrics_deployer_secret
- changed_when: metrics_deployer_secret.rc == 0
- failed_when: metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr
-
-# TODO: extend this to allow user passed in certs or generating cert with
-# OpenShift CA
-- name: Build metrics deployer command
- set_fact:
- deployer_cmd: "{{ openshift.common.client_binary }} process -f \
- {{ hosted_base }}/metrics-deployer.yaml -v \
- HAWKULAR_METRICS_HOSTNAME={{ g_metrics_hostname }} \
- -v USE_PERSISTENT_STORAGE={{metrics_persistence | string | lower }} \
- -v DYNAMICALLY_PROVISION_STORAGE={{metrics_dynamic_vol | string | lower }} \
- -v METRIC_DURATION={{ openshift.hosted.metrics.duration }} \
- -v METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }}
- {{ image_prefix }} \
- {{ image_version }} \
- -v MODE={{ deployment_mode }} \
- | {{ openshift.common.client_binary }} --namespace openshift-infra \
- --config={{ openshift_hosted_metrics_kubeconfig }} \
- create -o name -f -"
-
-- name: Deploy Metrics
- shell: "{{ deployer_cmd }}"
- register: deploy_metrics
- failed_when: "'already exists' not in deploy_metrics.stderr and deploy_metrics.rc != 0"
- changed_when: deploy_metrics.rc == 0
-
-- set_fact:
- deployer_pod: "{{ deploy_metrics.stdout[1:2] }}"
-
-# TODO: re-enable this once the metrics deployer validation issue is fixed
-# when using dynamically provisioned volumes
-- name: "Wait for image pull and deployer pod"
- shell: >
- {{ openshift.common.client_binary }}
- --namespace openshift-infra
- --config={{ openshift_hosted_metrics_kubeconfig }}
- get {{ deploy_metrics.stdout }}
- register: deploy_result
- until: "{{ 'Completed' in deploy_result.stdout }}"
- failed_when: False
- retries: 60
- delay: 10
-
-- name: Configure master for metrics
- modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: assetConfig.metricsPublicURL
- yaml_value: "{{ openshift_hosted_metrics_deploy_url }}"
- notify: restart master
diff --git a/roles/openshift_hosted_metrics/tasks/main.yaml b/roles/openshift_hosted_metrics/tasks/main.yaml
deleted file mode 100644
index 5ce8aa92b..000000000
--- a/roles/openshift_hosted_metrics/tasks/main.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
----
-- name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
-
-- name: Record kubeconfig tmp dir
- set_fact:
- openshift_hosted_metrics_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
-
-- name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_hosted_metrics_kubeconfig }}
- changed_when: False
-
-- name: Set hosted metrics facts
- openshift_facts:
- role: hosted
- openshift_env: "{{ hostvars
- | oo_merge_hostvars(vars, inventory_hostname)
- | oo_openshift_env }}"
- openshift_env_structures:
- - 'openshift.hosted.metrics.*'
-
-- set_fact:
- metrics_persistence: "{{ openshift.hosted.metrics.storage_kind | default(none) is not none }}"
- metrics_dynamic_vol: "{{ openshift.hosted.metrics.storage_kind | default(none) == 'dynamic' }}"
- metrics_template_dir: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples/infrastructure-templates/{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
- image_prefix: "{{ '-v IMAGE_PREFIX=' ~ openshift.hosted.metrics.deployer.prefix if 'prefix' in openshift.hosted.metrics.deployer else '' }}"
- image_version: "{{ '-v IMAGE_VERSION=' ~ openshift.hosted.metrics.deployer.version if 'version' in openshift.hosted.metrics.deployer else '' }}"
-
-
-- name: Check for existing metrics pods
- shell: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- get pods -l {{ item }} | grep -q Running
- register: metrics_pods_status
- with_items:
- - metrics-infra=hawkular-metrics
- - metrics-infra=heapster
- - metrics-infra=hawkular-cassandra
- failed_when: false
- changed_when: false
-
-- name: Check for previous deployer
- shell: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_metrics_kubeconfig }}
- --namespace openshift-infra
- get pods -l metrics-infra=deployer --sort-by='{.metadata.creationTimestamp}' | tail -1 | grep metrics-deployer-
- register: metrics_deployer_status
- failed_when: false
- changed_when: false
-
-- name: Record current deployment status
- set_fact:
- greenfield: "{{ not metrics_deployer_status.rc == 0 }}"
- failed_error: "{{ True if 'Error' in metrics_deployer_status.stdout else False }}"
- metrics_running: "{{ metrics_pods_status.results | oo_collect(attribute='rc') == [0,0,0] }}"
-
-- name: Set deployment mode
- set_fact:
- deployment_mode: "{{ 'refresh' if (failed_error | bool or metrics_upgrade | bool) else 'deploy' }}"
-
-# TODO: handle non greenfield deployments in the future
-- include: install.yml
- when: greenfield
-
-- name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
diff --git a/roles/openshift_hosted_metrics/vars/main.yaml b/roles/openshift_hosted_metrics/vars/main.yaml
deleted file mode 100644
index 6c207d6ac..000000000
--- a/roles/openshift_hosted_metrics/vars/main.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-hawkular_permission_oc_commands:
- - policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra
- - policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster
-
-metrics_deployer_sa:
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: metrics-deployer
- secrets:
- - name: metrics-deployer
-
-
-hawkular_tmp_conf: /tmp/hawkular_admin.kubeconfig
-
-hawkular_persistence: "{% if openshift.hosted.metrics.storage.kind != None %}true{% else %}false{% endif %}"
-
-hawkular_type: "{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
-
-metrics_upgrade: openshift.hosted.metrics.upgrade | default(False)
diff --git a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
index 57121447d..0343a7eb0 100644
--- a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
+++ b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
@@ -5,7 +5,7 @@ PartOf={{ openshift_docker_service_name }}.service
[Service]
ExecStartPre=-/usr/bin/docker rm -f openshift_loadbalancer
-ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer -p {{ openshift_master_api_port | default(8443) }}:{{ openshift_master_api_port | default(8443) }} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint=haproxy {{ openshift.common.router_image }}:{{ openshift_image_tag }} -f /etc/haproxy/haproxy.cfg
+ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer {% for frontend in openshift_loadbalancer_frontends %} {% for bind in frontend.binds %} -p {{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }}:{{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }} {% endfor %} {% endfor %} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint=haproxy {{ openshift.common.router_image }}:{{ openshift_image_tag }} -f /etc/haproxy/haproxy.cfg
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop openshift_loadbalancer
LimitNOFILE={{ openshift_loadbalancer_limit_nofile | default(100000) }}
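
The new ExecStart publishes one `-p <port>:<port>` pair per frontend bind by stripping everything up to and including the address separator. A minimal debug sketch of the same filter applied to a sample bind string (the `*:8443` value is hypothetical):

```
# minimal sketch: extract the published port from a sample haproxy bind,
# using the same regex_replace as the ExecStart line above; prints "8443"
- debug:
    msg: >-
      {{ '*:8443' | regex_replace('^[^:]*:(\d+).*$', '\\1') }}
```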
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 497c6e0c5..2f1aa061f 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -28,7 +28,7 @@ openshift_logging_curator_ops_memory_limit: 256Mi
openshift_logging_curator_ops_cpu_request: 100m
openshift_logging_curator_ops_nodeselector: {}
-openshift_logging_kibana_hostname: "{{ 'kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_kibana_hostname: "{{ 'kibana.' ~ openshift_master_default_subdomain }}"
openshift_logging_kibana_cpu_limit: null
openshift_logging_kibana_memory_limit: 736Mi
openshift_logging_kibana_cpu_request: 100m
@@ -54,7 +54,7 @@ openshift_logging_kibana_key: ""
#for the public facing kibana certs
openshift_logging_kibana_ca: ""
-openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ openshift_master_default_subdomain }}"
openshift_logging_kibana_ops_cpu_limit: null
openshift_logging_kibana_ops_memory_limit: 736Mi
openshift_logging_kibana_ops_cpu_request: 100m
@@ -109,7 +109,7 @@ openshift_logging_es_config: {}
# for exposing es to external (outside of the cluster) clients
openshift_logging_es_allow_external: False
-openshift_logging_es_hostname: "{{ 'es.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_es_hostname: "{{ 'es.' ~ openshift_master_default_subdomain }}"
#The absolute path on the control node to the cert file to use
#for the public facing es certs
@@ -145,7 +145,7 @@ openshift_logging_es_ops_nodeselector: {}
# for exposing es-ops to external (outside of the cluster) clients
openshift_logging_es_ops_allow_external: False
-openshift_logging_es_ops_hostname: "{{ 'es-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_es_ops_hostname: "{{ 'es-ops.' ~ openshift_master_default_subdomain }}"
#The absolute path on the control node to the cert file to use
#for the public facing es-ops certs
@@ -165,7 +165,7 @@ openshift_logging_storage_access_modes: ['ReadWriteOnce']
# mux - secure_forward listener service
openshift_logging_mux_allow_external: False
openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
-openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_mux_hostname: "{{ 'mux.' ~ openshift_master_default_subdomain }}"
openshift_logging_mux_port: 24284
openshift_logging_mux_cpu_limit: null
openshift_logging_mux_memory_limit: 512Mi
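
These defaults no longer fall back to `router.default.svc.cluster.local`, so the subdomain has to be supplied; a one-line group_vars sketch (the domain is a placeholder):

```
# group_vars sketch: required now that the logging hostnames above no longer
# default the subdomain; kibana becomes kibana.apps.example.com, and so on
openshift_master_default_subdomain: apps.example.com
```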
diff --git a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml
index 074b72942..1f4b5a116 100644
--- a/roles/openshift_logging/handlers/main.yml
+++ b/roles/openshift_logging/handlers/main.yml
@@ -1,17 +1,17 @@
---
- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
- when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
+ when: (not (master_api_service_status_changed | default(false) | bool))
notify: Verify API Server
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
until: result.rc == 0
- when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ when: (not (master_controllers_service_status_changed | default(false) | bool))
- name: Verify API Server
# Using curl here since the uri module requires python-httplib2 and
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index f526fd734..082c0128f 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -36,7 +36,7 @@
- top_dir: '{{generated_certs_dir}}'
when: not signing_conf_file.stat.exists
-- include: procure_server_certs.yaml
+- include_tasks: procure_server_certs.yaml
loop_control:
loop_var: cert_info
with_items:
@@ -45,7 +45,7 @@
- procure_component: kibana-internal
hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}"
-- include: procure_server_certs.yaml
+- include_tasks: procure_server_certs.yaml
loop_control:
loop_var: cert_info
with_items:
@@ -53,14 +53,14 @@
hostnames: "logging-mux, {{openshift_logging_mux_hostname}}"
when: openshift_logging_use_mux | bool
-- include: procure_shared_key.yaml
+- include_tasks: procure_shared_key.yaml
loop_control:
loop_var: shared_key_info
with_items:
- procure_component: mux
when: openshift_logging_use_mux | bool
-- include: procure_server_certs.yaml
+- include_tasks: procure_server_certs.yaml
loop_control:
loop_var: cert_info
with_items:
@@ -68,7 +68,7 @@
hostnames: "es, {{openshift_logging_es_hostname}}"
when: openshift_logging_es_allow_external | bool
-- include: procure_server_certs.yaml
+- include_tasks: procure_server_certs.yaml
loop_control:
loop_var: cert_info
with_items:
@@ -109,7 +109,7 @@
- not ca_cert_srl_file.stat.exists
- name: Generate PEM certs
- include: generate_pems.yaml component={{node_name}}
+ include_tasks: generate_pems.yaml component={{node_name}}
with_items:
- system.logging.fluentd
- system.logging.kibana
@@ -119,7 +119,7 @@
loop_var: node_name
- name: Generate PEM cert for mux
- include: generate_pems.yaml component={{node_name}}
+ include_tasks: generate_pems.yaml component={{node_name}}
with_items:
- system.logging.mux
loop_control:
@@ -127,7 +127,7 @@
when: openshift_logging_use_mux | bool
- name: Generate PEM cert for Elasticsearch external route
- include: generate_pems.yaml component={{node_name}}
+ include_tasks: generate_pems.yaml component={{node_name}}
with_items:
- system.logging.es
loop_control:
@@ -135,7 +135,7 @@
when: openshift_logging_es_allow_external | bool
- name: Creating necessary JKS certs
- include: generate_jks.yaml
+ include_tasks: generate_jks.yaml
# TODO: make idempotent
- name: Generate proxy session
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 2fefdc894..bb8ebec6b 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -52,7 +52,7 @@
changed_when: False
check_mode: no
-- include: generate_certs.yaml
+- include_tasks: generate_certs.yaml
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -250,7 +250,7 @@
when:
- openshift_logging_use_ops | bool
-- include: annotate_ops_projects.yaml
+- include_tasks: annotate_ops_projects.yaml
## Curator
- include_role:
@@ -311,4 +311,4 @@
openshift_logging_install_eventrouter | default(false) | bool
-- include: update_master_config.yaml
+- include_tasks: update_master_config.yaml
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index 2571548d6..9949bb95d 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -19,11 +19,11 @@
check_mode: no
become: no
-- include: install_logging.yaml
+- include_tasks: install_logging.yaml
when:
- openshift_logging_install_logging | default(false) | bool
-- include: delete_logging.yaml
+- include_tasks: delete_logging.yaml
when:
- not openshift_logging_install_logging | default(false) | bool
diff --git a/roles/openshift_logging_curator/meta/main.yaml b/roles/openshift_logging_curator/meta/main.yaml
index 6752fb7f9..d4635aab0 100644
--- a/roles/openshift_logging_curator/meta/main.yaml
+++ b/roles/openshift_logging_curator/meta/main.yaml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml
index 7ddf57450..e7ef5ff22 100644
--- a/roles/openshift_logging_curator/tasks/main.yaml
+++ b/roles/openshift_logging_curator/tasks/main.yaml
@@ -12,7 +12,7 @@
openshift_logging_curator_image_prefix: "{{ openshift_logging_curator_image_prefix | default(__openshift_logging_curator_image_prefix) }}"
openshift_logging_curator_image_version: "{{ openshift_logging_curator_image_version | default(__openshift_logging_curator_image_version) }}"
-- include: determine_version.yaml
+- include_tasks: determine_version.yaml
# allow passing in a tempdir
- name: Create temp directory for doing work in
diff --git a/roles/openshift_logging_elasticsearch/meta/main.yaml b/roles/openshift_logging_elasticsearch/meta/main.yaml
index 097270772..6a9a6539c 100644
--- a/roles/openshift_logging_elasticsearch/meta/main.yaml
+++ b/roles/openshift_logging_elasticsearch/meta/main.yaml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 770892d52..8f2050043 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -30,7 +30,7 @@
openshift_logging_elasticsearch_image_prefix: "{{ openshift_logging_elasticsearch_image_prefix | default(__openshift_logging_elasticsearch_image_prefix) }}"
openshift_logging_elasticsearch_image_version: "{{ openshift_logging_elasticsearch_image_version | default(__openshift_logging_elasticsearch_image_version) }}"
-- include: determine_version.yaml
+- include_tasks: determine_version.yaml
# allow passing in a tempdir
- name: Create temp directory for doing work in
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index 0bfa9e85b..bf04094a3 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -9,6 +9,7 @@ metadata:
logging-infra: "{{logging_component}}"
spec:
replicas: {{es_replicas|default(1)}}
+ revisionHistoryLimit: 0
selector:
provider: openshift
component: "{{component}}"
diff --git a/roles/openshift_logging_eventrouter/tasks/main.yaml b/roles/openshift_logging_eventrouter/tasks/main.yaml
index b1f93eeb9..96b181d61 100644
--- a/roles/openshift_logging_eventrouter/tasks/main.yaml
+++ b/roles/openshift_logging_eventrouter/tasks/main.yaml
@@ -12,8 +12,8 @@
openshift_logging_eventrouter_image_prefix: "{{ openshift_logging_eventrouter_image_prefix | default(__openshift_logging_eventrouter_image_prefix) }}"
openshift_logging_eventrouter_image_version: "{{ openshift_logging_eventrouter_image_version | default(__openshift_logging_eventrouter_image_version) }}"
-- include: "{{ role_path }}/tasks/install_eventrouter.yaml"
+- include_tasks: install_eventrouter.yaml
when: openshift_logging_install_eventrouter | default(false) | bool
-- include: "{{ role_path }}/tasks/delete_eventrouter.yaml"
+- include_tasks: delete_eventrouter.yaml
when: not openshift_logging_install_eventrouter | default(false) | bool
diff --git a/roles/openshift_logging_fluentd/meta/main.yaml b/roles/openshift_logging_fluentd/meta/main.yaml
index 2003aacb2..89c98204f 100644
--- a/roles/openshift_logging_fluentd/meta/main.yaml
+++ b/roles/openshift_logging_fluentd/meta/main.yaml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index f8683ab75..87eedfb4b 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -47,7 +47,7 @@
openshift_logging_fluentd_image_prefix: "{{ openshift_logging_fluentd_image_prefix | default(__openshift_logging_fluentd_image_prefix) }}"
openshift_logging_fluentd_image_version: "{{ openshift_logging_fluentd_image_version | default(__openshift_logging_fluentd_image_version) }}"
-- include: determine_version.yaml
+- include_tasks: determine_version.yaml
# allow passing in a tempdir
- name: Create temp directory for doing work in
@@ -216,7 +216,7 @@
openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
when: "'--all' in openshift_logging_fluentd_hosts"
-- include: label_and_wait.yaml
+- include_tasks: label_and_wait.yaml
vars:
node: "{{ fluentd_host }}"
with_items: "{{ openshift_logging_fluentd_hosts }}"
diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml
index 6cdf7c8f3..899193838 100644
--- a/roles/openshift_logging_kibana/defaults/main.yml
+++ b/roles/openshift_logging_kibana/defaults/main.yml
@@ -10,7 +10,7 @@ openshift_logging_kibana_cpu_limit: null
openshift_logging_kibana_cpu_request: 100m
openshift_logging_kibana_memory_limit: 736Mi
-openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}"
openshift_logging_kibana_es_host: "logging-es"
openshift_logging_kibana_es_port: 9200
diff --git a/roles/openshift_logging_kibana/meta/main.yaml b/roles/openshift_logging_kibana/meta/main.yaml
index 89e08abc0..d97586a37 100644
--- a/roles/openshift_logging_kibana/meta/main.yaml
+++ b/roles/openshift_logging_kibana/meta/main.yaml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
index 9d99114c5..77bf8042a 100644
--- a/roles/openshift_logging_kibana/tasks/main.yaml
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -15,7 +15,7 @@
openshift_logging_kibana_proxy_image_prefix: "{{ openshift_logging_kibana_proxy_image_prefix | default(__openshift_logging_kibana_proxy_image_prefix) }}"
openshift_logging_kibana_proxy_image_version: "{{ openshift_logging_kibana_proxy_image_version | default(__openshift_logging_kibana_proxy_image_version) }}"
-- include: determine_version.yaml
+- include_tasks: determine_version.yaml
# allow passing in a tempdir
- name: Create temp directory for doing work in
diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml
index cd15da939..1e6c501bf 100644
--- a/roles/openshift_logging_mux/defaults/main.yml
+++ b/roles/openshift_logging_mux/defaults/main.yml
@@ -28,7 +28,7 @@ openshift_logging_mux_journal_read_from_head: "{{ openshift_hosted_logging_journ
openshift_logging_mux_allow_external: False
openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
-openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_mux_hostname: "{{ 'mux.' ~ openshift_master_default_subdomain }}"
openshift_logging_mux_port: 24284
# the namespace to use for undefined projects should come first, followed by any
# additional namespaces to create by default - users will typically not need to set this
diff --git a/roles/openshift_logging_mux/meta/main.yaml b/roles/openshift_logging_mux/meta/main.yaml
index f40beb79d..f271d8d7d 100644
--- a/roles/openshift_logging_mux/meta/main.yaml
+++ b/roles/openshift_logging_mux/meta/main.yaml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
index 242d92188..68948bce2 100644
--- a/roles/openshift_logging_mux/tasks/main.yaml
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -20,7 +20,7 @@
openshift_logging_mux_image_prefix: "{{ openshift_logging_mux_image_prefix | default(__openshift_logging_mux_image_prefix) }}"
openshift_logging_mux_image_version: "{{ openshift_logging_mux_image_version | default(__openshift_logging_mux_image_version) }}"
-- include: determine_version.yaml
+- include_tasks: determine_version.yaml
# allow passing in a tempdir
- name: Create temp directory for doing work in
diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md
index 96de82669..974d9781a 100644
--- a/roles/openshift_management/README.md
+++ b/roles/openshift_management/README.md
@@ -164,14 +164,14 @@ away.
If you want to install CFME/MIQ at the same time you install your
OCP/Origin cluster, ensure that `openshift_management_install_management` is set
to `true` in your inventory. Call the standard
-`playbooks/byo/config.yml` playbook to begin the cluster and CFME/MIQ
+`playbooks/deploy_cluster.yml` playbook to begin the cluster and CFME/MIQ
installation.
If you are installing CFME/MIQ on an *already provisioned cluster*
then you can call the CFME/MIQ playbook directly:
```
-$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/config.yml
+$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/openshift-management/config.yml
```
*Note: Use `miq-template` in the following examples for ManageIQ installs*
@@ -489,7 +489,7 @@ This playbook will:
```
-$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/add_container_provider.yml
+$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/openshift-management/add_container_provider.yml
```
## Multiple Providers
@@ -567,7 +567,7 @@ the config file path.
```
$ ansible-playbook -v -e container_providers_config=/tmp/cp.yml \
- playbooks/byo/openshift-management/add_many_container_providers.yml
+ playbooks/openshift-management/add_many_container_providers.yml
```
Afterwards you will find two new container providers in your
@@ -579,7 +579,7 @@ to see an overview.
This role includes a playbook to uninstall and erase the CFME/MIQ
installation:
-* `playbooks/byo/openshift-management/uninstall.yml`
+* `playbooks/openshift-management/uninstall.yml`
NFS export definitions and data stored on NFS exports are not
automatically removed. You are urged to manually erase any data from
diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml
index e768961ce..b5e234b7f 100644
--- a/roles/openshift_management/defaults/main.yml
+++ b/roles/openshift_management/defaults/main.yml
@@ -88,7 +88,7 @@ openshift_management_storage_nfs_local_hostname: false
# name and password AND are trying to use integration scripts.
#
# For example, adding this cluster as a container provider,
-# playbooks/byo/openshift-management/add_container_provider.yml
+# playbooks/openshift-management/add_container_provider.yml
openshift_management_username: admin
openshift_management_password: smartvm
diff --git a/roles/openshift_management/meta/main.yml b/roles/openshift_management/meta/main.yml
index 07ad51126..9f19704a8 100644
--- a/roles/openshift_management/meta/main.yml
+++ b/roles/openshift_management/meta/main.yml
@@ -16,3 +16,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_utils
+- role: openshift_facts
diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml
index 3bade9e8c..f212dba7c 100644
--- a/roles/openshift_management/tasks/main.yml
+++ b/roles/openshift_management/tasks/main.yml
@@ -3,7 +3,7 @@
# Users, projects, and privileges
- name: Run pre-install Management validation checks
- include: validate.yml
+ include_tasks: validate.yml
# This creates a service account allowing Container Provider
# integration (managing OCP/Origin via MIQ/Management)
@@ -18,18 +18,18 @@
display_name: "{{ openshift_management_project_description }}"
- name: Create and Authorize Management Accounts
- include: accounts.yml
+ include_tasks: accounts.yml
######################################################################
# STORAGE - Initialize basic storage class
- name: Determine the correct NFS host if required
- include: storage/nfs_server.yml
+ include_tasks: storage/nfs_server.yml
when: openshift_management_storage_class in ['nfs', 'nfs_external']
#---------------------------------------------------------------------
# * nfs - set up NFS shares on the first master for a proof of concept
- name: Create required NFS exports for Management app storage
- include: storage/nfs.yml
+ include_tasks: storage/nfs.yml
when: openshift_management_storage_class == 'nfs'
#---------------------------------------------------------------------
@@ -56,14 +56,14 @@
######################################################################
# APPLICATION TEMPLATE
- name: Install the Management app and PV templates
- include: template.yml
+ include_tasks: template.yml
######################################################################
# APP & DB Storage
# For local/external NFS backed installations
- name: "Create the required App and DB PVs using {{ openshift_management_storage_class }}"
- include: storage/create_nfs_pvs.yml
+ include_tasks: storage/create_nfs_pvs.yml
when:
- openshift_management_storage_class in ['nfs', 'nfs_external']
diff --git a/roles/openshift_management/tasks/storage/storage.yml b/roles/openshift_management/tasks/storage/storage.yml
index d8bf7aa3e..a3675b29b 100644
--- a/roles/openshift_management/tasks/storage/storage.yml
+++ b/roles/openshift_management/tasks/storage/storage.yml
@@ -1,3 +1,3 @@
---
-- include: nfs.yml
+- include_tasks: nfs.yml
when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index 359536202..557bfe022 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -1,24 +1,22 @@
---
- name: restart master api
systemd:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
state: restarted
when:
- not (master_api_service_status_changed | default(false) | bool)
- - openshift.master.cluster_method == 'native'
notify:
- Verify API Server
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
until: result.rc == 0
when:
- not (master_controllers_service_status_changed | default(false) | bool)
- - openshift.master.cluster_method == 'native'
- name: Verify API Server
# Using curl here since the uri module requires python-httplib2 and
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index a1cda2ad4..bf0cbbf18 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -15,3 +15,4 @@ dependencies:
- role: lib_openshift
- role: lib_utils
- role: lib_os_firewall
+- role: openshift_facts
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index e52cd6231..9be5508aa 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -11,31 +11,12 @@
- openshift_master_oauth_grant_method is defined
- openshift_master_oauth_grant_method not in openshift_master_valid_grant_methods
-# HA Variable Validation
-- fail:
- msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations"
- when:
- - openshift.master.ha | bool
- - (openshift.master.cluster_method is not defined) or (openshift.master.cluster_method is defined and openshift.master.cluster_method not in ["native", "pacemaker"])
-- fail:
- msg: "openshift_master_cluster_password must be set for multi-master installations"
- when:
- - openshift.master.ha | bool
- - openshift.master.cluster_method == "pacemaker"
- - openshift_master_cluster_password is not defined or not openshift_master_cluster_password
-- fail:
- msg: "Pacemaker based HA is not supported at this time when used with containerized installs"
- when:
- - openshift.master.ha | bool
- - openshift.master.cluster_method == "pacemaker"
- - openshift.common.is_containerized | bool
-
- name: Open up firewall ports
import_tasks: firewall.yml
- name: Install Master package
package:
- name: "{{ openshift.common.service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when:
- not openshift.common.is_containerized | bool
@@ -160,7 +141,7 @@
# The template file will stomp any other settings made.
- block:
- name: check whether our docker-registry setting exists in the env file
- command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift.common.service_type }}-master"
+ command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift_service_type }}-master"
failed_when: false
changed_when: false
register: l_already_set
@@ -222,11 +203,10 @@
- name: Start and enable master api on first master
systemd:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
enabled: yes
state: started
when:
- - openshift.master.cluster_method == 'native'
- inventory_hostname == openshift_master_hosts[0]
register: l_start_result
until: not l_start_result | failed
@@ -234,29 +214,26 @@
delay: 60
- name: Dump logs from master-api if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api
+ command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-api
when:
- l_start_result | failed
- set_fact:
master_api_service_status_changed: "{{ l_start_result | changed }}"
when:
- - openshift.master.cluster_method == 'native'
- inventory_hostname == openshift_master_hosts[0]
- pause:
seconds: 15
when:
- openshift.master.ha | bool
- - openshift.master.cluster_method == 'native'
- name: Start and enable master api all masters
systemd:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
enabled: yes
state: started
when:
- - openshift.master.cluster_method == 'native'
- inventory_hostname != openshift_master_hosts[0]
register: l_start_result
until: not l_start_result | failed
@@ -264,67 +241,39 @@
delay: 60
- name: Dump logs from master-api if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api
+ command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-api
when:
- l_start_result | failed
- set_fact:
master_api_service_status_changed: "{{ l_start_result | changed }}"
when:
- - openshift.master.cluster_method == 'native'
- inventory_hostname != openshift_master_hosts[0]
# A separate wait is required here for native HA since notifies will
# be resolved after all tasks in the role.
- include_tasks: check_master_api_is_ready.yml
when:
- - openshift.master.cluster_method == 'native'
- master_api_service_status_changed | bool
- name: Start and enable master controller service
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
enabled: yes
state: started
- when:
- - openshift.master.cluster_method == 'native'
register: l_start_result
until: not l_start_result | failed
retries: 1
delay: 60
- name: Dump logs from master-controllers if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers
+ command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-controllers
when:
- l_start_result | failed
- name: Set fact master_controllers_service_status_changed
set_fact:
master_controllers_service_status_changed: "{{ l_start_result | changed }}"
- when:
- - openshift.master.cluster_method == 'native'
-
-- name: Install cluster packages
- package: name=pcs state=present
- when:
- - openshift.master.cluster_method == 'pacemaker'
- - not openshift.common.is_containerized | bool
- register: l_install_result
- until: l_install_result | success
-
-- name: Start and enable cluster service
- systemd:
- name: pcsd
- enabled: yes
- state: started
- when:
- - openshift.master.cluster_method == 'pacemaker'
- - not openshift.common.is_containerized | bool
-
-- name: Set the cluster user password
- shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster
- when:
- - l_install_result | changed
- name: node bootstrap settings
include_tasks: bootstrap.yml
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
index c95f562d0..8b342a5b4 100644
--- a/roles/openshift_master/tasks/registry_auth.yml
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -32,8 +32,8 @@
when:
- openshift_docker_alternative_creds | default(False) | bool
- oreg_auth_user is defined
- - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- register: master_oreg_auth_credentials_create
+ - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ register: master_oreg_auth_credentials_create_alt
notify:
- restart master api
- restart master controllers
@@ -45,4 +45,8 @@
when:
- openshift.common.is_containerized | bool
- oreg_auth_user is defined
- - (master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or master_oreg_auth_credentials_create.changed) | bool
+ - >
+ (master_oreg_auth_credentials_stat.stat.exists
+ or oreg_auth_credentials_replace
+ or master_oreg_auth_credentials_create.changed
+ or master_oreg_auth_credentials_create_alt.changed) | bool
diff --git a/roles/openshift_master/tasks/restart.yml b/roles/openshift_master/tasks/restart.yml
index 4f8b758fd..715347101 100644
--- a/roles/openshift_master/tasks/restart.yml
+++ b/roles/openshift_master/tasks/restart.yml
@@ -1,7 +1,7 @@
---
- name: Restart master API
service:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
state: restarted
when: openshift_master_ha | bool
- name: Wait for master API to come back online
@@ -14,7 +14,7 @@
when: openshift_master_ha | bool
- name: Restart master controllers
service:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: restarted
# Ignore errors since it is possible that type != simple for
# pre-3.1.1 installations.
diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml
index 23386f11b..f6c5ce0dd 100644
--- a/roles/openshift_master/tasks/system_container.yml
+++ b/roles/openshift_master/tasks/system_container.yml
@@ -1,8 +1,4 @@
---
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
- name: Pre-pull master system container image
command: >
@@ -12,12 +8,12 @@
- name: Check Master system container package
command: >
- atomic containers list --no-trunc -a -f container={{ openshift.common.service_type }}-master
+ atomic containers list --no-trunc -a -f container={{ openshift_service_type }}-master
# HA
- name: Install or Update HA api master system container
oc_atomic_container:
- name: "{{ openshift.common.service_type }}-master-api"
+ name: "{{ openshift_service_type }}-master-api"
image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
state: latest
values:
@@ -25,7 +21,7 @@
- name: Install or Update HA controller master system container
oc_atomic_container:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
state: latest
values:
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 9d11ed574..76b6f46aa 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -13,7 +13,7 @@
- name: Disable the legacy master service if it exists
systemd:
- name: "{{ openshift.common.service_type }}-master"
+ name: "{{ openshift_service_type }}-master"
state: stopped
enabled: no
masked: yes
@@ -21,11 +21,10 @@
- name: Remove the legacy master service if it exists
file:
- path: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master.service"
+ path: "{{ containerized_svc_dir }}/{{ openshift_service_type }}-master.service"
state: absent
ignore_errors: true
when:
- - openshift.master.cluster_method == "native"
- not l_is_master_system_container | bool
# This is the image used for both HA and non-HA clusters:
@@ -41,9 +40,8 @@
- name: Create the ha systemd unit files
template:
src: "{{ ha_svc_template_path }}/atomic-openshift-master-{{ item }}.service.j2"
- dest: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master-{{ item }}.service"
+ dest: "{{ containerized_svc_dir }}/{{ openshift_service_type }}-master-{{ item }}.service"
when:
- - openshift.master.cluster_method == "native"
- not l_is_master_system_container | bool
with_items:
- api
@@ -57,106 +55,89 @@
- name: enable master services
systemd:
- name: "{{ openshift.common.service_type }}-master-{{ item }}"
+ name: "{{ openshift_service_type }}-master-{{ item }}"
enabled: yes
with_items:
- api
- controllers
when:
- - openshift.master.cluster_method == "native"
- not l_is_master_system_container | bool
- name: Preserve Master API Proxy Config options
- command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ command: grep PROXY /etc/sysconfig/{{ openshift_service_type }}-master-api
register: l_master_api_proxy
- when:
- - openshift.master.cluster_method == "native"
failed_when: false
changed_when: false
- name: Preserve Master API AWS options
- command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ command: grep AWS_ /etc/sysconfig/{{ openshift_service_type }}-master-api
register: master_api_aws
- when:
- - openshift.master.cluster_method == "native"
failed_when: false
changed_when: false
- name: Create the master api service env file
template:
src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2"
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-api
backup: true
- when:
- - openshift.master.cluster_method == "native"
notify:
- restart master api
- name: Restore Master API Proxy Config Options
when:
- - openshift.master.cluster_method == "native"
- l_master_api_proxy.rc == 0
- "'http_proxy' not in openshift.common"
- "'https_proxy' not in openshift.common"
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-api
line: "{{ item }}"
with_items: "{{ l_master_api_proxy.stdout_lines | default([]) }}"
- name: Restore Master API AWS Options
when:
- - openshift.master.cluster_method == "native"
- master_api_aws.rc == 0
- not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-api
line: "{{ item }}"
with_items: "{{ master_api_aws.stdout_lines | default([]) }}"
no_log: True
- name: Preserve Master Controllers Proxy Config options
- command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ command: grep PROXY /etc/sysconfig/{{ openshift_service_type }}-master-controllers
register: master_controllers_proxy
- when:
- - openshift.master.cluster_method == "native"
failed_when: false
changed_when: false
- name: Preserve Master Controllers AWS options
- command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ command: grep AWS_ /etc/sysconfig/{{ openshift_service_type }}-master-controllers
register: master_controllers_aws
- when:
- - openshift.master.cluster_method == "native"
failed_when: false
changed_when: false
- name: Create the master controllers service env file
template:
src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2"
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-controllers
backup: true
- when:
- - openshift.master.cluster_method == "native"
notify:
- restart master controllers
- name: Restore Master Controllers Proxy Config Options
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-controllers
line: "{{ item }}"
with_items: "{{ master_controllers_proxy.stdout_lines | default([]) }}"
when:
- - openshift.master.cluster_method == "native"
- master_controllers_proxy.rc == 0
- "'http_proxy' not in openshift.common"
- "'https_proxy' not in openshift.common"
- name: Restore Master Controllers AWS Options
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ dest: /etc/sysconfig/{{ openshift_service_type }}-master-controllers
line: "{{ item }}"
with_items: "{{ master_controllers_aws.stdout_lines | default([]) }}"
when:
- - openshift.master.cluster_method == "native"
- master_controllers_aws.rc == 0
- not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
diff --git a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
index caab3045a..f50b91ff5 100644
--- a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
@@ -12,11 +12,11 @@
package: name={{ master_pkgs | join(',') }} state=present
vars:
master_pkgs:
- - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
- - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-master{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-clients{{ openshift_pkg_version }}"
+ - "tuned-profiles-{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
register: result
until: result | success
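The upgrade task above feeds the list through `join(',')` so a single `package` call installs every master RPM at one pinned version. Roughly, with `openshift_service_type` rendering to `origin` and `openshift_pkg_version` to `-3.7.0` (both values illustrative, not taken from any inventory), the resolved task is equivalent to:

```yaml
# Illustrative expansion only; the real version string depends on the inventory.
- name: Upgrade master packages
  package:
    name: "origin-3.7.0,origin-master-3.7.0,origin-node-3.7.0,origin-sdn-ovs-3.7.0,origin-clients-3.7.0,tuned-profiles-origin-node-3.7.0"
    state: present
  register: result
  until: result | success
```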
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index cec3d3fb1..5e46d9121 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -3,18 +3,18 @@ Description=Atomic OpenShift Master API
Documentation=https://github.com/openshift/origin
After=etcd_container.service
Wants=etcd_container.service
-Before={{ openshift.common.service_type }}-node.service
+Before={{ openshift_service_type }}-node.service
After={{ openshift_docker_service_name }}.service
PartOf={{ openshift_docker_service_name }}.service
Requires={{ openshift_docker_service_name }}.service
[Service]
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-api
Environment=GOTRACEBACK=crash
-ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api
+ExecStartPre=-/usr/bin/docker rm -f {{ openshift_service_type}}-master-api
ExecStart=/usr/bin/docker run --rm --privileged --net=host \
- --name {{ openshift.common.service_type }}-master-api \
- --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api \
+ --name {{ openshift_service_type }}-master-api \
+ --env-file=/etc/sysconfig/{{ openshift_service_type }}-master-api \
-v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \
-v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock \
-v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
@@ -24,14 +24,14 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \
{{ openshift.master.master_image }}:${IMAGE_VERSION} start master api \
--config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
-ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api
+ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-master-api
LimitNOFILE=131072
LimitCORE=infinity
WorkingDirectory={{ r_openshift_master_data_dir }}
-SyslogIdentifier={{ openshift.common.service_type }}-master-api
+SyslogIdentifier={{ openshift_service_type }}-master-api
Restart=always
RestartSec=5s
[Install]
WantedBy={{ openshift_docker_service_name }}.service
-WantedBy={{ openshift.common.service_type }}-node.service
+WantedBy={{ openshift_service_type }}-node.service
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index a0248151d..899575f1a 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -1,19 +1,19 @@
[Unit]
Description=Atomic OpenShift Master Controllers
Documentation=https://github.com/openshift/origin
-Wants={{ openshift.common.service_type }}-master-api.service
-After={{ openshift.common.service_type }}-master-api.service
+Wants={{ openshift_service_type }}-master-api.service
+After={{ openshift_service_type }}-master-api.service
After={{ openshift_docker_service_name }}.service
Requires={{ openshift_docker_service_name }}.service
PartOf={{ openshift_docker_service_name }}.service
[Service]
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-controllers
Environment=GOTRACEBACK=crash
-ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers
+ExecStartPre=-/usr/bin/docker rm -f {{ openshift_service_type}}-master-controllers
ExecStart=/usr/bin/docker run --rm --privileged --net=host \
- --name {{ openshift.common.service_type }}-master-controllers \
- --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers \
+ --name {{ openshift_service_type }}-master-controllers \
+ --env-file=/etc/sysconfig/{{ openshift_service_type }}-master-controllers \
-v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \
-v /var/run/docker.sock:/var/run/docker.sock \
-v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
@@ -23,11 +23,11 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \
{{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers \
--config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
-ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers
+ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-master-controllers
LimitNOFILE=131072
LimitCORE=infinity
WorkingDirectory={{ r_openshift_master_data_dir }}
-SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
+SyslogIdentifier={{ openshift_service_type }}-master-controllers
Restart=always
RestartSec=5s
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index a0f00e545..f1a76e5f5 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -69,29 +69,13 @@ dnsConfig:
bindNetwork: tcp4
{% endif %}
etcdClientInfo:
- ca: {{ "ca-bundle.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }}
+ ca: master.etcd-ca.crt
certFile: master.etcd-client.crt
keyFile: master.etcd-client.key
urls:
{% for etcd_url in openshift.master.etcd_urls %}
- {{ etcd_url }}
{% endfor %}
-{% if openshift.master.embedded_etcd | bool %}
-etcdConfig:
- address: {{ openshift.common.hostname }}:{{ openshift.master.etcd_port }}
- peerAddress: {{ openshift.common.hostname }}:7001
- peerServingInfo:
- bindAddress: {{ openshift.master.bind_addr }}:7001
- certFile: etcd.server.crt
- clientCA: ca-bundle.crt
- keyFile: etcd.server.key
- servingInfo:
- bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.etcd_port }}
- certFile: etcd.server.crt
- clientCA: ca-bundle.crt
- keyFile: etcd.server.key
- storageDirectory: {{ r_openshift_master_data_dir }}/openshift.local.etcd
-{% endif %}
etcdStorageConfig:
kubernetesStoragePrefix: kubernetes.io
kubernetesStorageVersion: v1
@@ -120,7 +104,7 @@ kubernetesMasterConfig:
- application/vnd.kubernetes.protobuf
{% endif %}
controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }}
- masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }}
+ masterCount: {{ openshift.master.master_count }}
masterIP: {{ openshift.common.ip }}
podEvictionTimeout: {{ openshift.master.pod_eviction_timeout | default("") }}
proxyClientInfo:
@@ -204,7 +188,7 @@ projectConfig:
mcsLabelsPerProject: {{ osm_mcs_labels_per_project }}
uidAllocatorRange: "{{ osm_uid_allocator_range }}"
routingConfig:
- subdomain: "{{ openshift_master_default_subdomain | default("") }}"
+ subdomain: "{{ openshift_master_default_subdomain }}"
serviceAccountConfig:
limitSecretReferences: {{ openshift_master_saconfig_limitsecretreferences | default(false) }}
managedNames:
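With the `| default("")` fallback removed from `routingConfig.subdomain` here (and the role-level default dropped from openshift_master_facts further down), the subdomain is expected to be supplied elsewhere, typically by the inventory; rendering the template without it fails on an undefined variable. A hedged example of the inventory variable that now has to be present:

```yaml
# Inventory/group_vars sketch; the domain is an example value only.
openshift_master_default_subdomain: "apps.example.com"
```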
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
index 02bfd6f62..ed8a47df8 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
@@ -3,12 +3,12 @@ Description=Atomic OpenShift Master API
Documentation=https://github.com/openshift/origin
After=network-online.target
After=etcd.service
-Before={{ openshift.common.service_type }}-node.service
+Before={{ openshift_service_type }}-node.service
Requires=network-online.target
[Service]
Type=notify
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-api
Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/openshift start master api --config=${CONFIG_FILE} $OPTIONS
LimitNOFILE=131072
@@ -20,4 +20,4 @@ RestartSec=5s
[Install]
WantedBy=multi-user.target
-WantedBy={{ openshift.common.service_type }}-node.service
+WantedBy={{ openshift_service_type }}-node.service
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
index fae021845..b36963f73 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
@@ -2,19 +2,19 @@
Description=Atomic OpenShift Master Controllers
Documentation=https://github.com/openshift/origin
After=network-online.target
-After={{ openshift.common.service_type }}-master-api.service
-Wants={{ openshift.common.service_type }}-master-api.service
+After={{ openshift_service_type }}-master-api.service
+Wants={{ openshift_service_type }}-master-api.service
Requires=network-online.target
[Service]
Type=notify
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-controllers
Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS
LimitNOFILE=131072
LimitCORE=infinity
WorkingDirectory={{ r_openshift_master_data_dir }}
-SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
+SyslogIdentifier={{ openshift_service_type }}-master-controllers
Restart=always
RestartSec=5s
diff --git a/roles/openshift_master_cluster/README.md b/roles/openshift_master_cluster/README.md
deleted file mode 100644
index 58dd19ac3..000000000
--- a/roles/openshift_master_cluster/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-OpenShift Master Cluster
-========================
-
-TODO
-
-Requirements
-------------
-
-* Ansible 2.2
-
-Role Variables
---------------
-
-TODO
-
-Dependencies
-------------
-
-TODO
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License Version 2.0
-
-Author Information
-------------------
-
-Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/openshift_master_cluster/meta/main.yml b/roles/openshift_master_cluster/meta/main.yml
deleted file mode 100644
index c452b165e..000000000
--- a/roles/openshift_master_cluster/meta/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-galaxy_info:
- author: Jason DeTiberus
- description:
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 2.2
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
- - system
-dependencies: []
diff --git a/roles/openshift_master_cluster/tasks/configure.yml b/roles/openshift_master_cluster/tasks/configure.yml
deleted file mode 100644
index 1b94598dd..000000000
--- a/roles/openshift_master_cluster/tasks/configure.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-- fail:
- msg: This role requires that openshift_master_cluster_vip is set
- when: openshift_master_cluster_vip is not defined or not openshift_master_cluster_vip
-- fail:
- msg: This role requires that openshift_master_cluster_public_vip is set
- when: openshift_master_cluster_public_vip is not defined or not openshift_master_cluster_public_vip
-
-- name: Authenticate to the cluster
- command: pcs cluster auth -u hacluster -p {{ openshift_master_cluster_password }} {{ omc_cluster_hosts }}
-
-- name: Create the cluster
- command: pcs cluster setup --name openshift_master {{ omc_cluster_hosts }}
-
-- name: Start the cluster
- command: pcs cluster start --all
-
-- name: Enable the cluster on all nodes
- command: pcs cluster enable --all
-
-- name: Set default resource stickiness
- command: pcs resource defaults resource-stickiness=100
-
-- name: Add the cluster VIP resource
- command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_vip }} --group {{ openshift.common.service_type }}-master
-
-- name: Add the cluster public VIP resource
- command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_public_vip }} --group {{ openshift.common.service_type }}-master
- when: openshift_master_cluster_public_vip != openshift_master_cluster_vip
-
-- name: Add the cluster master service resource
- command: pcs resource create master systemd:{{ openshift.common.service_type }}-master op start timeout=90s stop timeout=90s --group {{ openshift.common.service_type }}-master
-
-- name: Disable stonith
- command: pcs property set stonith-enabled=false
-
-- name: Wait for the clustered master service to be available
- wait_for:
- host: "{{ openshift_master_cluster_vip }}"
- port: "{{ openshift.master.api_port }}"
- state: started
- timeout: 180
- delay: 90
diff --git a/roles/openshift_master_cluster/tasks/main.yml b/roles/openshift_master_cluster/tasks/main.yml
deleted file mode 100644
index 41bfc72cb..000000000
--- a/roles/openshift_master_cluster/tasks/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- fail:
- msg: "Not possible on atomic hosts for now"
- when: openshift.common.is_containerized | bool
-
-- name: Test if cluster is already configured
- command: pcs status
- register: pcs_status
- changed_when: false
- failed_when: false
- when: openshift.master.cluster_method == "pacemaker"
-
-- include_tasks: configure.yml
- when: "pcs_status | failed and 'Error: cluster is not currently running on this node' in pcs_status.stderr"
diff --git a/roles/openshift_master_facts/defaults/main.yml b/roles/openshift_master_facts/defaults/main.yml
index d0dcdae4b..a89f48afa 100644
--- a/roles/openshift_master_facts/defaults/main.yml
+++ b/roles/openshift_master_facts/defaults/main.yml
@@ -1,5 +1,4 @@
---
-openshift_master_default_subdomain: "router.default.svc.cluster.local"
openshift_master_admission_plugin_config:
openshift.io/ImagePolicy:
configuration:
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index c827f2d26..ff15f693b 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -485,31 +485,6 @@ class FilterModule(object):
Dumper=AnsibleDumper))
@staticmethod
- def validate_pcs_cluster(data, masters=None):
- ''' Validates output from "pcs status", ensuring that each master
- provided is online.
- Ex: data = ('...',
- 'PCSD Status:',
- 'master1.example.com: Online',
- 'master2.example.com: Online',
- 'master3.example.com: Online',
- '...')
- masters = ['master1.example.com',
- 'master2.example.com',
- 'master3.example.com']
- returns True
- '''
- if not issubclass(type(data), string_types):
- raise errors.AnsibleFilterError("|failed expects data is a string or unicode")
- if not issubclass(type(masters), list):
- raise errors.AnsibleFilterError("|failed expects masters is a list")
- valid = True
- for master in masters:
- if "{0}: Online".format(master) not in data:
- valid = False
- return valid
-
- @staticmethod
def certificates_to_synchronize(hostvars, include_keys=True, include_ca=True):
''' Return certificates to synchronize based on facts. '''
if not issubclass(type(hostvars), dict):
@@ -553,6 +528,5 @@ class FilterModule(object):
def filters(self):
''' returns a mapping of filters to methods '''
return {"translate_idps": self.translate_idps,
- "validate_pcs_cluster": self.validate_pcs_cluster,
"certificates_to_synchronize": self.certificates_to_synchronize,
"oo_htpasswd_users_from_file": self.oo_htpasswd_users_from_file}
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 20cc5358e..418dcba67 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -1,14 +1,8 @@
---
-# Ensure the default sub-domain is set:
-- name: Migrate legacy osm_default_subdomain fact
- set_fact:
- openshift_master_default_subdomain: "{{ osm_default_subdomain | default(None) }}"
- when: openshift_master_default_subdomain is not defined
-
- name: Verify required variables are set
fail:
msg: openshift_master_default_subdomain must be set to deploy metrics
- when: openshift_hosted_metrics_deploy | default(false) | bool and openshift_master_default_subdomain | default("") == ""
+ when: openshift_hosted_metrics_deploy | default(false) | bool and openshift_master_default_subdomain == ""
# NOTE: These metrics variables are unfortunately needed by both the master and the metrics roles
# to properly configure the master-config.yaml file.
@@ -20,7 +14,7 @@
- name: Set g_metrics_hostname
set_fact:
g_metrics_hostname: "{{ openshift_hosted_metrics_public_url
- | default('hawkular-metrics.' ~ (openshift_master_default_subdomain))
+ | default('hawkular-metrics.' ~ openshift_master_default_subdomain)
| oo_hostname_from_url }}"
- set_fact:
@@ -31,7 +25,6 @@
openshift_facts:
role: master
local_facts:
- cluster_method: "{{ openshift_master_cluster_method | default('native') }}"
cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
api_port: "{{ openshift_master_api_port | default(None) }}"
@@ -52,7 +45,6 @@
etcd_port: "{{ openshift_master_etcd_port | default(None) }}"
etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}"
etcd_urls: "{{ openshift_master_etcd_urls | default(None) }}"
- embedded_etcd: "{{ openshift_master_embedded_etcd | default(None) }}"
embedded_kube: "{{ openshift_master_embedded_kube | default(None) }}"
embedded_dns: "{{ openshift_master_embedded_dns | default(None) }}"
bind_addr: "{{ openshift_master_bind_addr | default(None) }}"
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml
index 074b72942..1f4b5a116 100644
--- a/roles/openshift_metrics/handlers/main.yml
+++ b/roles/openshift_metrics/handlers/main.yml
@@ -1,17 +1,17 @@
---
- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
- when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
+ when: (not (master_api_service_status_changed | default(false) | bool))
notify: Verify API Server
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
until: result.rc == 0
- when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ when: (not (master_controllers_service_status_changed | default(false) | bool))
- name: Verify API Server
# Using curl here since the uri module requires python-httplib2 and
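The controllers handler above restarts via `command` and retries because the API may still be initializing. The same retry loop, reduced to its generic shape with a placeholder unit name:

```yaml
# Generic retry-until sketch; 'example.service' is a placeholder.
- name: Restart a unit, retrying while it settles
  command: systemctl restart example.service
  register: result
  retries: 3
  delay: 5
  until: result.rc == 0
```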
diff --git a/roles/openshift_nfs/tasks/setup.yml b/roles/openshift_nfs/tasks/setup.yml
index edb854467..1aa7e7079 100644
--- a/roles/openshift_nfs/tasks/setup.yml
+++ b/roles/openshift_nfs/tasks/setup.yml
@@ -1,7 +1,6 @@
---
- name: setup firewall
- include: firewall.yml
- static: yes
+ import_tasks: firewall.yml
- name: Install nfs-utils
package: name=nfs-utils state=present
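`include:` with `static: yes` was the pre-2.4 way of forcing static inclusion; `import_tasks` is the explicit static form from Ansible 2.4 on, while `include_tasks` stays dynamic. A minimal side-by-side sketch (the file name is illustrative):

```yaml
# Static: expanded at parse time, so tags and conditions apply to every imported task.
- import_tasks: firewall_rules.yml

# Dynamic: evaluated at runtime, which allows loops and variable file names.
- include_tasks: "{{ extra_tasks_file | default('firewall_rules.yml') }}"
```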
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index 67f697924..87ceb8103 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -33,9 +33,9 @@ Notes
Currently we support re-labeling nodes but we don't re-schedule running pods or remove existing labels. That means you will have to trigger the re-scheduling manually. To re-schedule your pods, just follow the steps below:
```
-oadm manage-node --schedulable=false ${NODE}
-oadm manage-node --drain ${NODE}
-oadm manage-node --schedulable=true ${NODE}
+oc adm manage-node --schedulable=false ${NODE}
+oc adm manage-node --drain ${NODE}
+oc adm manage-node --schedulable=true ${NODE}
````
> If you are using a version older than 1.5/3.5 you must replace `--drain` with `--evacuate`.
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index f3867fe4a..fff927944 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -14,7 +14,11 @@ r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }
l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"
-openshift_service_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'atomic-openshift' }}"
+openshift_service_type_dict:
+ origin: origin
+ openshift-enterprise: atomic-openshift
+
+openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
system_images_registry_dict:
openshift-enterprise: "registry.access.redhat.com"
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 229c6bbed..170a3dc6e 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -34,7 +34,7 @@
- name: restart node
systemd:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
state: restarted
register: l_openshift_node_restart_node_result
until: not l_openshift_node_restart_node_result | failed
diff --git a/roles/openshift_node/tasks/aws.yml b/roles/openshift_node/tasks/aws.yml
index 38c2b794d..a7f1fc116 100644
--- a/roles/openshift_node/tasks/aws.yml
+++ b/roles/openshift_node/tasks/aws.yml
@@ -1,7 +1,7 @@
---
- name: Configure AWS Cloud Provider Settings
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ dest: /etc/sysconfig/{{ openshift_service_type }}-node
regexp: "{{ item.regex }}"
line: "{{ item.line }}"
create: true
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index 741a2234f..e5c80bd09 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -38,7 +38,7 @@
- name: Configure Node Environment Variables
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ dest: /etc/sysconfig/{{ openshift_service_type }}-node
regexp: "^{{ item.key }}="
line: "{{ item.key }}={{ item.value }}"
create: true
@@ -76,7 +76,7 @@
- name: Start and enable node dep
systemd:
daemon_reload: yes
- name: "{{ openshift.common.service_type }}-node-dep"
+ name: "{{ openshift_service_type }}-node-dep"
enabled: yes
state: started
@@ -84,7 +84,7 @@
block:
- name: Start and enable node
systemd:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
enabled: yes
state: started
daemon_reload: yes
@@ -95,7 +95,7 @@
ignore_errors: true
- name: Dump logs from node service if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node
+ command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-node
when: node_start_result | failed
- name: Abort if node failed to start
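The block above starts the node unit with `ignore_errors`, dumps the journal when the start failed, and only then aborts. Reduced to a generic sketch with a placeholder unit name:

```yaml
# Sketch of the start / dump-logs / abort pattern; the unit name is a placeholder.
- name: Start and enable the service
  systemd:
    name: example-node
    state: started
    enabled: yes
    daemon_reload: yes
  register: start_result
  ignore_errors: true

- name: Dump recent logs if the start failed
  command: journalctl --no-pager -n 100 -u example-node
  when: start_result | failed

- name: Abort when the service could not be started
  fail:
    msg: "example-node failed to start"
  when: start_result | failed
```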
diff --git a/roles/openshift_node/tasks/config/configure-node-settings.yml b/roles/openshift_node/tasks/config/configure-node-settings.yml
index 527580481..ebc1426d3 100644
--- a/roles/openshift_node/tasks/config/configure-node-settings.yml
+++ b/roles/openshift_node/tasks/config/configure-node-settings.yml
@@ -1,7 +1,7 @@
---
- name: Configure Node settings
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ dest: /etc/sysconfig/{{ openshift_service_type }}-node
regexp: "{{ item.regex }}"
line: "{{ item.line }}"
create: true
diff --git a/roles/openshift_node/tasks/config/configure-proxy-settings.yml b/roles/openshift_node/tasks/config/configure-proxy-settings.yml
index d60794305..7ddd319d2 100644
--- a/roles/openshift_node/tasks/config/configure-proxy-settings.yml
+++ b/roles/openshift_node/tasks/config/configure-proxy-settings.yml
@@ -1,7 +1,7 @@
---
- name: Configure Proxy Settings
lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ dest: /etc/sysconfig/{{ openshift_service_type }}-node
regexp: "{{ item.regex }}"
line: "{{ item.line }}"
create: true
diff --git a/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml
index ee91a88ab..9f1145d12 100644
--- a/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml
+++ b/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml
@@ -1,7 +1,7 @@
---
- name: Install Node dependencies docker service file
template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service"
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node-dep.service"
src: openshift.docker.node.dep.service
notify:
- reload systemd units
diff --git a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
index f92ff79b5..649fc5f6b 100644
--- a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
+++ b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
@@ -1,7 +1,7 @@
---
- name: Install Node docker service file
template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
src: openshift.docker.node.service
notify:
- reload systemd units
diff --git a/roles/openshift_node/tasks/docker/upgrade.yml b/roles/openshift_node/tasks/docker/upgrade.yml
index d743d2188..bbe9c71f5 100644
--- a/roles/openshift_node/tasks/docker/upgrade.yml
+++ b/roles/openshift_node/tasks/docker/upgrade.yml
@@ -1,8 +1,7 @@
---
# input variables:
-# - openshift.common.service_type
+# - openshift_service_type
# - openshift.common.is_containerized
-# - docker_upgrade_nuke_images
# - docker_version
# - skip_docker_restart
@@ -12,20 +11,6 @@
- debug: var=docker_image_count.stdout
-# TODO(jchaloup): put all docker_upgrade_nuke_images into a block with only one condition
-- name: Remove all containers and images
- script: nuke_images.sh
- register: nuke_images_result
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
- service:
name: docker
state: stopped
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 1ed4a05c1..f93aed246 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -3,14 +3,14 @@
block:
- name: Install Node package
package:
- name: "{{ openshift.common.service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
register: result
until: result | success
- name: Install sdn-ovs package
package:
- name: "{{ openshift.common.service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when:
- openshift_node_use_openshift_sdn | bool
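`oo_image_tag_to_rpm_version` (from this repo's filter_plugins/oo_filters.py) turns an image tag such as `v3.7.0` into an RPM version suffix, and `include_dash=True` prepends the `-` so the result can be appended directly to a package name. A hedged illustration of the resulting package spec:

```yaml
# Illustrative only: assumes the tag 'v3.7.0' maps to the suffix '-3.7.0'.
- debug:
    msg: "Would install {{ openshift_service_type }}-node{{ 'v3.7.0' | oo_image_tag_to_rpm_version(include_dash=True) }}"
```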
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index e60d96760..32c5f495f 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -6,7 +6,7 @@
- deployment_type == 'openshift-enterprise'
- not openshift_use_crio
-- include: dnsmasq.yml
+- include_tasks: dnsmasq.yml
- name: setup firewall
import_tasks: firewall.yml
@@ -50,6 +50,8 @@
enabled: yes
state: restarted
when: openshift_use_crio
+ register: task_result
+ failed_when: task_result|failed and 'could not find the requested service' not in task_result.msg|lower
- name: restart NetworkManager to ensure resolv.conf is present
systemd:
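The `register` + `failed_when` pair added above lets the cri-o restart pass on hosts where the unit simply is not installed, by whitelisting systemd's "could not find the requested service" message. The pattern in isolation, with a placeholder unit name and a defensive default for the message field:

```yaml
# Sketch; 'example-optional' is a placeholder unit name.
- name: Restart an optional unit if it exists
  systemd:
    name: example-optional
    state: restarted
  register: task_result
  failed_when: task_result | failed and 'could not find the requested service' not in task_result.msg | default('') | lower
```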
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index eb8d9a6a5..98978ec6f 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -1,8 +1,4 @@
---
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
- name: Pre-pull node system container image
command: >
@@ -12,10 +8,10 @@
- name: Install or Update node system container
oc_atomic_container:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}"
values:
- "DNS_DOMAIN={{ openshift.common.dns_domain }}"
- "DOCKER_SERVICE={{ openshift_docker_service_name }}.service"
- - "MASTER_SERVICE={{ openshift.common.service_type }}.service"
+ - "MASTER_SERVICE={{ openshift_service_type }}.service"
state: latest
diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml
index d33e172c1..b61bc84c1 100644
--- a/roles/openshift_node/tasks/openvswitch_system_container.yml
+++ b/roles/openshift_node/tasks/openvswitch_system_container.yml
@@ -7,11 +7,6 @@
l_service_name: "{{ openshift_docker_service_name }}"
when: not openshift_use_crio
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
-
- name: Pre-pull OpenVSwitch system container image
command: >
atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}
diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml
index f5428867a..ab43ec049 100644
--- a/roles/openshift_node/tasks/registry_auth.yml
+++ b/roles/openshift_node/tasks/registry_auth.yml
@@ -32,7 +32,7 @@
- openshift_docker_alternative_creds | bool
- oreg_auth_user is defined
- (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- register: node_oreg_auth_credentials_create
+ register: node_oreg_auth_credentials_create_alt
notify:
- restart node
@@ -43,4 +43,8 @@
when:
- openshift.common.is_containerized | bool
- oreg_auth_user is defined
- - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool
+ - >
+ (node_oreg_auth_credentials_stat.stat.exists
+ or oreg_auth_credentials_replace
+ or node_oreg_auth_credentials_create.changed
+ or node_oreg_auth_credentials_create_alt.changed) | bool
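Folding the long condition with `>` keeps one boolean expression readable across several lines while tracking both credential-creation registers. The same folded-scalar shape on its own, attached to a trivial task and using illustrative variable names:

```yaml
# Sketch of a folded multi-line conditional; variable names are illustrative.
- name: Update credentials when any trigger fired
  debug:
    msg: "would refresh credentials here"
  when: >
    creds_stat.stat.exists
    or creds_replace | default(false)
    or creds_create.changed
    or creds_create_alt.changed
```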
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 397e1ba18..c532147b1 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -1,7 +1,7 @@
---
- name: Install Node service file
template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}"
when: not l_is_node_system_container | bool
notify:
diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml
index 561b56918..9f333645a 100644
--- a/roles/openshift_node/tasks/upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade.yml
@@ -17,7 +17,7 @@
name: "{{ item }}"
state: stopped
with_items:
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-node"
- openvswitch
failed_when: false
@@ -26,8 +26,8 @@
name: "{{ item }}"
state: stopped
with_items:
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-master-api"
- etcd_container
failed_when: false
when: openshift.common.is_containerized | bool
@@ -80,9 +80,9 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
when: openshift.common.is_containerized | bool
@@ -91,7 +91,7 @@
name: "{{ item }}"
state: stopped
with_items:
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-node"
- openvswitch
failed_when: false
when: not openshift.common.is_containerized | bool
diff --git a/roles/openshift_node/tasks/upgrade/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml
index 3f1abceab..65c301783 100644
--- a/roles/openshift_node/tasks/upgrade/restart.yml
+++ b/roles/openshift_node/tasks/upgrade/restart.yml
@@ -1,6 +1,6 @@
---
# input variables:
-# - openshift.common.service_type
+# - openshift_service_type
# - openshift.common.is_containerized
# - openshift.common.hostname
# - openshift.master.api_port
@@ -27,9 +27,9 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
- name: Wait for master API to come back online
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
index fcbe1a598..120b93bc3 100644
--- a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
@@ -1,13 +1,13 @@
---
# input variables:
-# - openshift.common.service_type
+# - openshift_service_type
# - component
# - openshift_pkg_version
# - openshift.common.is_atomic
# We verified latest rpm available is suitable, so just yum update.
- name: Upgrade packages
- package: "name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
+ package: "name={{ openshift_service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
register: result
until: result | success
@@ -19,7 +19,7 @@
- name: Install Node service file
template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
src: "node.service.j2"
register: l_node_unit
diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service
index 5964ac095..8b43beb07 100644
--- a/roles/openshift_node/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node/templates/openshift.docker.node.dep.service
@@ -1,11 +1,11 @@
[Unit]
Requires={{ openshift_docker_service_name }}.service
After={{ openshift_docker_service_name }}.service
-PartOf={{ openshift.common.service_type }}-node.service
-Before={{ openshift.common.service_type }}-node.service
+PartOf={{ openshift_service_type }}-node.service
+Before={{ openshift_service_type }}-node.service
{% if openshift_use_crio %}Wants=cri-o.service{% endif %}
[Service]
-ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
+ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; fi"
ExecStop=
-SyslogIdentifier={{ openshift.common.service_type }}-node-dep
+SyslogIdentifier={{ openshift_service_type }}-node-dep
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 3b33ca542..b174c7023 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -1,5 +1,5 @@
[Unit]
-After={{ openshift.common.service_type }}-master.service
+After={{ openshift_service_type }}-master.service
After={{ openshift_docker_service_name }}.service
After=openvswitch.service
PartOf={{ openshift_docker_service_name }}.service
@@ -10,20 +10,20 @@ PartOf=openvswitch.service
After=ovsdb-server.service
After=ovs-vswitchd.service
{% endif %}
-Wants={{ openshift.common.service_type }}-master.service
-Requires={{ openshift.common.service_type }}-node-dep.service
-After={{ openshift.common.service_type }}-node-dep.service
+Wants={{ openshift_service_type }}-master.service
+Requires={{ openshift_service_type }}-node-dep.service
+After={{ openshift_service_type }}-node-dep.service
Requires=dnsmasq.service
After=dnsmasq.service
[Service]
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
-ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-node
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-node-dep
+ExecStartPre=-/usr/bin/docker rm -f {{ openshift_service_type }}-node
ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
- --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \
+ExecStart=/usr/bin/docker run --name {{ openshift_service_type }}-node \
+ --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift_service_type }}-node \
-v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \
-e HOST=/rootfs -e HOST_ETC=/host-etc \
-v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}:rslave \
@@ -40,10 +40,10 @@ ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
{% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
{{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
-ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
+ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-node
ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:
-SyslogIdentifier={{ openshift.common.service_type }}-node
+SyslogIdentifier={{ openshift_service_type }}-node
Restart=always
RestartSec=5s
diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml
index 5f182e0d6..65a647b8f 100644
--- a/roles/openshift_openstack/defaults/main.yml
+++ b/roles/openshift_openstack/defaults/main.yml
@@ -4,11 +4,9 @@ openshift_openstack_stack_state: 'present'
openshift_openstack_ssh_ingress_cidr: 0.0.0.0/0
openshift_openstack_node_ingress_cidr: 0.0.0.0/0
openshift_openstack_lb_ingress_cidr: 0.0.0.0/0
-openshift_openstack_bastion_ingress_cidr: 0.0.0.0/0
openshift_openstack_num_etcd: 0
openshift_openstack_num_masters: 1
openshift_openstack_num_nodes: 1
-openshift_openstack_num_dns: 0
openshift_openstack_num_infra: 1
openshift_openstack_dns_nameservers: []
openshift_openstack_nodes_to_remove: []
@@ -45,8 +43,10 @@ openshift_openstack_container_storage_setup:
# populate-dns
openshift_openstack_dns_records_add: []
-openshift_openstack_external_nsupdate_keys: {}
+openshift_openstack_public_hostname_suffix: ""
+openshift_openstack_private_hostname_suffix: ""
+openshift_openstack_public_dns_domain: "example.com"
openshift_openstack_full_dns_domain: "{{ (openshift_openstack_clusterid|trim == '') | ternary(openshift_openstack_public_dns_domain, openshift_openstack_clusterid + '.' + openshift_openstack_public_dns_domain) }}"
openshift_openstack_app_subdomain: "apps"
@@ -60,20 +60,17 @@ openshift_openstack_infra_hostname: infra-node
openshift_openstack_node_hostname: app-node
openshift_openstack_lb_hostname: lb
openshift_openstack_etcd_hostname: etcd
-openshift_openstack_dns_hostname: dns
openshift_openstack_keypair_name: openshift
openshift_openstack_lb_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_etcd_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_master_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_node_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_infra_flavor: "{{ openshift_openstack_default_flavor }}"
-openshift_openstack_dns_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_master_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_infra_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_node_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_lb_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_etcd_image: "{{ openshift_openstack_default_image_name }}"
-openshift_openstack_dns_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_provider_network_name: null
openshift_openstack_external_network_name: null
openshift_openstack_private_network: >-
@@ -89,8 +86,5 @@ openshift_openstack_master_volume_size: "{{ openshift_openstack_docker_volume_si
openshift_openstack_infra_volume_size: "{{ openshift_openstack_docker_volume_size }}"
openshift_openstack_node_volume_size: "{{ openshift_openstack_docker_volume_size }}"
openshift_openstack_etcd_volume_size: 2
-openshift_openstack_dns_volume_size: 1
openshift_openstack_lb_volume_size: 5
-openshift_openstack_use_bastion: false
-openshift_openstack_ui_ssh_tunnel: false
openshift_openstack_ephemeral_volumes: false
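With the bastion- and DNS-host defaults removed, the remaining knobs in this file that feed the populate-dns records below are the hostname suffixes and the public DNS domain. A hedged inventory sketch of those values:

```yaml
# Example overrides only; all values are illustrative.
openshift_openstack_public_dns_domain: "cloud.example.com"
openshift_openstack_clusterid: "prod"            # full domain then renders as prod.cloud.example.com
openshift_openstack_public_hostname_suffix: ""   # appended to each host's public A record name
openshift_openstack_private_hostname_suffix: "-private"
```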
diff --git a/roles/openshift_openstack/tasks/check-prerequisites.yml b/roles/openshift_openstack/tasks/check-prerequisites.yml
index 57c7238d1..30996cc47 100644
--- a/roles/openshift_openstack/tasks/check-prerequisites.yml
+++ b/roles/openshift_openstack/tasks/check-prerequisites.yml
@@ -32,10 +32,12 @@
command: python -c "import dns"
ignore_errors: yes
register: pythondns_result
+ when: openshift_openstack_external_nsupdate_keys is defined
- name: Check if python-dns is installed
assert:
that: 'pythondns_result.rc == 0'
msg: "Python module python-dns is not installed"
+ when: openshift_openstack_external_nsupdate_keys is defined
# Check jinja2
- name: Try to import jinja2 module
@@ -85,21 +87,19 @@
msg: "Keypair {{ openshift_openstack_keypair_name }} is not available"
# Check that custom images are available
-- include: custom_image_check.yaml
+- include_tasks: custom_image_check.yaml
with_items:
- "{{ openshift_openstack_master_image }}"
- "{{ openshift_openstack_infra_image }}"
- "{{ openshift_openstack_node_image }}"
- "{{ openshift_openstack_lb_image }}"
- "{{ openshift_openstack_etcd_image }}"
- - "{{ openshift_openstack_dns_image }}"
# Check that custom flavors are available
-- include: custom_flavor_check.yaml
+- include_tasks: custom_flavor_check.yaml
with_items:
- "{{ openshift_openstack_master_flavor }}"
- "{{ openshift_openstack_infra_flavor }}"
- "{{ openshift_openstack_node_flavor }}"
- "{{ openshift_openstack_lb_flavor }}"
- "{{ openshift_openstack_etcd_flavor }}"
- - "{{ openshift_openstack_dns_flavor }}"
diff --git a/roles/openshift_openstack/tasks/hostname.yml b/roles/openshift_openstack/tasks/hostname.yml
deleted file mode 100644
index e1a18425f..000000000
--- a/roles/openshift_openstack/tasks/hostname.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Setting Hostname Fact
- set_fact:
- new_hostname: "{{ custom_hostname | default(inventory_hostname_short) }}"
-
-- name: Setting FQDN Fact
- set_fact:
- new_fqdn: "{{ new_hostname }}.{{ openshift_openstack_full_dns_domain }}"
-
-- name: Setting hostname and DNS domain
- hostname: name="{{ new_fqdn }}"
-
-- name: Check for cloud.cfg
- stat: path=/etc/cloud/cloud.cfg
- register: cloud_cfg
-
-- name: Prevent cloud-init updates of hostname/fqdn (if applicable)
- lineinfile:
- dest: /etc/cloud/cloud.cfg
- state: present
- regexp: "{{ item.regexp }}"
- line: "{{ item.line }}"
- with_items:
- - { regexp: '^ - set_hostname', line: '# - set_hostname' }
- - { regexp: '^ - update_hostname', line: '# - update_hostname' }
- when: cloud_cfg.stat.exists == True
diff --git a/roles/openshift_openstack/tasks/node-configuration.yml b/roles/openshift_openstack/tasks/node-configuration.yml
index 89e58d830..59df2e396 100644
--- a/roles/openshift_openstack/tasks/node-configuration.yml
+++ b/roles/openshift_openstack/tasks/node-configuration.yml
@@ -4,8 +4,6 @@
msg: "SELinux is required for OpenShift and has been detected as '{{ ansible_selinux.config_mode }}'"
when: ansible_selinux.config_mode != "enforcing"
-- include: hostname.yml
+- include_tasks: container-storage-setup.yml
-- include: container-storage-setup.yml
-
-- include: node-network.yml
+- include_tasks: node-network.yml
diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml
index c03aceb94..cf2ead5c3 100644
--- a/roles/openshift_openstack/tasks/populate-dns.yml
+++ b/roles/openshift_openstack/tasks/populate-dns.yml
@@ -1,7 +1,7 @@
---
- name: "Generate list of private A records"
set_fact:
- private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}"
+ private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'] + openshift_openstack_private_hostname_suffix, 'ip': hostvars[item]['private_v4'] } ] }}"
with_items: "{{ groups['cluster_hosts'] }}"
- name: "Add wildcard records to the private A records for infrahosts"
@@ -30,7 +30,6 @@
nsupdate_key_algorithm_private: "{{ openshift_openstack_external_nsupdate_keys['private']['key_algorithm'] }}"
nsupdate_private_key_name: "{{ openshift_openstack_external_nsupdate_keys['private']['key_name']|default('private-' + openshift_openstack_full_dns_domain) }}"
when:
- - openshift_openstack_external_nsupdate_keys is defined
- openshift_openstack_external_nsupdate_keys['private'] is defined
@@ -44,10 +43,12 @@
key_secret: "{{ nsupdate_key_secret_private }}"
key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}"
entries: "{{ private_records }}"
+ when:
+ - openshift_openstack_external_nsupdate_keys['private'] is defined
- name: "Generate list of public A records"
set_fact:
- public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['public_v4'] } ] }}"
+ public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'] + openshift_openstack_public_hostname_suffix, 'ip': hostvars[item]['public_v4'] } ] }}"
with_items: "{{ groups['cluster_hosts'] }}"
when: hostvars[item]['public_v4'] is defined
@@ -63,15 +64,6 @@
when:
- hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined
- openshift_openstack_num_masters == 1
- - not openshift_openstack_use_bastion|bool
-
-- name: "Add public master cluster hostname records to the public A records (single master behind a bastion)"
- set_fact:
- public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(openshift_openstack_full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}"
- when:
- - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined
- - openshift_openstack_num_masters == 1
- - openshift_openstack_use_bastion|bool
- name: "Add public master cluster hostname records to the public A records (multi-master)"
set_fact:
@@ -87,7 +79,6 @@
nsupdate_key_algorithm_public: "{{ openshift_openstack_external_nsupdate_keys['public']['key_algorithm'] }}"
nsupdate_public_key_name: "{{ openshift_openstack_external_nsupdate_keys['public']['key_name']|default('public-' + openshift_openstack_full_dns_domain) }}"
when:
- - openshift_openstack_external_nsupdate_keys is defined
- openshift_openstack_external_nsupdate_keys['public'] is defined
- name: "Generate the public Add section for DNS"
@@ -100,11 +91,13 @@
key_secret: "{{ nsupdate_key_secret_public }}"
key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}"
entries: "{{ public_records }}"
+ when:
+ - openshift_openstack_external_nsupdate_keys['public'] is defined
- name: "Generate the final openshift_openstack_dns_records_add"
set_fact:
- openshift_openstack_dns_records_add: "{{ private_named_records + public_named_records }}"
+ openshift_openstack_dns_records_add: "{{ private_named_records|default([]) + public_named_records|default([]) }}"
- name: "Add DNS A records"
@@ -120,7 +113,7 @@
# TODO(shadower): add a cleanup playbook that removes these records, too!
state: present
with_subelements:
- - "{{ openshift_openstack_dns_records_add | default({}) }}"
+ - "{{ openshift_openstack_dns_records_add | default([]) }}"
- entries
register: nsupdate_add_result
until: nsupdate_add_result|succeeded
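The final list is now built with `default([])` on each side, so a cluster with only private (or only public) nsupdate keys still produces a valid value, and `with_subelements` then walks each (record set, entry) pair. A small sketch of the data shape that loop expects, with made-up values:

```yaml
# Sketch of the structure with_subelements iterates over; values are illustrative.
- set_fact:
    dns_records_example:
      - view: private
        zone: example.com
        entries:
          - { type: A, hostname: master-0.example.com, ip: 10.0.0.10 }

- name: Show each (record set, entry) pair
  debug:
    msg: "{{ item.0.zone }}: {{ item.1.hostname }} -> {{ item.1.ip }}"
  with_subelements:
    - "{{ dns_records_example | default([]) }}"
    - entries
```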
diff --git a/roles/openshift_openstack/tasks/provision.yml b/roles/openshift_openstack/tasks/provision.yml
index dccbe334c..b774bd620 100644
--- a/roles/openshift_openstack/tasks/provision.yml
+++ b/roles/openshift_openstack/tasks/provision.yml
@@ -1,6 +1,6 @@
---
- name: Generate the templates
- include: generate-templates.yml
+ include_tasks: generate-templates.yml
when:
- openshift_openstack_stack_state == 'present'
@@ -17,7 +17,7 @@
meta: refresh_inventory
- name: CleanUp
- include: cleanup.yml
+ include_tasks: cleanup.yml
when:
- openshift_openstack_stack_state == 'present'
diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2
index 0e7538629..8d13eb81e 100644
--- a/roles/openshift_openstack/templates/heat_stack.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2
@@ -54,25 +54,8 @@ outputs:
description: Floating IPs of the nodes
value: { get_attr: [ infra_nodes, floating_ip ] }
-{% if openshift_openstack_num_dns|int > 0 %}
- dns_name:
- description: Name of the DNS
- value:
- get_attr:
- - dns
- - name
-
- dns_floating_ips:
- description: Floating IPs of the DNS
- value: { get_attr: [ dns, floating_ip ] }
-
- dns_private_ips:
- description: Private IPs of the DNS
- value: { get_attr: [ dns, private_ip ] }
-{% endif %}
-
conditions:
- no_floating: {% if openshift_openstack_provider_network_name or openshift_openstack_use_bastion|bool %}true{% else %}false{% endif %}
+ no_floating: {% if openshift_openstack_provider_network_name %}true{% else %}false{% endif %}
resources:
@@ -180,13 +163,6 @@ resources:
port_range_min: 22
port_range_max: 22
remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }}
-{% if openshift_openstack_use_bastion|bool %}
- - direction: ingress
- protocol: tcp
- port_range_min: 22
- port_range_max: 22
- remote_ip_prefix: {{ openshift_openstack_bastion_ingress_cidr }}
-{% endif %}
- direction: ingress
protocol: icmp
remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }}
@@ -443,44 +419,7 @@ resources:
port_range_min: 443
port_range_max: 443
-{% if openshift_openstack_num_dns|int > 0 %}
- dns-secgrp:
- type: OS::Neutron::SecurityGroup
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-dns-secgrp
- params:
- cluster_id: {{ openshift_openstack_stack_name }}
- description:
- str_replace:
- template: Security group for cluster_id cluster DNS
- params:
- cluster_id: {{ openshift_openstack_stack_name }}
- rules:
- - direction: ingress
- protocol: udp
- port_range_min: 53
- port_range_max: 53
- remote_ip_prefix: {{ openshift_openstack_node_ingress_cidr }}
- - direction: ingress
- protocol: udp
- port_range_min: 53
- port_range_max: 53
- remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24"
- - direction: ingress
- protocol: tcp
- port_range_min: 53
- port_range_max: 53
- remote_ip_prefix: {{ openshift_openstack_node_ingress_cidr }}
- - direction: ingress
- protocol: tcp
- port_range_min: 53
- port_range_max: 53
- remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24"
-{% endif %}
-
-{% if openshift_openstack_num_masters|int > 1 or openshift_openstack_ui_ssh_tunnel|bool %}
+{% if openshift_openstack_num_masters|int > 1 %}
lb-secgrp:
type: OS::Neutron::SecurityGroup
properties:
@@ -491,20 +430,13 @@ resources:
protocol: tcp
port_range_min: {{ openshift_master_api_port | default(8443) }}
port_range_max: {{ openshift_master_api_port | default(8443) }}
- remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr | default(openshift_openstack_bastion_ingress_cidr) }}
-{% if openshift_openstack_ui_ssh_tunnel|bool %}
- - direction: ingress
- protocol: tcp
- port_range_min: {{ openshift_master_api_port | default(8443) }}
- port_range_max: {{ openshift_master_api_port | default(8443) }}
- remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }}
-{% endif %}
+ remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr }}
{% if openshift_master_console_port is defined and openshift_master_console_port != openshift_master_api_port %}
- direction: ingress
protocol: tcp
port_range_min: {{ openshift_master_console_port | default(8443) }}
port_range_max: {{ openshift_master_console_port | default(8443) }}
- remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr | default(openshift_openstack_bastion_ingress_cidr) }}
+ remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr }}
{% endif %}
{% endif %}
@@ -553,7 +485,7 @@ resources:
- no_floating
- null
- {{ openshift_openstack_external_network_name }}
-{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %}
+{% if openshift_openstack_provider_network_name %}
attach_float_net: false
{% endif %}
volume_size: {{ openshift_openstack_etcd_volume_size }}
@@ -685,7 +617,7 @@ resources:
- no_floating
- null
- {{ openshift_openstack_external_network_name }}
-{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %}
+{% if openshift_openstack_provider_network_name %}
attach_float_net: false
{% endif %}
volume_size: {{ openshift_openstack_master_volume_size }}
@@ -755,7 +687,7 @@ resources:
- no_floating
- null
- {{ openshift_openstack_external_network_name }}
-{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %}
+{% if openshift_openstack_provider_network_name %}
attach_float_net: false
{% endif %}
volume_size: {{ openshift_openstack_node_volume_size }}
@@ -818,9 +750,6 @@ resources:
{% else %}
- { get_resource: node-secgrp }
{% endif %}
-{% if openshift_openstack_ui_ssh_tunnel|bool and openshift_openstack_num_masters|int < 2 %}
- - { get_resource: lb-secgrp }
-{% endif %}
- { get_resource: infra-secgrp }
- { get_resource: common-secgrp }
{% if not openshift_openstack_provider_network_name %}
@@ -835,54 +764,3 @@ resources:
depends_on:
- interface
{% endif %}
-
-{% if openshift_openstack_num_dns|int > 0 %}
- dns:
- type: OS::Heat::ResourceGroup
- properties:
- count: {{ openshift_openstack_num_dns }}
- resource_def:
- type: server.yaml
- properties:
- name:
- str_replace:
- template: k8s_type-%index%.cluster_id
- params:
- cluster_id: {{ openshift_openstack_stack_name }}
- k8s_type: {{ openshift_openstack_dns_hostname }}
- cluster_env: {{ openshift_openstack_public_dns_domain }}
- cluster_id: {{ openshift_openstack_stack_name }}
- group:
- str_replace:
- template: k8s_type.cluster_id
- params:
- k8s_type: dns
- cluster_id: {{ openshift_openstack_stack_name }}
- type: dns
- image: {{ openshift_openstack_dns_image }}
- flavor: {{ openshift_openstack_dns_flavor }}
- key_name: {{ openshift_openstack_keypair_name }}
-{% if openshift_openstack_provider_network_name %}
- net: {{ openshift_openstack_provider_network_name }}
- net_name: {{ openshift_openstack_provider_network_name }}
-{% else %}
- net: { get_resource: net }
- subnet: { get_resource: subnet }
- net_name:
- str_replace:
- template: openshift-ansible-cluster_id-net
- params:
- cluster_id: {{ openshift_openstack_stack_name }}
-{% endif %}
- secgrp:
- - { get_resource: dns-secgrp }
- - { get_resource: common-secgrp }
-{% if not openshift_openstack_provider_network_name %}
- floating_network: {{ openshift_openstack_external_network_name }}
-{% endif %}
- volume_size: {{ openshift_openstack_dns_volume_size }}
-{% if not openshift_openstack_provider_network_name %}
- depends_on:
- - interface
-{% endif %}
-{% endif %}
diff --git a/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py b/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py
new file mode 100644
index 000000000..eb13a58ba
--- /dev/null
+++ b/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py
@@ -0,0 +1,157 @@
+"""
+Ansible action plugin to generate pv and pvc dictionaries lists
+"""
+
+from ansible.plugins.action import ActionBase
+from ansible import errors
+
+
+class ActionModule(ActionBase):
+ """Action plugin to execute health checks."""
+
+ def get_templated(self, var_to_template):
+ """Return a properly templated ansible variable"""
+ return self._templar.template(self.task_vars.get(var_to_template))
+
+ def build_common(self, varname=None):
+ """Retrieve common variables for each pv and pvc type"""
+ volume = self.get_templated(str(varname) + '_volume_name')
+ size = self.get_templated(str(varname) + '_volume_size')
+ labels = self.task_vars.get(str(varname) + '_labels')
+ if labels:
+ labels = self._templar.template(labels)
+ else:
+ labels = dict()
+ access_modes = self.get_templated(str(varname) + '_access_modes')
+ return (volume, size, labels, access_modes)
+
+ def build_pv_nfs(self, varname=None):
+ """Build pv dictionary for nfs storage type"""
+ host = self.task_vars.get(str(varname) + '_host')
+ if host:
+ host = self._templar.template(host)
+ elif host is None:
+ groups = self.task_vars.get('groups')
+ default_group_name = self.get_templated('openshift_persistent_volumes_default_nfs_group')
+ if groups and default_group_name and default_group_name in groups and len(groups[default_group_name]) > 0:
+ host = groups[default_group_name][0]
+ else:
+ raise errors.AnsibleModuleError("|failed no storage host detected")
+ volume, size, labels, access_modes = self.build_common(varname=varname)
+ directory = self.get_templated(str(varname) + '_nfs_directory')
+ path = directory + '/' + volume
+ return dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ nfs=dict(
+ server=host,
+ path=path)))
+
+ def build_pv_openstack(self, varname=None):
+ """Build pv dictionary for openstack storage type"""
+ volume, size, labels, access_modes = self.build_common(varname=varname)
+ filesystem = self.get_templated(str(varname) + '_openstack_filesystem')
+ volume_id = self.get_templated(str(varname) + '_openstack_volumeID')
+ return dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ cinder=dict(
+ fsType=filesystem,
+ volumeID=volume_id)))
+
+ def build_pv_glusterfs(self, varname=None):
+ """Build pv dictionary for glusterfs storage type"""
+ volume, size, labels, access_modes = self.build_common(varname=varname)
+ endpoints = self.get_templated(str(varname) + '_glusterfs_endpoints')
+ path = self.get_templated(str(varname) + '_glusterfs_path')
+ read_only = self.get_templated(str(varname) + '_glusterfs_readOnly')
+ return dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ glusterfs=dict(
+ endpoints=endpoints,
+ path=path,
+ readOnly=read_only)))
+
+ def build_pv_dict(self, varname=None):
+ """Check for the existence of PV variables"""
+ kind = self.task_vars.get(str(varname) + '_kind')
+ if kind:
+ kind = self._templar.template(kind)
+ create_pv = self.task_vars.get(str(varname) + '_create_pv')
+ if create_pv and self._templar.template(create_pv):
+ if kind == 'nfs':
+ return self.build_pv_nfs(varname=varname)
+
+ elif kind == 'openstack':
+ return self.build_pv_openstack(varname=varname)
+
+ elif kind == 'glusterfs':
+ return self.build_pv_glusterfs(varname=varname)
+
+ elif not (kind == 'object' or kind == 'dynamic'):
+ msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
+ kind,
+ varname)
+ raise errors.AnsibleModuleError(msg)
+ return None
+
+ def build_pvc_dict(self, varname=None):
+ """Check for the existence of PVC variables"""
+ kind = self.task_vars.get(str(varname) + '_kind')
+ if kind:
+ kind = self._templar.template(kind)
+ create_pv = self.task_vars.get(str(varname) + '_create_pv')
+ if create_pv:
+ create_pv = self._templar.template(create_pv)
+ create_pvc = self.task_vars.get(str(varname) + '_create_pvc')
+ if create_pvc:
+ create_pvc = self._templar.template(create_pvc)
+ if kind != 'object' and create_pv and create_pvc:
+ volume, size, _, access_modes = self.build_common(varname=varname)
+ return dict(
+ name="{0}-claim".format(volume),
+ capacity=size,
+ access_modes=access_modes)
+ return None
+
+ def run(self, tmp=None, task_vars=None):
+ """Run generate_pv_pvcs_list action plugin"""
+ result = super(ActionModule, self).run(tmp, task_vars)
+ # Ignore setting self.task_vars outside of init.
+ # pylint: disable=W0201
+ self.task_vars = task_vars or {}
+
+ result["changed"] = False
+ result["failed"] = False
+ result["msg"] = "persistent_volumes list and persistent_volume_claims list created"
+ vars_to_check = ['openshift_hosted_registry_storage',
+ 'openshift_hosted_router_storage',
+ 'openshift_hosted_etcd_storage',
+ 'openshift_logging_storage',
+ 'openshift_loggingops_storage',
+ 'openshift_metrics_storage',
+ 'openshift_prometheus_storage',
+ 'openshift_prometheus_alertmanager_storage',
+ 'openshift_prometheus_alertbuffer_storage']
+ persistent_volumes = []
+ persistent_volume_claims = []
+ for varname in vars_to_check:
+ pv_dict = self.build_pv_dict(varname)
+ if pv_dict:
+ persistent_volumes.append(pv_dict)
+ pvc_dict = self.build_pvc_dict(varname)
+ if pvc_dict:
+ persistent_volume_claims.append(pvc_dict)
+ result["persistent_volumes"] = persistent_volumes
+ result["persistent_volume_claims"] = persistent_volume_claims
+ return result
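
The new plugin derives everything from inventory variables of the form `<component>_storage_<suffix>`, where the suffixes (`_kind`, `_create_pv`, `_create_pvc`, `_volume_name`, `_volume_size`, `_labels`, `_access_modes`, plus kind-specific ones such as `_host` and `_nfs_directory` for NFS) are exactly the ones read in the code above. A minimal sketch of values that would make it emit one NFS-backed volume and claim for the metrics component; the hostname, size, and path are illustrative only:

```
# Hypothetical inventory values for one of the components the plugin checks
# (openshift_metrics_storage); only the variable suffixes come from the plugin code.
openshift_metrics_storage_kind: nfs
openshift_metrics_storage_create_pv: True
openshift_metrics_storage_create_pvc: True
openshift_metrics_storage_volume_name: metrics       # PV becomes "metrics-volume", PVC "metrics-claim"
openshift_metrics_storage_volume_size: 10Gi
openshift_metrics_storage_access_modes: ['ReadWriteOnce']
openshift_metrics_storage_nfs_directory: /exports    # exported path is <directory>/<volume_name>
openshift_metrics_storage_host: nfs.example.com      # if unset, the first host in the default NFS group is used
```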
diff --git a/roles/openshift_persistent_volumes/defaults/main.yml b/roles/openshift_persistent_volumes/defaults/main.yml
new file mode 100644
index 000000000..b16e164e6
--- /dev/null
+++ b/roles/openshift_persistent_volumes/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+
+openshift_persistent_volumes_default_nfs_group: 'oo_nfs_to_config'
+
+openshift_persistent_volume_extras: []
+openshift_persistent_volume_claims_extras: []
+
+glusterfs_pv: []
+glusterfs_pvc: []
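
The two `*_extras` lists default to empty and are merged (via `union`) with the generated lists in `tasks/main.yml` below. A hedged sketch of hand-supplied extra entries, mirroring the dictionary shapes the action plugin builds; every name and size here is a placeholder:

```
openshift_persistent_volume_extras:
- name: extra-nfs-volume
  capacity: 5Gi
  access_modes: ['ReadWriteOnce']
  storage:
    nfs:
      server: nfs.example.com
      path: /exports/extra
openshift_persistent_volume_claims_extras:
- name: extra-nfs-claim
  capacity: 5Gi
  access_modes: ['ReadWriteOnce']
```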
diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml
index 19e9a56b7..48b0699ab 100644
--- a/roles/openshift_persistent_volumes/meta/main.yml
+++ b/roles/openshift_persistent_volumes/meta/main.yml
@@ -9,4 +9,5 @@ galaxy_info:
- name: EL
versions:
- 7
-dependencies: {}
+dependencies:
+- role: openshift_facts
diff --git a/roles/openshift_persistent_volumes/tasks/main.yml b/roles/openshift_persistent_volumes/tasks/main.yml
index e431e978c..0b4dd7d1f 100644
--- a/roles/openshift_persistent_volumes/tasks/main.yml
+++ b/roles/openshift_persistent_volumes/tasks/main.yml
@@ -9,39 +9,36 @@
cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
changed_when: False
-- name: Deploy PersistentVolume definitions
- template:
- dest: "{{ mktemp.stdout }}/persistent-volumes.yml"
- src: persistent-volume.yml.j2
- when: persistent_volumes | length > 0
- changed_when: False
+- set_fact:
+ glusterfs_pv:
+ - name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-volume"
+ capacity: "{{ openshift_hosted_registry_storage_volume_size }}"
+ access_modes: "{{ openshift_hosted_registry_storage_access_modes }}"
+ storage:
+ glusterfs:
+ endpoints: "{{ openshift_hosted_registry_storage_glusterfs_endpoints }}"
+ path: "{{ openshift_hosted_registry_storage_glusterfs_path }}"
+ readOnly: "{{ openshift_hosted_registry_storage_glusterfs_readOnly }}"
+ glusterfs_pvc:
+ - name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-claim"
+ capacity: "{{ openshift_hosted_registry_storage_volume_size }}"
+ access_modes: "{{ openshift_hosted_registry_storage_access_modes }}"
+ when: openshift_hosted_registry_storage_glusterfs_swap | default(False)
-- name: Create PersistentVolumes
- command: >
- {{ openshift.common.client_binary }} create
- -f {{ mktemp.stdout }}/persistent-volumes.yml
- --config={{ mktemp.stdout }}/admin.kubeconfig
- register: pv_create_output
- when: persistent_volumes | length > 0
- failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout)
- changed_when: ('created' in pv_create_output.stdout)
+- name: create standard pv and pvc lists
+ # generate_pv_pvcs_list is a custom action module defined in ../action_plugins
+ generate_pv_pvcs_list: {}
+ register: l_pv_pvcs_list
-- name: Deploy PersistentVolumeClaim definitions
- template:
- dest: "{{ mktemp.stdout }}/persistent-volume-claims.yml"
- src: persistent-volume-claim.yml.j2
- when: persistent_volume_claims | length > 0
- changed_when: False
+- include_tasks: pv.yml
+ vars:
+ l_extra_persistent_volumes: "{{ openshift_persistent_volume_extras | union(glusterfs_pv) }}"
+ persistent_volumes: "{{ l_pv_pvcs_list.persistent_volumes | union(l_extra_persistent_volumes) }}"
-- name: Create PersistentVolumeClaims
- command: >
- {{ openshift.common.client_binary }} create
- -f {{ mktemp.stdout }}/persistent-volume-claims.yml
- --config={{ mktemp.stdout }}/admin.kubeconfig
- register: pvc_create_output
- when: persistent_volume_claims | length > 0
- failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout)
- changed_when: ('created' in pvc_create_output.stdout)
+- include_tasks: pvc.yml
+ vars:
+ l_extra_persistent_volume_claims: "{{ openshift_persistent_volume_claims_extras | union(glusterfs_pvc) }}"
+ persistent_volume_claims: "{{ l_pv_pvcs_list.persistent_volume_claims | union(l_extra_persistent_volume_claims) }}"
- name: Delete temp directory
file:
diff --git a/roles/openshift_persistent_volumes/tasks/pv.yml b/roles/openshift_persistent_volumes/tasks/pv.yml
new file mode 100644
index 000000000..346605ff7
--- /dev/null
+++ b/roles/openshift_persistent_volumes/tasks/pv.yml
@@ -0,0 +1,17 @@
+---
+- name: Deploy PersistentVolume definitions
+ template:
+ dest: "{{ mktemp.stdout }}/persistent-volumes.yml"
+ src: persistent-volume.yml.j2
+ when: persistent_volumes | length > 0
+ changed_when: False
+
+- name: Create PersistentVolumes
+ command: >
+ {{ openshift.common.client_binary }} create
+ -f {{ mktemp.stdout }}/persistent-volumes.yml
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ register: pv_create_output
+ when: persistent_volumes | length > 0
+ failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout)
+ changed_when: ('created' in pv_create_output.stdout)
diff --git a/roles/openshift_persistent_volumes/tasks/pvc.yml b/roles/openshift_persistent_volumes/tasks/pvc.yml
new file mode 100644
index 000000000..e44f9b18f
--- /dev/null
+++ b/roles/openshift_persistent_volumes/tasks/pvc.yml
@@ -0,0 +1,17 @@
+---
+- name: Deploy PersistentVolumeClaim definitions
+ template:
+ dest: "{{ mktemp.stdout }}/persistent-volume-claims.yml"
+ src: persistent-volume-claim.yml.j2
+ when: persistent_volume_claims | length > 0
+ changed_when: False
+
+- name: Create PersistentVolumeClaims
+ command: >
+ {{ openshift.common.client_binary }} create
+ -f {{ mktemp.stdout }}/persistent-volume-claims.yml
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ register: pvc_create_output
+ when: persistent_volume_claims | length > 0
+ failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout)
+ changed_when: ('created' in pvc_create_output.stdout)
diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
index ee9dac7cb..9ec14208b 100644
--- a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
+++ b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
@@ -17,5 +17,5 @@ items:
capacity:
storage: "{{ volume.capacity }}"
accessModes: {{ volume.access_modes | to_padded_yaml(2, 2) }}
- {{ volume.storage.keys()[0] }}: {{ volume.storage[volume.storage.keys()[0]] | to_padded_yaml(3, 2) }}
+ {{ (volume.storage.keys() | list)[0] }}: {{ volume.storage[(volume.storage.keys() | list)[0]] | to_padded_yaml(3, 2) }}
{% endfor %}
diff --git a/roles/openshift_prometheus/README.md b/roles/openshift_prometheus/README.md
index f1eca1da6..1ebeacabf 100644
--- a/roles/openshift_prometheus/README.md
+++ b/roles/openshift_prometheus/README.md
@@ -38,11 +38,13 @@ openshift_prometheus_args=['--storage.tsdb.retention=6h', '--storage.tsdb.min-bl
Each prometheus component (prometheus, alertmanager, alertbuffer) can set pv claim by setting corresponding role variable:
```
openshift_prometheus_<COMPONENT>_storage_type: <VALUE> (pvc, emptydir)
+openshift_prometheus_<COMPONENT>_storage_class: <VALUE>
openshift_prometheus_<COMPONENT>_pvc_(name|size|access_modes|pv_selector): <VALUE>
```
e.g
```
openshift_prometheus_storage_type: pvc
+openshift_prometheus_storage_class: glusterfs-storage
openshift_prometheus_alertmanager_pvc_name: alertmanager
openshift_prometheus_alertbuffer_pvc_size: 10G
openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
index df331a4bb..e30108d2c 100644
--- a/roles/openshift_prometheus/defaults/main.yaml
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -23,6 +23,7 @@ openshift_prometheus_pvc_name: prometheus
openshift_prometheus_pvc_size: "{{ openshift_prometheus_storage_volume_size | default('10Gi') }}"
openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
openshift_prometheus_pvc_pv_selector: "{{ openshift_prometheus_storage_labels | default({}) }}"
+openshift_prometheus_sc_name: "{{ openshift_prometheus_storage_class | default(None) }}"
# One of ['emptydir', 'pvc']
openshift_prometheus_alertmanager_storage_type: "emptydir"
@@ -30,6 +31,7 @@ openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager
openshift_prometheus_alertmanager_pvc_size: "{{ openshift_prometheus_alertmanager_storage_volume_size | default('10Gi') }}"
openshift_prometheus_alertmanager_pvc_access_modes: [ReadWriteOnce]
openshift_prometheus_alertmanager_pvc_pv_selector: "{{ openshift_prometheus_alertmanager_storage_labels | default({}) }}"
+openshift_prometheus_alertmanager_sc_name: "{{ openshift_prometheus_alertmanager_storage_class | default(None) }}"
# One of ['emptydir', 'pvc']
openshift_prometheus_alertbuffer_storage_type: "emptydir"
@@ -37,6 +39,7 @@ openshift_prometheus_alertbuffer_pvc_name: prometheus-alertbuffer
openshift_prometheus_alertbuffer_pvc_size: "{{ openshift_prometheus_alertbuffer_storage_volume_size | default('10Gi') }}"
openshift_prometheus_alertbuffer_pvc_access_modes: [ReadWriteOnce]
openshift_prometheus_alertbuffer_pvc_pv_selector: "{{ openshift_prometheus_alertbuffer_storage_labels | default({}) }}"
+openshift_prometheus_alertbuffer_sc_name: "{{ openshift_prometheus_alertbuffer_storage_class | default(None) }}"
# container resources
openshift_prometheus_cpu_limit: null
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index ad15dc65f..abc5dd476 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -131,6 +131,7 @@
access_modes: "{{ openshift_prometheus_pvc_access_modes }}"
volume_capacity: "{{ openshift_prometheus_pvc_size }}"
selector: "{{ openshift_prometheus_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_prometheus_sc_name }}"
when: openshift_prometheus_storage_type == 'pvc'
- name: create alertmanager pvc
@@ -140,6 +141,7 @@
access_modes: "{{ openshift_prometheus_alertmanager_pvc_access_modes }}"
volume_capacity: "{{ openshift_prometheus_alertmanager_pvc_size }}"
selector: "{{ openshift_prometheus_alertmanager_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_prometheus_alertmanager_sc_name }}"
when: openshift_prometheus_alertmanager_storage_type == 'pvc'
- name: create alertbuffer pvc
@@ -149,6 +151,7 @@
access_modes: "{{ openshift_prometheus_alertbuffer_pvc_access_modes }}"
volume_capacity: "{{ openshift_prometheus_alertbuffer_pvc_size }}"
selector: "{{ openshift_prometheus_alertbuffer_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_prometheus_alertbuffer_sc_name }}"
when: openshift_prometheus_alertbuffer_storage_type == 'pvc'
# prometheus configmap
diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml
index 6e8792446..e543d753c 100644
--- a/roles/openshift_provisioners/tasks/install_efs.yaml
+++ b/roles/openshift_provisioners/tasks/install_efs.yaml
@@ -66,7 +66,7 @@
- name: "Set anyuid permissions for efs"
command: >
- {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ {{ openshift.common.client_binary}} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs
register: efs_output
failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index be749a2e1..f7bd58db3 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -82,6 +82,7 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels.
| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
+| openshift_storage_glusterfs_storageclass_default | False | Sets the StorageClass created for each GlusterFS cluster as the default StorageClass
| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
| openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service
@@ -125,13 +126,14 @@ registry. These variables start with the prefix
values in their corresponding non-registry variables. The following variables
are an exception:
-| Name | Default value | Description |
-|-------------------------------------------------------|-----------------------|-----------------------------------------|
-| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
-| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
-| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
-| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above
-| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above
+| Name | Default value | Description |
+|-----------------------------------------------------------|-----------------------|-----------------------------------------|
+| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
+| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
+| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
+| openshift_storage_glusterfs_registry_storageclass_default | False | Sets the StorageClass created for the registry's GlusterFS cluster as the default StorageClass
+| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above
+| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above
Additionally, this role's behavior responds to several registry-specific variables in the [openshift_hosted role](../openshift_hosted/README.md):
@@ -147,7 +149,6 @@ Dependencies
------------
* os_firewall
-* openshift_hosted_facts
* openshift_repos
* lib_openshift
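
The `*_storageclass_default` switches added above are plain inventory booleans; when true, the storageclass templates further down annotate the generated StorageClass with `storageclass.kubernetes.io/is-default-class: "true"`. A minimal sketch, assuming the rest of the GlusterFS configuration is already in place:

```
# Illustrative inventory snippet; variable names and defaults come from the README
# and defaults/main.yml changes in this patch.
openshift_storage_glusterfs_storageclass: True
openshift_storage_glusterfs_storageclass_default: True            # mark it as the cluster default
openshift_storage_glusterfs_registry_storageclass: False          # recommended: no StorageClass for the registry cluster
openshift_storage_glusterfs_registry_storageclass_default: False
```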
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index 814d6ff28..da34fab2a 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -5,6 +5,7 @@ openshift_storage_glusterfs_name: 'storage'
openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host"
openshift_storage_glusterfs_use_default_selector: False
openshift_storage_glusterfs_storageclass: True
+openshift_storage_glusterfs_storageclass_default: False
openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
openshift_storage_glusterfs_version: 'latest'
openshift_storage_glusterfs_block_deploy: True
@@ -45,12 +46,13 @@ openshift_storage_glusterfs_heketi_fstab: "{{ '/var/lib/heketi/fstab' | quote if
openshift_storage_glusterfs_namespace: "{{ 'glusterfs' | quote if openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native else 'default' | quote }}"
openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
-openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default(openshift_storage_glusterfs_namespace) }}"
+openshift_storage_glusterfs_registry_namespace: "{{ openshift_hosted_registry_namespace | default(openshift_storage_glusterfs_namespace) }}"
openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
openshift_storage_glusterfs_registry_name: 'registry'
openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host"
openshift_storage_glusterfs_registry_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
openshift_storage_glusterfs_registry_storageclass: False
+openshift_storage_glusterfs_registry_storageclass_default: False
openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
openshift_storage_glusterfs_registry_block_deploy: "{{ openshift_storage_glusterfs_block_deploy }}"
diff --git a/roles/openshift_storage_glusterfs/meta/main.yml b/roles/openshift_storage_glusterfs/meta/main.yml
index 0cdd33880..6a4ef942b 100644
--- a/roles/openshift_storage_glusterfs/meta/main.yml
+++ b/roles/openshift_storage_glusterfs/meta/main.yml
@@ -10,6 +10,6 @@ galaxy_info:
versions:
- 7
dependencies:
-- role: openshift_hosted_facts
+- role: openshift_facts
- role: lib_openshift
- role: lib_os_firewall
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index 4b33e91b4..315bc5614 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -82,7 +82,7 @@
retries: "{{ (glusterfs_timeout | int / 10) | int }}"
when: glusterfs_heketi_wipe
-- include: glusterfs_deploy.yml
+- include_tasks: glusterfs_deploy.yml
when: glusterfs_is_native
- name: Create heketi service account
@@ -212,7 +212,7 @@
when:
- glusterfs_heketi_is_native
-- include: heketi_deploy_part1.yml
+- include_tasks: heketi_deploy_part1.yml
when:
- glusterfs_heketi_is_native
- glusterfs_heketi_deploy_is_missing
@@ -256,7 +256,7 @@
when:
- glusterfs_heketi_topology_load
-- include: heketi_deploy_part2.yml
+- include_tasks: heketi_deploy_part2.yml
when:
- glusterfs_heketi_is_native
- glusterfs_heketi_is_missing
@@ -312,8 +312,8 @@
when:
- glusterfs_storageclass or glusterfs_s3_deploy
-- include: glusterblock_deploy.yml
+- include_tasks: glusterblock_deploy.yml
when: glusterfs_block_deploy
-- include: gluster_s3_deploy.yml
+- include_tasks: gluster_s3_deploy.yml
when: glusterfs_s3_deploy
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 71c1311cd..2ea7286f3 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -7,6 +7,7 @@
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}"
glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
+ glusterfs_storageclass_default: "{{ openshift_storage_glusterfs_storageclass_default | bool }}"
glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
glusterfs_block_deploy: "{{ openshift_storage_glusterfs_block_deploy | bool }}"
@@ -46,4 +47,4 @@
glusterfs_heketi_fstab: "{{ openshift_storage_glusterfs_heketi_fstab }}"
glusterfs_nodes: "{{ groups.glusterfs | default([]) }}"
-- include: glusterfs_common.yml
+- include_tasks: glusterfs_common.yml
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index 30e83e79b..0c2fcb2c5 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -23,7 +23,7 @@
state: absent
labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
with_items: "{{ groups.all }}"
- when: glusterfs_wipe
+ when: "'openshift' in hostvars[item] and glusterfs_wipe"
- name: Delete pre-existing GlusterFS config
file:
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index d3cba61cf..b7cff6514 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -7,6 +7,7 @@
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}"
glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_registry_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
+ glusterfs_storageclass_default: "{{ openshift_storage_glusterfs_registry_storageclass_default | bool }}"
glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
glusterfs_block_deploy: "{{ openshift_storage_glusterfs_registry_block_deploy | bool }}"
@@ -46,7 +47,7 @@
glusterfs_heketi_fstab: "{{ openshift_storage_glusterfs_registry_heketi_fstab }}"
glusterfs_nodes: "{% if groups.glusterfs_registry is defined %}{% set nodes = groups.glusterfs_registry %}{% elif 'groups.glusterfs' is defined %}{% set nodes = groups.glusterfs %}{% else %}{% set nodes = '[]' %}{% endif %}{{ nodes }}"
-- include: glusterfs_common.yml
+- include_tasks: glusterfs_common.yml
when:
- glusterfs_nodes | default([]) | count > 0
- "'glusterfs' not in groups or glusterfs_nodes != groups.glusterfs"
@@ -56,5 +57,5 @@
register: registry_volume
- name: Create GlusterFS registry volume
- command: "{{ glusterfs_heketi_client }} volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
- when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout"
+ command: "{{ glusterfs_heketi_client }} volume create --size={{ openshift_hosted_registry_storage_volume_size | replace('Gi','') }} --name={{ openshift_hosted_registry_storage_glusterfs_path }}"
+ when: "openshift_hosted_registry_storage_glusterfs_path not in registry_volume.stdout"
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
index d2d8c6c10..b48bfc88e 100644
--- a/roles/openshift_storage_glusterfs/tasks/main.yml
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -5,13 +5,15 @@
changed_when: False
check_mode: no
-- include: glusterfs_config.yml
+- include_tasks: glusterfs_config.yml
when:
- groups.glusterfs | default([]) | count > 0
-- include: glusterfs_registry.yml
- when:
- - "groups.glusterfs_registry | default([]) | count > 0 or openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap"
+- include_tasks: glusterfs_registry.yml
+ when: >
+ groups.glusterfs_registry | default([]) | count > 0
+ or (openshift_hosted_registry_storage_kind | default(none) == 'glusterfs')
+ or (openshift_hosted_registry_storage_glusterfs_swap | default(False))
- name: Delete temp directory
file:
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
index 454e84aaf..5ea17428f 100644
--- a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
@@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
provisioner: kubernetes.io/glusterfs
parameters:
resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
index 095fb780f..ca87807fe 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
@@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
provisioner: kubernetes.io/glusterfs
parameters:
resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
index 095fb780f..ca87807fe 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
@@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
provisioner: kubernetes.io/glusterfs
parameters:
resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
index 095fb780f..ca87807fe 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
@@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
provisioner: kubernetes.io/glusterfs
parameters:
resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
diff --git a/roles/openshift_storage_nfs/meta/main.yml b/roles/openshift_storage_nfs/meta/main.yml
index 98f7c317e..d61e6873a 100644
--- a/roles/openshift_storage_nfs/meta/main.yml
+++ b/roles/openshift_storage_nfs/meta/main.yml
@@ -11,4 +11,4 @@ galaxy_info:
- 7
dependencies:
- role: lib_os_firewall
-- role: openshift_hosted_facts
+- role: openshift_facts
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index c25cad74c..55e4024ec 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -20,25 +20,25 @@
- name: Ensure exports directory exists
file:
- path: "{{ openshift.hosted.registry.storage.nfs.directory }}"
+ path: "{{ openshift_hosted_registry_storage_nfs_directory }}"
state: directory
- name: Ensure export directories exist
file:
- path: "{{ item.storage.nfs.directory }}/{{ item.storage.volume.name }}"
+ path: "{{ item }}"
state: directory
mode: 0777
owner: nfsnobody
group: nfsnobody
with_items:
- - "{{ openshift.hosted.registry }}"
- - "{{ openshift.metrics }}"
- - "{{ openshift.logging }}"
- - "{{ openshift.loggingops }}"
- - "{{ openshift.hosted.etcd }}"
- - "{{ openshift.prometheus }}"
- - "{{ openshift.prometheus.alertmanager }}"
- - "{{ openshift.prometheus.alertbuffer }}"
+ - "{{ openshift_hosted_registry_storage_nfs_directory }}/{{ openshift_hosted_registry_storage_volume_name }}"
+ - "{{ openshift_metrics_storage_nfs_directory }}/{{ openshift_metrics_storage_volume_name }}"
+ - "{{ openshift_logging_storage_nfs_directory }}/{{ openshift_logging_storage_volume_name }}"
+ - "{{ openshift_loggingops_storage_nfs_directory }}/{{ openshift_loggingops_storage_volume_name }}"
+ - "{{ openshift_hosted_etcd_storage_nfs_directory }}/{{ openshift_hosted_etcd_storage_volume_name }}"
+ - "{{ openshift_prometheus_storage_nfs_directory }}/{{ openshift_prometheus_storage_volume_name }}"
+ - "{{ openshift_prometheus_alertmanager_storage_nfs_directory }}/{{ openshift_prometheus_alertmanager_storage_volume_name }}"
+ - "{{ openshift_prometheus_alertbuffer_storage_nfs_directory }}/{{ openshift_prometheus_alertbuffer_storage_volume_name }}"
- name: Configure exports
template:
diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2
index c2a741035..2ec8db019 100644
--- a/roles/openshift_storage_nfs/templates/exports.j2
+++ b/roles/openshift_storage_nfs/templates/exports.j2
@@ -1,8 +1,8 @@
-{{ openshift.hosted.registry.storage.nfs.directory }}/{{ openshift.hosted.registry.storage.volume.name }} {{ openshift.hosted.registry.storage.nfs.options }}
-{{ openshift.metrics.storage.nfs.directory }}/{{ openshift.metrics.storage.volume.name }} {{ openshift.metrics.storage.nfs.options }}
-{{ openshift.logging.storage.nfs.directory }}/{{ openshift.logging.storage.volume.name }} {{ openshift.logging.storage.nfs.options }}
-{{ openshift.loggingops.storage.nfs.directory }}/{{ openshift.loggingops.storage.volume.name }} {{ openshift.loggingops.storage.nfs.options }}
-{{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }}
-{{ openshift.prometheus.storage.nfs.directory }}/{{ openshift.prometheus.storage.volume.name }} {{ openshift.prometheus.storage.nfs.options }}
-{{ openshift.prometheus.alertmanager.storage.nfs.directory }}/{{ openshift.prometheus.alertmanager.storage.volume.name }} {{ openshift.prometheus.alertmanager.storage.nfs.options }}
-{{ openshift.prometheus.alertbuffer.storage.nfs.directory }}/{{ openshift.prometheus.alertbuffer.storage.volume.name }} {{ openshift.prometheus.alertbuffer.storage.nfs.options }}
+{{ openshift_hosted_registry_storage_nfs_directory }}/{{ openshift_hosted_registry_storage_volume_name }} {{ openshift_hosted_registry_storage_nfs_options }}
+{{ openshift_metrics_storage_nfs_directory }}/{{ openshift_metrics_storage_volume_name }} {{ openshift_metrics_storage_nfs_options }}
+{{ openshift_logging_storage_nfs_directory }}/{{ openshift_logging_storage_volume_name }} {{ openshift_logging_storage_nfs_options }}
+{{ openshift_loggingops_storage_nfs_directory }}/{{ openshift_loggingops_storage_volume_name }} {{ openshift_loggingops_storage_nfs_options }}
+{{ openshift_hosted_etcd_storage_nfs_directory }}/{{ openshift_hosted_etcd_storage_volume_name }} {{ openshift_hosted_etcd_storage_nfs_options }}
+{{ openshift_prometheus_storage_nfs_directory }}/{{ openshift_prometheus_storage_volume_name }} {{ openshift_prometheus_storage_nfs_options }}
+{{ openshift_prometheus_alertmanager_storage_nfs_directory }}/{{ openshift_prometheus_alertmanager_storage_volume_name }} {{ openshift_prometheus_alertmanager_storage_nfs_options }}
+{{ openshift_prometheus_alertbuffer_storage_nfs_directory }}/{{ openshift_prometheus_alertbuffer_storage_volume_name }} {{ openshift_prometheus_alertbuffer_storage_nfs_options }}
diff --git a/roles/openshift_storage_nfs_lvm/tasks/main.yml b/roles/openshift_storage_nfs_lvm/tasks/main.yml
index 49dd657b5..c8e7b6d7c 100644
--- a/roles/openshift_storage_nfs_lvm/tasks/main.yml
+++ b/roles/openshift_storage_nfs_lvm/tasks/main.yml
@@ -20,7 +20,7 @@
file: path={{osnl_mount_dir}}/{{ item }} owner=nfsnobody group=nfsnobody mode=0700
with_sequence: start={{osnl_volume_num_start}} count={{osnl_number_of_volumes}} format={{osnl_volume_prefix}}{{osnl_volume_size}}g%04d
-- include: nfs.yml
+- include_tasks: nfs.yml
- name: Create volume json file
template: src=../templates/nfs.json.j2 dest=/root/persistent-volume.{{ item }}.json
diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml
index 01a1a7472..354699637 100644
--- a/roles/openshift_version/defaults/main.yml
+++ b/roles/openshift_version/defaults/main.yml
@@ -1,2 +1,10 @@
---
openshift_protect_installed_version: True
+
+openshift_service_type_dict:
+ origin: origin
+ openshift-enterprise: atomic-openshift
+
+openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
+
+openshift_use_crio_only: False
diff --git a/roles/openshift_version/meta/main.yml b/roles/openshift_version/meta/main.yml
index 5d7683120..d0ad4b7d2 100644
--- a/roles/openshift_version/meta/main.yml
+++ b/roles/openshift_version/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_utils
+- role: openshift_facts
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index 4f9158ade..ae0f68a5b 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -101,13 +101,13 @@
when: is_containerized | bool
- block:
- - name: Get available {{ openshift.common.service_type}} version
+ - name: Get available {{ openshift_service_type }} version
repoquery:
- name: "{{ openshift.common.service_type}}"
+ name: "{{ openshift_service_type}}"
ignore_excluders: true
register: rpm_results
- fail:
- msg: "Package {{ openshift.common.service_type}} not found"
+ msg: "Package {{ openshift_service_type}} not found"
when: not rpm_results.results.package_found
- set_fact:
openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
@@ -196,7 +196,7 @@
- openshift_version.startswith(openshift_release) | bool
msg: |-
You requested openshift_release {{ openshift_release }}, which is not matched by
- the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }}
+ the latest OpenShift RPM we detected as {{ openshift_service_type }}-{{ openshift_version }}
on host {{ inventory_hostname }}.
We will only install the latest RPMs, so please ensure you are getting the release
you expect. You may need to adjust your Ansible inventory, modify the repositories
diff --git a/roles/openshift_version/tasks/set_version_rpm.yml b/roles/openshift_version/tasks/set_version_rpm.yml
index c40777bf1..c7ca5ceae 100644
--- a/roles/openshift_version/tasks/set_version_rpm.yml
+++ b/roles/openshift_version/tasks/set_version_rpm.yml
@@ -8,14 +8,14 @@
- openshift_version is not defined
- block:
- - name: Get available {{ openshift.common.service_type}} version
+ - name: Get available {{ openshift_service_type }} version
repoquery:
- name: "{{ openshift.common.service_type}}"
+ name: "{{ openshift_service_type}}"
ignore_excluders: true
register: rpm_results
- fail:
- msg: "Package {{ openshift.common.service_type}} not found"
+ msg: "Package {{ openshift_service_type}} not found"
when: not rpm_results.results.package_found
- set_fact:
diff --git a/roles/rhel_subscribe/README.md b/roles/rhel_subscribe/README.md
new file mode 100644
index 000000000..15eaf4f30
--- /dev/null
+++ b/roles/rhel_subscribe/README.md
@@ -0,0 +1,29 @@
+RHEL Subscribe
+==============
+
+Subscribes RHEL servers and adds the OpenShift Enterprise repositories.
+
+Role variables
+--------------
+
+### `rhsub_user`
+
+Username for the subscription-manager.
+
+### `rhsub_pass`
+
+Password for the subscription-manager.
+
+### `rhsub_pool`
+
+Name of the pool to attach (optional).
+
+### `rhsub_server`
+
+Custom hostname for the Satellite server (optional).
+
+### `openshift_release`
+
+Version for the OpenShift Enterprise repositories.
+
+Example: `3.6`
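
A hedged example of how these variables might be passed to the role from an inventory or `-e` extra vars; every value below is a placeholder:

```
rhsub_user: exampleuser
rhsub_pass: examplepassword
rhsub_pool: 'Red Hat OpenShift Container Platform, Premium*'   # role default, shown here for completeness
rhsub_server: satellite.example.com                            # only needed for Satellite-managed hosts
openshift_release: '3.6'                                       # a leading "v" is stripped by tasks/enterprise.yml
```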
diff --git a/roles/rhel_subscribe/defaults/main.yml b/roles/rhel_subscribe/defaults/main.yml
new file mode 100644
index 000000000..80b2ab919
--- /dev/null
+++ b/roles/rhel_subscribe/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+rhsub_pool: 'Red Hat OpenShift Container Platform, Premium*'
diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml
index fa74c9953..8acdfb969 100644
--- a/roles/rhel_subscribe/tasks/enterprise.yml
+++ b/roles/rhel_subscribe/tasks/enterprise.yml
@@ -1,25 +1,18 @@
---
-- name: Disable all repositories
- command: subscription-manager repos --disable="*"
-
-- set_fact:
- default_ose_version: '3.6'
- when: deployment_type == 'openshift-enterprise'
-
- set_fact:
- ose_version: "{{ lookup('env', 'ose_version') | default(default_ose_version, True) }}"
-
-- fail:
- msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type"
+ openshift_release: "{{ openshift_release[1:] }}"
when:
- - deployment_type == 'openshift-enterprise'
- - ose_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6'] )
+ - openshift_release is defined
+ - openshift_release[0] == 'v'
+
+- name: Disable all repositories
+ command: subscription-manager repos --disable="*"
- name: Enable RHEL repositories
command: subscription-manager repos \
--enable="rhel-7-server-rpms" \
--enable="rhel-7-server-extras-rpms" \
- --enable="rhel-7-server-ose-{{ ose_version }}-rpms" \
+ --enable="rhel-7-server-ose-{{ (openshift_release | default('')).split('.')[0:2] | join('.') }}-rpms" \
--enable="rhel-7-fast-datapath-rpms"
register: subscribe_repos
until: subscribe_repos | succeeded
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index 9ca49b569..3466b7e44 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -3,23 +3,17 @@
# to make it able to attach to a pool
# to make it able to enable repositories
-- set_fact:
- rhel_subscription_pool: "{{ lookup('env', 'rhel_subscription_pool') | default(rhsub_pool | default('Red Hat OpenShift Container Platform, Premium*')) }}"
- rhel_subscription_user: "{{ lookup('env', 'rhel_subscription_user') | default(rhsub_user | default(omit, True)) }}"
- rhel_subscription_pass: "{{ lookup('env', 'rhel_subscription_pass') | default(rhsub_pass | default(omit, True)) }}"
- rhel_subscription_server: "{{ lookup('env', 'rhel_subscription_server') | default(rhsub_server | default(omit, True)) }}"
-
- fail:
msg: "This role is only supported for Red Hat hosts"
when: ansible_distribution != 'RedHat'
- fail:
- msg: Either rhsub_user or the rhel_subscription_user env variable are required for this role.
- when: rhel_subscription_user is not defined
+ msg: The rhsub_user variable is required for this role.
+ when: rhsub_user is not defined or not rhsub_user
- fail:
- msg: Either rhsub_pass or the rhel_subscription_pass env variable are required for this role.
- when: rhel_subscription_pass is not defined
+ msg: The rhsub_pass variable is required for this role.
+ when: rhsub_pass is not defined or not rhsub_pass
- name: Detecting Atomic Host Operating System
stat:
@@ -27,10 +21,10 @@
register: ostree_booted
- name: Satellite preparation
- command: "rpm -Uvh http://{{ rhel_subscription_server }}/pub/katello-ca-consumer-latest.noarch.rpm"
+ command: "rpm -Uvh http://{{ rhsub_server }}/pub/katello-ca-consumer-latest.noarch.rpm"
args:
creates: /etc/rhsm/ca/katello-server-ca.pem
- when: rhel_subscription_server is defined and rhel_subscription_server
+ when: rhsub_server is defined and rhsub_server
- name: Install Red Hat Subscription manager
yum:
@@ -41,26 +35,26 @@
- name: RedHat subscriptions
redhat_subscription:
- username: "{{ rhel_subscription_user }}"
- password: "{{ rhel_subscription_pass }}"
+ username: "{{ rhsub_user }}"
+ password: "{{ rhsub_pass }}"
register: rh_subscription
until: rh_subscription | succeeded
- name: Retrieve the OpenShift Pool ID
- command: subscription-manager list --available --matches="{{ rhel_subscription_pool }}" --pool-only
+ command: subscription-manager list --available --matches="{{ rhsub_pool }}" --pool-only
register: openshift_pool_id
until: openshift_pool_id | succeeded
changed_when: False
- name: Determine if OpenShift Pool Already Attached
- command: subscription-manager list --consumed --matches="{{ rhel_subscription_pool }}" --pool-only
+ command: subscription-manager list --consumed --matches="{{ rhsub_pool }}" --pool-only
register: openshift_pool_attached
until: openshift_pool_attached | succeeded
changed_when: False
when: openshift_pool_id.stdout == ''
- fail:
- msg: "Unable to find pool matching {{ rhel_subscription_pool }} in available or consumed pools"
+ msg: "Unable to find pool matching {{ rhsub_pool }} in available or consumed pools"
when: openshift_pool_id.stdout == '' and openshift_pool_attached is defined and openshift_pool_attached.stdout == ''
- name: Attach to OpenShift Pool
@@ -69,7 +63,6 @@
until: subscribe_pool | succeeded
when: openshift_pool_id.stdout != ''
-- include: enterprise.yml
+- include_tasks: enterprise.yml
when:
- - deployment_type == 'openshift-enterprise'
- not ostree_booted.stat.exists | bool
diff --git a/setup.py b/setup.py
index 5bf48b5ad..5ba050b83 100644
--- a/setup.py
+++ b/setup.py
@@ -334,9 +334,9 @@ class OpenShiftAnsibleSyntaxCheck(Command):
result = self.deprecate_jinja2_in_when(yaml_contents, yaml_file)
has_errors = result or has_errors
- # TODO (rteague): This test will be enabled once we move to Ansible 2.4
- # result = self.deprecate_include(yaml_contents, yaml_file)
- # has_errors = result or has_errors
+ # Check for usage of include: directive
+ result = self.deprecate_include(yaml_contents, yaml_file)
+ has_errors = result or has_errors
if not has_errors:
print('...PASSED')
@@ -345,35 +345,29 @@ class OpenShiftAnsibleSyntaxCheck(Command):
print('-' * 60)
print('Syntax checking playbook: {}'.format(playbook))
- # Error on any entry points in 'common'
- if 'common' in playbook:
- print('{}Invalid entry point playbook. All playbooks must'
- ' start in playbooks/byo{}'.format(self.FAIL, self.ENDC))
- has_errors = True
# --syntax-check each entry point playbook
- else:
- try:
- # Create a host group list to avoid WARNING on unmatched host patterns
- host_group_list = [
- 'etcd,masters,nodes,OSEv3',
- 'oo_all_hosts',
- 'oo_etcd_to_config,oo_new_etcd_to_config,oo_first_etcd,oo_etcd_hosts_to_backup,'
- 'oo_etcd_hosts_to_upgrade,oo_etcd_to_migrate',
- 'oo_masters,oo_masters_to_config,oo_first_master,oo_containerized_master_nodes',
- 'oo_nodes_to_config,oo_nodes_to_upgrade',
- 'oo_nodes_use_kuryr,oo_nodes_use_flannel',
- 'oo_nodes_use_calico,oo_nodes_use_nuage,oo_nodes_use_contiv',
- 'oo_lb_to_config',
- 'oo_nfs_to_config',
- 'glusterfs,glusterfs_registry,']
- subprocess.check_output(
- ['ansible-playbook', '-i ' + ','.join(host_group_list),
- '--syntax-check', playbook]
- )
- except subprocess.CalledProcessError as cpe:
- print('{}Execution failed: {}{}'.format(
- self.FAIL, cpe, self.ENDC))
- has_errors = True
+ try:
+ # Create a host group list to avoid WARNING on unmatched host patterns
+ host_group_list = [
+ 'etcd,masters,nodes,OSEv3',
+ 'oo_all_hosts',
+ 'oo_etcd_to_config,oo_new_etcd_to_config,oo_first_etcd,oo_etcd_hosts_to_backup,'
+ 'oo_etcd_hosts_to_upgrade,oo_etcd_to_migrate',
+ 'oo_masters,oo_masters_to_config,oo_first_master,oo_containerized_master_nodes',
+ 'oo_nodes_to_config,oo_nodes_to_upgrade',
+ 'oo_nodes_use_kuryr,oo_nodes_use_flannel',
+ 'oo_nodes_use_calico,oo_nodes_use_nuage,oo_nodes_use_contiv',
+ 'oo_lb_to_config',
+ 'oo_nfs_to_config',
+ 'glusterfs,glusterfs_registry,']
+ subprocess.check_output(
+ ['ansible-playbook', '-i ' + ','.join(host_group_list),
+ '--syntax-check', playbook]
+ )
+ except subprocess.CalledProcessError as cpe:
+ print('{}Execution failed: {}{}'.format(
+ self.FAIL, cpe, self.ENDC))
+ has_errors = True
if has_errors:
raise SystemExit(1)
diff --git a/test/integration/build-images.sh b/test/integration/build-images.sh
index 74a55fa51..74a55fa51 100755..100644
--- a/test/integration/build-images.sh
+++ b/test/integration/build-images.sh
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml
index 9875de9aa..006a71bd9 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml
@@ -1,6 +1,6 @@
---
# NOTE: this test is probably superfluous since openshift_version already does it
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -15,7 +15,7 @@
- block:
# put the repo back to disabled
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_name: "ose-3.2", repo_enabled: 0 }
- action: openshift_health_check
@@ -23,4 +23,4 @@
checks: [ 'package_availability' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml
index 16ff41673..b4f18e3b5 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -17,4 +17,4 @@
checks: [ 'package_availability' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml
index 9f3aad7bd..7998023ae 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -14,7 +14,7 @@
post_tasks:
- block:
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_name: "break-yum" }
- action: openshift_health_check
@@ -22,4 +22,4 @@
checks: [ 'package_update' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml
index 84e9360f5..3b8b15ff3 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -14,7 +14,7 @@
post_tasks:
- block:
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_name: "break-yum" }
- name: Break the break-yum repo
@@ -29,4 +29,4 @@
checks: [ 'package_update' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml
index f4c1bedfa..269c0250b 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -19,4 +19,4 @@
checks: [ 'package_update' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml
index 409057792..92408a669 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -14,7 +14,7 @@
post_tasks:
- block:
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_name: "break-yum" }
- name: Remove the local repo entirely
@@ -25,4 +25,4 @@
checks: [ 'package_update' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
index d88f82a4a..4e2b8a50c 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -15,7 +15,7 @@
- block:
# disable extras so we control docker version
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_file: "CentOS-Base", repo_name: "extras", repo_enabled: 0 }
- action: openshift_health_check
@@ -23,4 +23,4 @@
checks: [ 'package_version' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml
index 401ad1e21..e1f8d74e6 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml
@@ -1,6 +1,6 @@
---
# NOTE: this test is probably superfluous since openshift_version already does it
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -16,14 +16,14 @@
- block:
# put the repo back to disabled
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_name: "ose-3.2", repo_enabled: 0 }
# test with wrong repo enabled
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_name: "ose-3.3" }
- action: openshift_health_check
args:
checks: [ 'package_version' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml
index 88613802b..600bbe9c3 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -14,11 +14,11 @@
- block:
# enable repo with extra minor version available
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_name: "ose-3.3" }
# disable extras so we control docker version
- - include: tasks/enable_repo.yml
+ - include_tasks: tasks/enable_repo.yml
vars: { repo_file: "CentOS-Base", repo_name: "extras", repo_enabled: 0 }
- action: openshift_health_check
@@ -26,4 +26,4 @@
checks: [ 'package_version' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml
index da3f6b844..079ca4253 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml
@@ -1,5 +1,5 @@
---
-- include: ../../setup_container.yml
+- import_playbook: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
l_host_vars:
@@ -17,4 +17,4 @@
checks: [ 'package_version' ]
always: # destroy the container whether check passed or not
- - include: ../../teardown_container.yml
+ - include_tasks: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/setup_container.yml b/test/integration/openshift_health_checker/setup_container.yml
index e3459b376..0f0f8d366 100644
--- a/test/integration/openshift_health_checker/setup_container.yml
+++ b/test/integration/openshift_health_checker/setup_container.yml
@@ -46,16 +46,15 @@
- hosts: all
tasks:
-
# run before openshift_version to prevent it breaking
- - include: preflight/playbooks/tasks/enable_repo.yml
+ - include_tasks: preflight/playbooks/tasks/enable_repo.yml
vars: { repo_name: "ose-3.2" }
-- include: ../../../playbooks/init/main.yml
+- import_playbook: ../../../playbooks/init/main.yml
- hosts: all
tasks:
# put it back like it was for the tests
- - include: preflight/playbooks/tasks/enable_repo.yml
+ - include_tasks: preflight/playbooks/tasks/enable_repo.yml
vars: { repo_name: "ose-3.2", enabled: False }
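
These tests all drive a shared task file, tasks/enable_repo.yml, through include_tasks, passing it repo_file, repo_name and repo_enabled; the task file itself is not part of this diff. Purely for orientation, and not taken from the repository, such a file could toggle a yum repo's enabled flag with the ini_file module:

    ---
    # Hypothetical enable_repo.yml sketch; parameter names are inferred from
    # the vars passed to it in the playbooks above, not from the real file.
    - name: Toggle yum repo {{ repo_name }}
      ini_file:
        path: "/etc/yum.repos.d/{{ repo_file | default(repo_name) }}.repo"
        section: "{{ repo_name }}"
        option: enabled
        value: "{{ repo_enabled | default(1) }}"

Keeping the toggle in one included task file lets each test enable or disable exactly the repositories it needs before running its health check.
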
diff --git a/test/integration/run-tests.sh b/test/integration/run-tests.sh
index 680b64602..680b64602 100755..100644
--- a/test/integration/run-tests.sh
+++ b/test/integration/run-tests.sh
diff --git a/tox.ini b/tox.ini
index 46738cae5..9c35915d5 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,7 +3,6 @@ minversion=2.3.1
envlist =
py{27,35}-{flake8,pylint,unit}
py27-{yamllint,ansible_syntax,generate_validation}
- integration
skipsdist=True
skip_missing_interpreters=True
@@ -14,7 +13,6 @@ deps =
-rtest-requirements.txt
unit: -eutils
py35-flake8: flake8-bugbear==17.3.0
- integration: docker-py==1.10.6
commands =
unit: pytest {posargs}
@@ -23,4 +21,3 @@ commands =
yamllint: python setup.py yamllint
generate_validation: python setup.py generate_validation
ansible_syntax: python setup.py ansible_syntax
- integration: python -c 'print("run test/integration/run-tests.sh")'
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 1e2af2c61..dda8eb4c6 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -125,7 +125,6 @@ def write_inventory_vars(base_inventory, lb):
base_inventory.write('openshift_override_hostname_check=true\n')
if lb is not None:
- base_inventory.write('openshift_master_cluster_method=native\n')
base_inventory.write("openshift_master_cluster_hostname={}\n".format(lb.hostname))
base_inventory.write(
"openshift_master_cluster_public_hostname={}\n".format(lb.public_hostname))
@@ -266,7 +265,6 @@ def default_facts(hosts, verbose=False):
facts_env = os.environ.copy()
facts_env["OO_INSTALL_CALLBACK_FACTS_YAML"] = CFG.settings['ansible_callback_facts_yaml']
facts_env["ANSIBLE_CALLBACK_PLUGINS"] = CFG.settings['ansible_plugins_directory']
- facts_env["OPENSHIFT_MASTER_CLUSTER_METHOD"] = 'native'
if 'ansible_log_path' in CFG.settings:
facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
if 'ansible_config' in CFG.settings:
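
The last two hunks stop the quick installer from forcing openshift_master_cluster_method=native, both in the generated inventory and in the environment passed to the fact-gathering run; presumably the value is now defaulted or derived by the playbooks themselves. If an inventory ever needed to pin it explicitly, it remains an ordinary inventory or group variable, e.g. (hypothetical placement, shown only as a sketch):

    ---
    # group_vars entry for the cluster group, only if the default ever has
    # to be overridden; the installer no longer writes this key on its own.
    openshift_master_cluster_method: native
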