-rw-r--r--  .papr.inventory | 2
-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  DEPLOYMENT_TYPES.md | 2
-rw-r--r--  ansible.cfg | 6
-rw-r--r--  docs/proposals/crt_management_proposal.md | 8
-rw-r--r--  docs/proposals/role_decomposition.md | 14
-rw-r--r--  files/origin-components/apiserver-template.yaml | 6
-rw-r--r--  files/origin-components/console-config.yaml | 42
-rw-r--r--  files/origin-components/console-rbac-template.yaml | 38
-rw-r--r--  files/origin-components/console-template.yaml | 121
-rw-r--r--  filter_plugins/oo_filters.py | 1001
-rw-r--r--  filter_plugins/openshift_version.py | 69
-rw-r--r--  images/installer/Dockerfile | 2
-rw-r--r--  images/installer/Dockerfile.rhel7 | 2
-rw-r--r--  images/installer/origin-extra-root/etc/yum.repos.d/google-cloud-sdk.repo | 8
-rw-r--r--  inventory/hosts.example | 33
-rw-r--r--  inventory/hosts.grafana.example | 12
-rw-r--r--  lookup_plugins/README.md | 1
-rw-r--r--  openshift-ansible.spec | 307
-rw-r--r--  playbooks/adhoc/openshift_hosted_logging_efk.yaml | 2
-rw-r--r--  playbooks/adhoc/uninstall.yml | 15
-rw-r--r--  playbooks/aws/openshift-cluster/install.yml | 4
-rw-r--r--  playbooks/aws/openshift-cluster/provision.yml | 12
-rw-r--r--  playbooks/aws/openshift-cluster/provision_elb.yml | 9
-rw-r--r--  playbooks/aws/openshift-cluster/provision_instance.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/provision_nodes.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/provision_s3.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/provision_sec_group.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/provision_ssh_keypair.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/provision_vpc.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/seal_ami.yml | 2
-rw-r--r--  playbooks/aws/provisioning_vars.yml.example | 7
l---------  playbooks/byo/filter_plugins | 1
l---------  playbooks/byo/lookup_plugins | 1
l---------  playbooks/byo/openshift-cluster/filter_plugins | 1
l---------  playbooks/byo/openshift-cluster/lookup_plugins | 1
-rw-r--r--  playbooks/byo/rhel_subscribe.yml | 6
-rw-r--r--  playbooks/cluster-operator/aws/infrastructure.yml | 21
l---------  playbooks/cluster-operator/aws/roles | 1
l---------  playbooks/common/openshift-cluster/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/library | 1
l---------  playbooks/common/openshift-cluster/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml | 11
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 19
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml | 2
l---------  playbooks/common/openshift-cluster/upgrades/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/config.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 52
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml | 10
l---------  playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 2
l---------  playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 5
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml | 3
l---------  playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml | 8
l---------  playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml | 19
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml | 11
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 67
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml | 2
-rw-r--r--  playbooks/container-runtime/private/build_container_groups.yml | 6
-rw-r--r--  playbooks/container-runtime/private/config.yml | 15
-rw-r--r--  playbooks/container-runtime/private/setup_storage.yml | 18
-rw-r--r--  playbooks/container-runtime/setup_storage.yml | 6
-rw-r--r--  playbooks/deploy_cluster.yml | 3
-rw-r--r--  playbooks/gcp/provision.yml | 2
-rw-r--r--  playbooks/init/base_packages.yml | 37
-rw-r--r--  playbooks/init/evaluate_groups.yml | 1
-rw-r--r--  playbooks/init/facts.yml | 97
-rw-r--r--  playbooks/init/main.yml | 9
-rw-r--r--  playbooks/init/repos.yml | 10
-rw-r--r--  playbooks/init/sanity_checks.yml | 60
-rw-r--r--  playbooks/init/version.yml | 32
-rw-r--r--  playbooks/openshift-checks/adhoc.yml | 1
-rw-r--r--  playbooks/openshift-etcd/private/ca.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/certificates-backup.yml | 6
-rw-r--r--  playbooks/openshift-etcd/private/embedded2external.yml | 30
l---------  playbooks/openshift-etcd/private/filter_plugins | 1
l---------  playbooks/openshift-etcd/private/lookup_plugins | 1
-rw-r--r--  playbooks/openshift-etcd/private/migrate.yml | 24
-rw-r--r--  playbooks/openshift-etcd/private/redeploy-ca.yml | 38
-rw-r--r--  playbooks/openshift-etcd/private/restart.yml | 4
-rw-r--r--  playbooks/openshift-etcd/private/scaleup.yml | 8
-rw-r--r--  playbooks/openshift-etcd/private/server_certificates.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_backup.yml | 7
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_image_members.yml | 6
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_main.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_rpm_members.yml | 6
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_step.yml | 6
-rw-r--r--  playbooks/openshift-etcd/redeploy-certificates.yml | 2
-rw-r--r--  playbooks/openshift-etcd/upgrade.yml | 5
-rw-r--r--  playbooks/openshift-glusterfs/README.md | 2
-rw-r--r--  playbooks/openshift-glusterfs/private/config.yml | 10
l---------  playbooks/openshift-glusterfs/private/filter_plugins | 1
l---------  playbooks/openshift-glusterfs/private/lookup_plugins | 1
-rw-r--r--  playbooks/openshift-grafana/config.yml | 4
-rw-r--r--  playbooks/openshift-grafana/private/config.yml | 6
l---------  playbooks/openshift-grafana/private/filter_plugins (renamed from playbooks/aws/openshift-cluster/filter_plugins) | 0
l---------  playbooks/openshift-grafana/private/lookup_plugins (renamed from playbooks/aws/openshift-cluster/lookup_plugins) | 0
l---------  playbooks/openshift-grafana/private/roles | 1
-rw-r--r--  playbooks/openshift-hosted/private/config.yml | 4
-rw-r--r--  playbooks/openshift-hosted/private/install_docker_gc.yml | 2
-rw-r--r--  playbooks/openshift-hosted/private/openshift_hosted_create_projects.yml | 2
-rw-r--r--  playbooks/openshift-hosted/private/openshift_hosted_registry.yml | 2
-rw-r--r--  playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml | 13
-rw-r--r--  playbooks/openshift-hosted/private/openshift_hosted_router.yml | 2
-rw-r--r--  playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml | 26
-rw-r--r--  playbooks/openshift-hosted/private/redeploy-registry-certificates.yml | 14
-rw-r--r--  playbooks/openshift-hosted/private/redeploy-router-certificates.yml | 20
-rw-r--r--  playbooks/openshift-loadbalancer/private/config.yml | 6
l---------  playbooks/openshift-loadbalancer/private/filter_plugins | 1
l---------  playbooks/openshift-loadbalancer/private/lookup_plugins | 1
-rw-r--r--  playbooks/openshift-logging/private/config.yml | 3
l---------  playbooks/openshift-logging/private/filter_plugins | 1
l---------  playbooks/openshift-logging/private/library | 1
l---------  playbooks/openshift-logging/private/lookup_plugins | 1
-rw-r--r--  playbooks/openshift-management/add_many_container_providers.yml | 2
-rw-r--r--  playbooks/openshift-management/private/add_container_provider.yml | 2
-rw-r--r--  playbooks/openshift-management/private/config.yml | 2
l---------  playbooks/openshift-management/private/filter_plugins | 1
l---------  playbooks/openshift-management/private/library | 1
-rw-r--r--  playbooks/openshift-management/private/uninstall.yml | 2
-rw-r--r--  playbooks/openshift-master/private/additional_config.yml | 4
-rw-r--r--  playbooks/openshift-master/private/certificates-backup.yml | 1
-rw-r--r--  playbooks/openshift-master/private/certificates.yml | 4
-rw-r--r--  playbooks/openshift-master/private/config.yml | 25
l---------  playbooks/openshift-master/private/filter_plugins | 1
l---------  playbooks/openshift-master/private/library | 1
l---------  playbooks/openshift-master/private/lookup_plugins | 1
-rw-r--r--  playbooks/openshift-master/private/redeploy-openshift-ca.yml | 54
-rw-r--r--  playbooks/openshift-master/private/tasks/restart_hosts.yml | 1
-rw-r--r--  playbooks/openshift-master/private/tasks/restart_services.yml | 2
-rw-r--r--  playbooks/openshift-master/private/tasks/wire_aggregator.yml | 4
-rw-r--r--  playbooks/openshift-master/private/validate_restart.yml | 2
-rw-r--r--  playbooks/openshift-master/scaleup.yml | 1
-rw-r--r--  playbooks/openshift-metrics/private/config.yml | 3
l---------  playbooks/openshift-metrics/private/library | 1
l---------  playbooks/openshift-nfs/private/filter_plugins | 1
l---------  playbooks/openshift-nfs/private/lookup_plugins | 1
-rw-r--r--  playbooks/openshift-node/private/additional_config.yml | 14
-rw-r--r--  playbooks/openshift-node/private/configure_nodes.yml | 5
-rw-r--r--  playbooks/openshift-node/private/containerized_nodes.yml | 5
l---------  playbooks/openshift-node/private/filter_plugins | 1
-rw-r--r--  playbooks/openshift-node/private/image_prep.yml | 7
l---------  playbooks/openshift-node/private/lookup_plugins | 1
-rw-r--r--  playbooks/openshift-node/private/restart.yml | 2
-rw-r--r--  playbooks/openshift-node/private/setup.yml | 3
-rw-r--r--  playbooks/openshift-node/scaleup.yml | 1
-rw-r--r--  playbooks/openshift-web-console/config.yml | 4
-rw-r--r--  playbooks/openshift-web-console/private/config.yml | 31
l---------  playbooks/openshift-web-console/private/roles | 1
-rw-r--r--  playbooks/openstack/README.md | 10
-rw-r--r--  playbooks/openstack/advanced-configuration.md | 127
-rwxr-xr-x  playbooks/openstack/inventory.py (renamed from playbooks/openstack/sample-inventory/inventory.py) | 20
-rw-r--r--  playbooks/openstack/openshift-cluster/install.yml | 3
-rw-r--r--  playbooks/openstack/openshift-cluster/prerequisites.yml | 4
-rw-r--r--  playbooks/openstack/openshift-cluster/provision.yml | 26
-rw-r--r--  playbooks/openstack/sample-inventory/group_vars/OSEv3.yml | 5
-rw-r--r--  playbooks/openstack/sample-inventory/group_vars/all.yml | 5
-rw-r--r--  playbooks/prerequisites.yml | 7
-rw-r--r--  playbooks/redeploy-certificates.yml | 2
-rw-r--r--  roles/ansible_service_broker/meta/main.yml | 1
-rw-r--r--  roles/ansible_service_broker/tasks/install.yml | 19
-rw-r--r--  roles/ansible_service_broker/vars/default_images.yml | 2
-rw-r--r--  roles/calico/meta/main.yml | 1
-rw-r--r--  roles/calico/tasks/main.yml | 2
-rw-r--r--  roles/calico_master/meta/main.yml | 1
-rw-r--r--  roles/calico_master/tasks/main.yml | 4
-rw-r--r--  roles/cockpit-ui/meta/main.yml | 2
-rw-r--r--  roles/cockpit-ui/tasks/main.yml | 2
-rw-r--r--  roles/cockpit/meta/main.yml | 2
-rw-r--r--  roles/cockpit/tasks/main.yml | 4
-rw-r--r--  roles/container_runtime/README.md | 4
-rw-r--r--  roles/container_runtime/defaults/main.yml | 20
-rw-r--r--  roles/container_runtime/meta/main.yml | 2
-rw-r--r--  roles/container_runtime/tasks/common/post.yml | 4
-rw-r--r--  roles/container_runtime/tasks/common/syscontainer_packages.yml | 6
-rw-r--r--  roles/container_runtime/tasks/docker_storage_setup_overlay.yml | 10
-rw-r--r--  roles/container_runtime/tasks/docker_upgrade_check.yml | 29
-rw-r--r--  roles/container_runtime/tasks/main.yml | 2
-rw-r--r--  roles/container_runtime/tasks/package_docker.yml | 8
-rw-r--r--  roles/container_runtime/tasks/registry_auth.yml | 2
-rw-r--r--  roles/container_runtime/tasks/systemcontainer_crio.yml | 15
-rw-r--r--  roles/container_runtime/tasks/systemcontainer_docker.yml | 8
-rw-r--r--  roles/container_runtime/templates/crio-network.j2 | 9
-rw-r--r--  roles/container_runtime/templates/docker_storage_setup.j2 | 12
-rw-r--r--  roles/contiv/README.md | 4
-rw-r--r--  roles/contiv/defaults/main.yml | 149
-rw-r--r--  roles/contiv/meta/main.yml | 15
-rw-r--r--  roles/contiv/tasks/aci.yml | 2
-rw-r--r--  roles/contiv/tasks/api_proxy.yml | 120
-rw-r--r--  roles/contiv/tasks/default_network.yml | 58
-rw-r--r--  roles/contiv/tasks/download_bins.yml | 20
-rw-r--r--  roles/contiv/tasks/etcd.yml | 114
-rw-r--r--  roles/contiv/tasks/main.yml | 9
-rw-r--r--  roles/contiv/tasks/netmaster.yml | 30
-rw-r--r--  roles/contiv/tasks/netmaster_firewalld.yml | 23
-rw-r--r--  roles/contiv/tasks/netmaster_iptables.yml | 51
-rw-r--r--  roles/contiv/tasks/netplugin.yml | 33
-rw-r--r--  roles/contiv/tasks/netplugin_firewalld.yml | 39
-rw-r--r--  roles/contiv/tasks/netplugin_iptables.yml | 98
-rw-r--r--  roles/contiv/tasks/old_version_cleanup.yml | 43
-rw-r--r--  roles/contiv/tasks/old_version_cleanup_firewalld.yml | 11
-rw-r--r--  roles/contiv/tasks/old_version_cleanup_iptables.yml | 44
-rw-r--r--  roles/contiv/tasks/ovs.yml | 2
-rw-r--r--  roles/contiv/tasks/packageManagerInstall.yml | 5
-rw-r--r--  roles/contiv/tasks/pkgMgrInstallers/centos-install.yml | 12
-rw-r--r--  roles/contiv/templates/aci-gw.service | 6
-rw-r--r--  roles/contiv/templates/aci_gw.j2 | 14
-rw-r--r--  roles/contiv/templates/api-proxy-daemonset.yml.j2 | 57
-rw-r--r--  roles/contiv/templates/api-proxy-secrets.yml.j2 | 12
-rw-r--r--  roles/contiv/templates/contiv.cfg.j2 | 2
-rw-r--r--  roles/contiv/templates/contiv.cfg.master.j2 | 2
-rw-r--r--  roles/contiv/templates/etcd-daemonset.yml.j2 | 83
-rw-r--r--  roles/contiv/templates/etcd-proxy-daemonset.yml.j2 | 55
-rw-r--r--  roles/contiv/templates/etcd-scc.yml.j2 | 42
-rw-r--r--  roles/contiv/templates/netmaster.env.j2 | 2
-rw-r--r--  roles/contiv/templates/netmaster.j2 | 1
-rw-r--r--  roles/contiv/templates/netmaster.service | 2
-rw-r--r--  roles/contiv/templates/netplugin.j2 | 5
-rw-r--r--  roles/contiv/templates/netplugin.service | 2
-rw-r--r--  roles/contiv_auth_proxy/README.md | 29
-rw-r--r--  roles/contiv_auth_proxy/defaults/main.yml | 12
-rw-r--r--  roles/contiv_auth_proxy/files/auth-proxy.service | 13
-rw-r--r--  roles/contiv_auth_proxy/files/cert.pem | 33
-rw-r--r--  roles/contiv_auth_proxy/files/key.pem | 51
-rw-r--r--  roles/contiv_auth_proxy/handlers/main.yml | 2
-rw-r--r--  roles/contiv_auth_proxy/tasks/cleanup.yml | 10
-rw-r--r--  roles/contiv_auth_proxy/tasks/main.yml | 37
-rw-r--r--  roles/contiv_auth_proxy/templates/auth_proxy.j2 | 36
-rw-r--r--  roles/contiv_auth_proxy/tests/inventory | 1
-rw-r--r--  roles/contiv_auth_proxy/tests/test.yml | 5
-rw-r--r--  roles/contiv_auth_proxy/vars/main.yml | 2
-rw-r--r--  roles/contiv_facts/defaults/main.yaml | 9
-rw-r--r--  roles/contiv_facts/tasks/fedora-install.yml | 12
-rw-r--r--  roles/contiv_facts/tasks/main.yml | 65
-rw-r--r--  roles/contiv_facts/tasks/rpm.yml | 8
-rw-r--r--  roles/etcd/defaults/main.yaml | 4
-rw-r--r--  roles/etcd/meta/main.yml | 1
-rw-r--r--  roles/etcd/tasks/auxiliary/drop_etcdctl.yml | 2
-rw-r--r--  roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml | 26
-rw-r--r--  roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml | 22
-rw-r--r--  roles/etcd/tasks/migration/add_ttls.yml | 2
-rw-r--r--  roles/etcd/tasks/migration/migrate.yml | 2
-rw-r--r--  roles/etcd/tasks/version_detect.yml | 4
-rw-r--r--  roles/flannel/defaults/main.yaml | 2
-rw-r--r--  roles/flannel/handlers/main.yml | 4
-rw-r--r--  roles/flannel/meta/main.yml | 3
-rw-r--r--  roles/flannel/tasks/main.yml | 12
-rw-r--r--  roles/flannel_register/meta/main.yml | 3
-rw-r--r--  roles/installer_checkpoint/callback_plugins/installer_checkpoint.py | 5
-rw-r--r--  roles/kuryr/meta/main.yml | 5
-rw-r--r--  roles/kuryr/tasks/node.yaml | 2
-rw-r--r--  roles/kuryr/templates/cni-daemonset.yaml.j2 | 19
-rw-r--r--  roles/kuryr/templates/configmap.yaml.j2 | 357
-rw-r--r--  roles/kuryr/templates/controller-deployment.yaml.j2 | 7
-rw-r--r--  roles/lib_openshift/library/conditional_set_fact.py (renamed from roles/openshift_sanitize_inventory/library/conditional_set_fact.py) | 18
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_scale.py | 351
-rw-r--r--  roles/lib_os_firewall/README.md | 63
-rw-r--r--  roles/lib_utils/action_plugins/generate_pv_pvcs_list.py (renamed from roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py) | 0
-rw-r--r--  roles/lib_utils/action_plugins/sanity_checks.py | 181
-rw-r--r--  roles/lib_utils/callback_plugins/aa_version_requirement.py (renamed from callback_plugins/aa_version_requirement.py) | 0
-rw-r--r--  roles/lib_utils/callback_plugins/openshift_quick_installer.py (renamed from callback_plugins/openshift_quick_installer.py) | 4
-rw-r--r--  roles/lib_utils/filter_plugins/oo_cert_expiry.py (renamed from roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py) | 1
-rw-r--r--  roles/lib_utils/filter_plugins/oo_filters.py | 627
-rw-r--r--  roles/lib_utils/filter_plugins/openshift_aws_filters.py (renamed from roles/openshift_aws/filter_plugins/openshift_aws_filters.py) | 0
-rw-r--r--  roles/lib_utils/filter_plugins/openshift_hosted_filters.py (renamed from roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py) | 0
-rw-r--r--  roles/lib_utils/filter_plugins/openshift_master.py (renamed from roles/openshift_master_facts/filter_plugins/openshift_master.py) | 6
-rwxr-xr-x  roles/lib_utils/library/delegated_serial_command.py (renamed from roles/etcd/library/delegated_serial_command.py) | 0
-rw-r--r--  roles/lib_utils/library/kubeclient_ca.py (renamed from library/kubeclient_ca.py) | 0
-rw-r--r-- [-rwxr-xr-x]  roles/lib_utils/library/modify_yaml.py (renamed from library/modify_yaml.py) | 0
-rw-r--r--  roles/lib_utils/library/openshift_cert_expiry.py (renamed from roles/openshift_certificate_expiry/library/openshift_cert_expiry.py) | 0
-rw-r--r--  roles/lib_utils/library/openshift_container_binary_sync.py (renamed from roles/openshift_cli/library/openshift_container_binary_sync.py) | 0
-rw-r--r-- [-rwxr-xr-x]  roles/lib_utils/library/os_firewall_manage_iptables.py (renamed from roles/lib_os_firewall/library/os_firewall_manage_iptables.py) | 0
-rw-r--r--  roles/lib_utils/library/rpm_q.py (renamed from library/rpm_q.py) | 0
-rw-r--r--  roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py | 143
-rw-r--r--  roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py | 117
-rw-r--r--  roles/lib_utils/test/conftest.py (renamed from roles/openshift_certificate_expiry/test/conftest.py) | 53
-rw-r--r--  roles/lib_utils/test/openshift_master_facts_bad_input_tests.py (renamed from roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py) | 0
-rw-r--r--  roles/lib_utils/test/openshift_master_facts_conftest.py (renamed from roles/openshift_master_facts/test/conftest.py) | 0
-rw-r--r--  roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py (renamed from roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py) | 0
-rw-r--r--  roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py (renamed from roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py) | 0
-rw-r--r--  roles/lib_utils/test/test_fakeopensslclasses.py (renamed from roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py) | 0
-rw-r--r--  roles/lib_utils/test/test_load_and_handle_cert.py (renamed from roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py) | 0
-rw-r--r--  roles/nickhammond.logrotate/tasks/main.yml | 2
-rw-r--r--  roles/nuage_ca/meta/main.yml | 2
-rw-r--r--  roles/nuage_ca/tasks/main.yaml | 2
-rw-r--r--  roles/nuage_common/tasks/main.yml | 6
-rw-r--r--  roles/nuage_master/meta/main.yml | 2
-rw-r--r--  roles/nuage_master/tasks/main.yaml | 18
-rw-r--r--  roles/nuage_master/tasks/serviceaccount.yml | 2
-rw-r--r--  roles/nuage_node/meta/main.yml | 2
-rw-r--r--  roles/nuage_node/tasks/main.yaml | 8
-rw-r--r--  roles/openshift_aws/README.md | 6
-rw-r--r--  roles/openshift_aws/defaults/main.yml | 35
-rw-r--r--  roles/openshift_aws/tasks/build_node_group.yml | 3
-rw-r--r--  roles/openshift_aws/tasks/provision.yml | 17
-rw-r--r--  roles/openshift_aws/tasks/provision_elb.yml | 15
-rw-r--r--  roles/openshift_aws/tasks/provision_instance.yml | 8
-rw-r--r--  roles/openshift_aws/tasks/provision_nodes.yml | 17
-rw-r--r--  roles/openshift_aws/tasks/scale_group.yml | 2
-rw-r--r--  roles/openshift_aws/tasks/wait_for_groups.yml | 1
-rw-r--r--  roles/openshift_builddefaults/meta/main.yml | 1
-rw-r--r--  roles/openshift_buildoverrides/meta/main.yml | 1
-rw-r--r--  roles/openshift_buildoverrides/vars/main.yml | 1
-rw-r--r--  roles/openshift_ca/meta/main.yml | 1
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 23
-rw-r--r--  roles/openshift_certificate_expiry/meta/main.yml | 3
-rw-r--r--  roles/openshift_certificate_expiry/tasks/main.yml | 6
-rw-r--r--  roles/openshift_cli/defaults/main.yml | 2
-rw-r--r--  roles/openshift_cli/meta/main.yml | 1
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 10
-rw-r--r--  roles/openshift_cloud_provider/meta/main.yml | 1
-rw-r--r--  roles/openshift_cloud_provider/tasks/main.yml | 3
-rw-r--r--  roles/openshift_cloud_provider/tasks/vsphere.yml | 6
-rw-r--r--  roles/openshift_cloud_provider/templates/openstack.conf.j2 | 4
-rw-r--r--  roles/openshift_cloud_provider/templates/vsphere.conf.j2 | 15
-rw-r--r--  roles/openshift_cloud_provider/vars/main.yml | 1
-rw-r--r--  roles/openshift_cluster_autoscaler/README.md | 2
-rw-r--r--  roles/openshift_cluster_autoscaler/meta/main.yml | 1
-rw-r--r--  roles/openshift_daemonset_config/defaults/main.yml | 19
-rw-r--r--  roles/openshift_daemonset_config/meta/main.yml | 3
-rw-r--r--  roles/openshift_daemonset_config/tasks/main.yml | 58
-rw-r--r--  roles/openshift_daemonset_config/templates/daemonset.yml.j2 | 142
-rw-r--r--  roles/openshift_default_storage_class/meta/main.yml | 1
-rw-r--r--  roles/openshift_docker_gc/meta/main.yml | 1
-rw-r--r--  roles/openshift_etcd/meta/main.yml | 1
-rw-r--r--  roles/openshift_etcd_client_certificates/meta/main.yml | 3
-rw-r--r--  roles/openshift_etcd_client_certificates/tasks/main.yml | 2
-rw-r--r--  roles/openshift_etcd_facts/meta/main.yml | 1
-rw-r--r--  roles/openshift_etcd_facts/vars/main.yml | 4
-rw-r--r--  roles/openshift_examples/defaults/main.yml | 2
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/db-templates/mongodb-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/db-templates/redis-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json | 23
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json | 23
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json | 10
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-ephemeral-template.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-persistent-template.json | 13
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json | 3
-rw-r--r--  roles/openshift_examples/meta/main.yml | 4
-rw-r--r--  roles/openshift_examples/tasks/main.yml | 28
-rw-r--r--  roles/openshift_excluder/tasks/install.yml | 10
-rw-r--r--  roles/openshift_excluder/tasks/verify_excluder.yml | 2
-rw-r--r--  roles/openshift_expand_partition/README.md | 2
-rw-r--r--  roles/openshift_expand_partition/tasks/main.yml | 6
-rw-r--r--  roles/openshift_facts/defaults/main.yml | 10
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 195
-rw-r--r--  roles/openshift_grafana/defaults/main.yml | 12
-rw-r--r--  roles/openshift_grafana/files/grafana-ocp-oauth.yml | 661
-rw-r--r--  roles/openshift_grafana/files/grafana-ocp.yml | 76
-rw-r--r--  roles/openshift_grafana/files/openshift-cluster-monitoring.json | 5138
-rw-r--r--  roles/openshift_grafana/meta/main.yml | 13
-rw-r--r--  roles/openshift_grafana/tasks/gf-permissions.yml | 12
-rw-r--r--  roles/openshift_grafana/tasks/main.yml | 122
-rw-r--r--  roles/openshift_health_checker/callback_plugins/zz_failure_summary.py | 2
-rw-r--r--  roles/openshift_health_checker/meta/main.yml | 1
-rw-r--r--  roles/openshift_health_checker/openshift_checks/__init__.py | 50
-rw-r--r--  roles/openshift_health_checker/openshift_checks/disk_availability.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 14
-rw-r--r--  roles/openshift_health_checker/openshift_checks/etcd_traffic.py | 4
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/kibana.py | 13
-rw-r--r--  roles/openshift_health_checker/openshift_checks/mixins.py | 8
-rw-r--r--  roles/openshift_health_checker/openshift_checks/ovs_version.py | 27
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 58
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py | 41
-rw-r--r--  roles/openshift_health_checker/test/docker_storage_test.py | 8
-rw-r--r--  roles/openshift_health_checker/test/etcd_traffic_test.py | 12
-rw-r--r--  roles/openshift_health_checker/test/kibana_test.py | 12
-rw-r--r--  roles/openshift_health_checker/test/mixins_test.py | 6
-rw-r--r--  roles/openshift_health_checker/test/ovs_version_test.py | 29
-rw-r--r--  roles/openshift_health_checker/test/package_availability_test.py | 6
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py | 11
-rw-r--r--  roles/openshift_hosted/defaults/main.yml | 2
-rw-r--r--  roles/openshift_hosted/meta/main.yml | 2
-rw-r--r--  roles/openshift_hosted/tasks/main.yml | 4
-rw-r--r--  roles/openshift_hosted/tasks/registry.yml | 25
-rw-r--r--  roles/openshift_hosted/tasks/registry_storage.yml | 4
-rw-r--r--  roles/openshift_hosted/tasks/router.yml | 15
-rw-r--r--  roles/openshift_hosted/tasks/storage/glusterfs.yml | 2
-rw-r--r--  roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml | 15
-rw-r--r--  roles/openshift_hosted/tasks/wait_for_pod.yml | 12
-rw-r--r--  roles/openshift_hosted_templates/defaults/main.yml | 2
-rw-r--r--  roles/openshift_hosted_templates/meta/main.yml | 4
-rw-r--r--  roles/openshift_hosted_templates/tasks/main.yml | 14
-rw-r--r--  roles/openshift_loadbalancer/defaults/main.yml | 8
-rw-r--r--  roles/openshift_loadbalancer/meta/main.yml | 2
-rw-r--r--  roles/openshift_loadbalancer/tasks/main.yml | 16
-rw-r--r--  roles/openshift_loadbalancer/templates/haproxy.cfg.j2 | 2
-rw-r--r--  roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 | 2
-rw-r--r--  roles/openshift_logging/README.md | 3
-rw-r--r--  roles/openshift_logging/filter_plugins/openshift_logging.py | 34
-rw-r--r--  roles/openshift_logging/library/logging_patch.py | 112
-rw-r--r--  roles/openshift_logging/library/openshift_logging_facts.py | 17
-rw-r--r--  roles/openshift_logging/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging/tasks/annotate_ops_projects.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/delete_logging.yaml | 17
-rw-r--r--  roles/openshift_logging/tasks/generate_certs.yaml | 6
-rw-r--r--  roles/openshift_logging/tasks/generate_jks.yaml | 6
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 48
-rw-r--r--  roles/openshift_logging/tasks/main.yaml | 7
-rw-r--r--  roles/openshift_logging/tasks/patch_configmap_file.yaml | 35
-rw-r--r--  roles/openshift_logging/tasks/patch_configmap_files.yaml | 31
-rw-r--r--  roles/openshift_logging/tasks/procure_server_certs.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/set_defaults_from_current.yml | 34
-rw-r--r--  roles/openshift_logging/tasks/update_master_config.yaml | 1
-rw-r--r--  roles/openshift_logging_curator/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_curator/tasks/main.yaml | 17
-rw-r--r--  roles/openshift_logging_curator/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_elasticsearch/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/determine_version.yaml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/get_es_version.yml | 42
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 54
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml | 62
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/main.yml | 6
-rw-r--r--  roles/openshift_logging_eventrouter/meta/main.yaml (renamed from roles/openshift_node_facts/meta/main.yml) | 8
-rw-r--r--  roles/openshift_logging_eventrouter/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_logging_fluentd/defaults/main.yml | 1
-rw-r--r--  roles/openshift_logging_fluentd/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_fluentd/tasks/label_and_wait.yaml | 3
-rw-r--r--  roles/openshift_logging_fluentd/tasks/main.yaml | 44
-rw-r--r--  roles/openshift_logging_fluentd/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_kibana/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_kibana/tasks/main.yaml | 8
-rw-r--r--  roles/openshift_logging_kibana/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_mux/defaults/main.yml | 2
-rw-r--r--  roles/openshift_logging_mux/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_mux/tasks/main.yaml | 30
-rw-r--r--  roles/openshift_logging_mux/vars/main.yml | 4
-rw-r--r--  roles/openshift_manage_node/meta/main.yml | 1
-rw-r--r--  roles/openshift_manage_node/tasks/main.yml | 7
-rw-r--r--  roles/openshift_manageiq/meta/main.yml | 1
-rw-r--r--  roles/openshift_management/tasks/add_container_provider.yml | 4
-rw-r--r--  roles/openshift_management/tasks/main.yml | 2
-rw-r--r--  roles/openshift_management/tasks/storage/nfs.yml | 6
-rw-r--r--  roles/openshift_master/defaults/main.yml | 19
-rw-r--r--  roles/openshift_master/meta/main.yml | 1
-rw-r--r--  roles/openshift_master/tasks/main.yml | 15
-rw-r--r--  roles/openshift_master/tasks/registry_auth.yml | 2
-rw-r--r--  roles/openshift_master/tasks/set_loopback_context.yml | 8
-rw-r--r--  roles/openshift_master/tasks/system_container.yml | 6
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 6
-rw-r--r--  roles/openshift_master/tasks/upgrade.yml | 2
-rw-r--r--  roles/openshift_master/tasks/upgrade/rpm_upgrade.yml | 23
-rw-r--r--  roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml | 2
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master.j2 | 2
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 | 2
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 | 2
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 24
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 | 2
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 | 2
-rw-r--r--  roles/openshift_master_certificates/meta/main.yml | 3
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 28
l---------  roles/openshift_master_facts/filter_plugins/oo_filters.py | 1
-rw-r--r--  roles/openshift_master_facts/meta/main.yml | 1
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 8
-rw-r--r--  roles/openshift_metrics/meta/main.yaml | 5
-rw-r--r--  roles/openshift_metrics/tasks/generate_certificates.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_cassandra.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_hawkular.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_heapster.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_metrics.yaml | 19
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_metrics/tasks/oc_apply.yaml | 14
-rw-r--r--  roles/openshift_metrics/tasks/pre_install.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/setup_certificate.yaml | 6
-rw-r--r--  roles/openshift_metrics/tasks/start_metrics.yaml | 6
-rw-r--r--  roles/openshift_metrics/tasks/stop_metrics.yaml | 6
-rw-r--r--  roles/openshift_metrics/tasks/uninstall_hosa.yaml | 4
-rw-r--r--  roles/openshift_metrics/tasks/uninstall_metrics.yaml | 15
-rw-r--r--  roles/openshift_metrics/tasks/update_master_config.yaml | 1
-rw-r--r--  roles/openshift_metrics/templates/hawkular_metrics_rc.j2 | 2
-rw-r--r--  roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py | 21
-rw-r--r--  roles/openshift_named_certificates/meta/main.yml | 1
-rw-r--r--  roles/openshift_named_certificates/tasks/main.yml | 5
-rw-r--r--  roles/openshift_nfs/meta/main.yml | 2
-rw-r--r--  roles/openshift_nfs/tasks/create_export.yml | 2
-rw-r--r--  roles/openshift_node/defaults/main.yml | 77
-rw-r--r--  roles/openshift_node/handlers/main.yml | 2
-rw-r--r--  roles/openshift_node/meta/main.yml | 7
-rw-r--r--  roles/openshift_node/tasks/config.yml | 12
-rw-r--r--  roles/openshift_node/tasks/container_images.yml | 2
-rw-r--r--  roles/openshift_node/tasks/dnsmasq_install.yml | 2
-rw-r--r--  roles/openshift_node/tasks/install.yml | 42
-rw-r--r--  roles/openshift_node/tasks/main.yml | 16
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 4
-rw-r--r--  roles/openshift_node/tasks/openvswitch_system_container.yml | 8
-rw-r--r--  roles/openshift_node/tasks/registry_auth.yml | 2
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/ceph.yml | 2
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/glusterfs.yml | 2
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/iscsi.yml | 2
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/nfs.yml | 2
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 4
-rw-r--r--  roles/openshift_node/tasks/upgrade.yml | 7
-rw-r--r--  roles/openshift_node/tasks/upgrade/config_changes.yml | 10
-rw-r--r--  roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml | 6
-rw-r--r--  roles/openshift_node/tasks/upgrade/restart.yml | 2
-rw-r--r--  roles/openshift_node/tasks/upgrade/rpm_upgrade.yml | 4
-rw-r--r--  roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml | 4
-rw-r--r--  roles/openshift_node/tasks/upgrade/stop_services.yml | 4
-rw-r--r--  roles/openshift_node/tasks/upgrade_pre.yml | 6
-rw-r--r--  roles/openshift_node/templates/node.service.j2 | 2
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 18
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.dep.service | 10
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 2
-rw-r--r--  roles/openshift_node/templates/openvswitch.docker.service | 2
-rw-r--r--  roles/openshift_node_certificates/defaults/main.yml | 2
-rw-r--r--  roles/openshift_node_certificates/meta/main.yml | 3
-rw-r--r--  roles/openshift_node_certificates/tasks/main.yml | 33
-rw-r--r--  roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py | 32
-rw-r--r--  roles/openshift_node_facts/tasks/main.yml | 25
-rw-r--r--  roles/openshift_node_group/defaults/main.yml | 8
-rw-r--r--  roles/openshift_openstack/defaults/main.yml | 5
-rw-r--r--  roles/openshift_openstack/tasks/check-prerequisites.yml | 2
-rw-r--r--  roles/openshift_openstack/templates/heat_stack.yaml.j2 | 95
-rw-r--r--  roles/openshift_openstack/templates/heat_stack_server.yaml.j2 | 3
-rw-r--r--  roles/openshift_persistent_volumes/meta/main.yml | 1
-rw-r--r--  roles/openshift_persistent_volumes/tasks/main.yml | 3
-rw-r--r--  roles/openshift_persistent_volumes/tasks/pv.yml | 4
-rw-r--r--  roles/openshift_persistent_volumes/tasks/pvc.yml | 4
-rw-r--r--  roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2 | 2
-rw-r--r--  roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 | 4
-rw-r--r--  roles/openshift_project_request_template/tasks/main.yml | 4
-rw-r--r--  roles/openshift_prometheus/meta/main.yaml | 5
-rw-r--r--  roles/openshift_prometheus/tasks/install_prometheus.yaml | 10
-rw-r--r--  roles/openshift_prometheus/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_provisioners/meta/main.yaml | 1
-rw-r--r--  roles/openshift_provisioners/tasks/install_efs.yaml | 6
-rw-r--r--  roles/openshift_provisioners/tasks/oc_apply.yaml | 22
-rw-r--r--  roles/openshift_provisioners/tasks/uninstall_provisioners.yaml | 6
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_repos/tasks/rhel_repos.yml | 5
-rw-r--r--  roles/openshift_repos/templates/CentOS-OpenShift-Origin37.repo.j2 | 27
-rw-r--r--  roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py | 10
-rw-r--r--  roles/openshift_sanitize_inventory/meta/main.yml | 4
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/main.yml | 26
-rw-r--r--  roles/openshift_sanitize_inventory/vars/main.yml | 3
-rw-r--r--  roles/openshift_service_catalog/defaults/main.yml | 1
-rw-r--r--  roles/openshift_service_catalog/tasks/generate_certs.yml | 8
-rw-r--r--  roles/openshift_service_catalog/tasks/install.yml | 26
-rw-r--r--  roles/openshift_service_catalog/tasks/remove.yml | 6
-rw-r--r--  roles/openshift_service_catalog/tasks/start_api_server.yml | 2
-rw-r--r--  roles/openshift_service_catalog/templates/api_server.j2 | 2
-rw-r--r--  roles/openshift_service_catalog/templates/controller_manager.j2 | 6
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 8
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.9/deploy-heketi-template.yml | 133
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-pvcs-template.yml | 67
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-template.yml | 140
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.9/glusterblock-provisioner.yml | 104
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.9/glusterfs-template.yml | 154
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.9/heketi-template.yml | 136
-rw-r--r--  roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py | 23
-rw-r--r--  roles/openshift_storage_glusterfs/meta/main.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml | 4
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 16
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 1
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml | 6
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 1
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-storageclass.yml.j2 | 17
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.9/heketi-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.9/heketi-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.9/heketi.json.j2 | 42
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.9/topology.json.j2 | 49
-rw-r--r--  roles/openshift_storage_nfs/meta/main.yml | 2
-rw-r--r--  roles/openshift_storage_nfs_lvm/README.md | 8
-rw-r--r--  roles/openshift_storage_nfs_lvm/meta/main.yml | 1
-rw-r--r--  roles/openshift_storage_nfs_lvm/tasks/main.yml | 2
-rw-r--r--  roles/openshift_storage_nfs_lvm/tasks/nfs.yml | 2
-rw-r--r--  roles/openshift_version/defaults/main.yml | 2
-rw-r--r--  roles/openshift_version/tasks/check_available_rpms.yml | 10
-rw-r--r--  roles/openshift_version/tasks/first_master.yml | 30
-rw-r--r--  roles/openshift_version/tasks/first_master_containerized_version.yml (renamed from roles/openshift_version/tasks/set_version_containerized.yml) | 7
-rw-r--r--  roles/openshift_version/tasks/first_master_rpm_version.yml | 20
-rw-r--r--  roles/openshift_version/tasks/main.yml | 210
-rw-r--r--  roles/openshift_version/tasks/masters_and_nodes.yml | 42
-rw-r--r--  roles/openshift_version/tasks/set_version_rpm.yml | 24
-rw-r--r--  roles/openshift_web_console/defaults/main.yml | 3
-rw-r--r--  roles/openshift_web_console/meta/main.yaml | 19
-rw-r--r--  roles/openshift_web_console/tasks/install.yml | 111
-rw-r--r--  roles/openshift_web_console/tasks/main.yml | 8
-rw-r--r--  roles/openshift_web_console/tasks/remove.yml | 5
-rw-r--r--  roles/openshift_web_console/tasks/update_console_config.yml | 71
-rw-r--r--  roles/openshift_web_console/vars/default_images.yml | 4
-rw-r--r--  roles/openshift_web_console/vars/main.yml | 6
-rw-r--r--  roles/openshift_web_console/vars/openshift-enterprise.yml | 4
-rw-r--r--  roles/os_firewall/README.md | 4
-rw-r--r--  roles/template_service_broker/meta/main.yml | 2
-rw-r--r--  roles/template_service_broker/tasks/install.yml | 14
-rw-r--r--  roles/template_service_broker/tasks/remove.yml | 6
-rw-r--r--  roles/template_service_broker/vars/default_images.yml | 4
-rw-r--r--  roles/template_service_broker/vars/openshift-enterprise.yml | 4
-rw-r--r--  roles/tuned/tasks/main.yml | 2
-rw-r--r--  setup.py | 17
-rw-r--r--  test/ci/README.md | 14
-rw-r--r--  test/ci/extra_vars/default.yml | 4
-rwxr-xr-x  test/ci/install.sh | 34
-rw-r--r--  test/ci/inventory/group_vars/OSEv3/checks.yml | 4
-rw-r--r--  test/ci/inventory/group_vars/OSEv3/general.yml | 23
-rw-r--r--  test/ci/inventory/group_vars/OSEv3/logging.yml | 37
-rw-r--r--  test/ci/inventory/group_vars/all.yml | 13
-rw-r--r--  test/ci/inventory/host_vars/localhost.yml | 8
-rw-r--r--  test/ci/inventory/local.txt | 23
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml | 2
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml | 2
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml | 2
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml | 2
-rw-r--r--  test/openshift_version_tests.py | 32
-rw-r--r--  test/tox-inventory.txt | 109
-rw-r--r--  test/tox-inventory.txt_extras | 3
-rw-r--r--  tox.ini | 1
-rw-r--r--  utils/src/ooinstall/ansible_plugins/facts_callback.py | 6
643 files changed, 13638 insertions(+), 4016 deletions(-)
diff --git a/.papr.inventory b/.papr.inventory
index aa4324c21..c678e76aa 100644
--- a/.papr.inventory
+++ b/.papr.inventory
@@ -6,7 +6,7 @@ etcd
[OSEv3:vars]
ansible_ssh_user=root
ansible_python_interpreter=/usr/bin/python3
-deployment_type=origin
+openshift_deployment_type=origin
openshift_image_tag="{{ lookup('env', 'OPENSHIFT_IMAGE_TAG') }}"
openshift_master_default_subdomain="{{ lookup('env', 'RHCI_ocp_node1_IP') }}.xip.io"
openshift_check_min_host_disk_gb=1.5
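
deployment_type is the deprecated spelling; this release standardizes on openshift_deployment_type (the relocated inventory checks in roles/lib_utils/action_plugins/sanity_checks.py in the diffstat above appear to enforce the new name). For inventories kept as YAML group_vars rather than INI, the equivalent setting would look like this sketch (hypothetical file path):

# group_vars/OSEv3.yml -- hypothetical YAML equivalent of the INI line above
openshift_deployment_type: origin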
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 1ca23082d..4c4a70702 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.9.0-0.10.0 ./
+3.9.0-0.21.0 ./
diff --git a/DEPLOYMENT_TYPES.md b/DEPLOYMENT_TYPES.md
index 3788e9bfb..3e93f3fc8 100644
--- a/DEPLOYMENT_TYPES.md
+++ b/DEPLOYMENT_TYPES.md
@@ -13,5 +13,5 @@ The table below outlines the defaults per `openshift_deployment_type`:
| **openshift_service_type** (also used for package names) | origin | atomic-openshift |
| **openshift.common.config_base** | /etc/origin | /etc/origin |
| **openshift_data_dir** | /var/lib/origin | /var/lib/origin |
-| **openshift.master.registry_url openshift.node.registry_url** | openshift/origin-${component}:${version} | openshift3/ose-${component}:${version} |
+| **openshift.master.registry_url oreg_url_node** | openshift/origin-${component}:${version} | openshift3/ose-${component}:${version} |
| **Image Streams** | centos | rhel |
diff --git a/ansible.cfg b/ansible.cfg
index e4d72553e..67149cb35 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -10,10 +10,6 @@
#log_path = /tmp/ansible.log
# Additional default options for OpenShift Ansible
-callback_plugins = callback_plugins/
-filter_plugins = filter_plugins/
-lookup_plugins = lookup_plugins/
-library = library/
forks = 20
host_key_checking = False
retry_files_enabled = False
@@ -26,7 +22,7 @@ fact_caching = jsonfile
fact_caching_connection = $HOME/ansible/facts
fact_caching_timeout = 600
callback_whitelist = profile_tasks
-inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt
+inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt, .ini
# work around privilege escalation timeouts in ansible:
timeout = 30
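
The four plugin-path settings can be dropped because, per the diffstat above, the shared plugins now live inside roles (chiefly roles/lib_utils and roles/lib_openshift), where Ansible discovers them through role dependencies rather than through ansible.cfg. A minimal sketch of the consuming side, assuming a hypothetical role my_role:

# roles/my_role/meta/main.yml -- hypothetical sketch, not part of this diff
---
dependencies:
- role: lib_utils  # exposes the relocated filter/lookup/callback plugins and modules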
diff --git a/docs/proposals/crt_management_proposal.md b/docs/proposals/crt_management_proposal.md
index 5fc1ad08d..bf4048744 100644
--- a/docs/proposals/crt_management_proposal.md
+++ b/docs/proposals/crt_management_proposal.md
@@ -30,7 +30,7 @@ configure, restart, or change the container runtime as much as feasible.
## Design
The container_runtime role should be comprised of 3 'pseudo-roles' which will be
-consumed using include_role; each component area should be enabled/disabled with
+consumed using import_role; each component area should be enabled/disabled with
a boolean value, defaulting to true.
I call them 'pseudo-roles' because they are more or less independent functional
@@ -46,15 +46,15 @@ an abundance of roles), and make things as modular as possible.
# container_runtime_setup.yml
- hosts: "{{ openshift_runtime_manage_hosts | default('oo_nodes_to_config') }}"
tasks:
- - include_role:
+ - import_role:
name: container_runtime
tasks_from: install.yml
when: openshift_container_runtime_install | default(True) | bool
- - include_role:
+ - import_role:
name: container_runtime
tasks_from: storage.yml
when: openshift_container_runtime_storage | default(True) | bool
- - include_role:
+ - import_role:
name: container_runtime
tasks_from: configure.yml
when: openshift_container_runtime_configure | default(True) | bool
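
The switch from include_role to import_role here (and in role_decomposition.md below) trades dynamic inclusion for static imports: imported roles are resolved at parse time, appear in --list-tasks, and inherit tags, and a when: on an import_role is copied onto every imported task rather than evaluated once for the include itself. A standalone sketch of the contrast, not taken from this repository:

# static: resolved at parse time; the condition is attached to each imported task
- import_role:
    name: container_runtime
    tasks_from: install.yml
  when: openshift_container_runtime_install | default(True) | bool

# dynamic: resolved at runtime; the condition gates the include itself
- include_role:
    name: container_runtime
    tasks_from: install.yml
  when: openshift_container_runtime_install | default(True) | bool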
diff --git a/docs/proposals/role_decomposition.md b/docs/proposals/role_decomposition.md
index 37d080d5c..61690e8bd 100644
--- a/docs/proposals/role_decomposition.md
+++ b/docs/proposals/role_decomposition.md
@@ -115,12 +115,12 @@ providing the location of the generated certificates to the individual roles.
generated_certs_dir: "{{openshift.common.config_base}}/logging"
## Elasticsearch
-- include_role:
+- import_role:
name: openshift_logging_elasticsearch
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
-- include_role:
+- import_role:
name: openshift_logging_elasticsearch
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -130,7 +130,7 @@ providing the location of the generated certificates to the individual roles.
## Kibana
-- include_role:
+- import_role:
name: openshift_logging_kibana
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -144,7 +144,7 @@ providing the location of the generated certificates to the individual roles.
openshift_logging_kibana_es_port: "{{ openshift_logging_es_port }}"
openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
-- include_role:
+- import_role:
name: openshift_logging_kibana
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -173,7 +173,7 @@ providing the location of the generated certificates to the individual roles.
## Curator
-- include_role:
+- import_role:
name: openshift_logging_curator
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -183,7 +183,7 @@ providing the location of the generated certificates to the individual roles.
openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}"
openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
-- include_role:
+- import_role:
name: openshift_logging_curator
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -201,7 +201,7 @@ providing the location of the generated certificates to the individual roles.
## Fluentd
-- include_role:
+- import_role:
name: openshift_logging_fluentd
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
diff --git a/files/origin-components/apiserver-template.yaml b/files/origin-components/apiserver-template.yaml
index 035e4734b..4dd9395d0 100644
--- a/files/origin-components/apiserver-template.yaml
+++ b/files/origin-components/apiserver-template.yaml
@@ -4,7 +4,7 @@ metadata:
name: template-service-broker-apiserver
parameters:
- name: IMAGE
- value: openshift/origin:latest
+ value: openshift/origin-template-service-broker:latest
- name: NAMESPACE
value: openshift-template-service-broker
- name: LOGLEVEL
@@ -40,14 +40,14 @@ objects:
image: ${IMAGE}
imagePullPolicy: IfNotPresent
command:
- - "/usr/bin/openshift"
+ - "/usr/bin/template-service-broker"
- "start"
- "template-service-broker"
- "--secure-port=8443"
- "--audit-log-path=-"
- "--tls-cert-file=/var/serving-cert/tls.crt"
- "--tls-private-key-file=/var/serving-cert/tls.key"
- - "--loglevel=${LOGLEVEL}"
+ - "--v=${LOGLEVEL}"
- "--config=/var/apiserver-config/apiserver-config.yaml"
ports:
- containerPort: 8443
diff --git a/files/origin-components/console-config.yaml b/files/origin-components/console-config.yaml
new file mode 100644
index 000000000..901518b28
--- /dev/null
+++ b/files/origin-components/console-config.yaml
@@ -0,0 +1,42 @@
+apiVersion: webconsole.config.openshift.io/v1
+kind: WebConsoleConfiguration
+clusterInfo:
+ consolePublicURL: https://127.0.0.1:8443/console/
+ loggingPublicURL: ""
+ logoutPublicURL: ""
+ masterPublicURL: https://127.0.0.1:8443
+ metricsPublicURL: ""
+# TODO: The new extensions properties cannot be set until
+# origin-web-console-server has been updated with the API changes since
+# `extensions` in the old asset config was an array.
+#extensions:
+# scriptURLs: []
+# stylesheetURLs: []
+# properties: null
+features:
+ inactivityTimeoutMinutes: 0
+servingInfo:
+ bindAddress: 0.0.0.0:8443
+ bindNetwork: tcp4
+ certFile: /var/serving-cert/tls.crt
+ clientCA: ""
+ keyFile: /var/serving-cert/tls.key
+ maxRequestsInFlight: 0
+ namedCertificates: null
+ requestTimeoutSeconds: 0
+
+# START deprecated properties
+# These properties have been renamed and will be removed from the install
+# in a future pull. Keep both the old and new properties for now so that
+# the install is not broken while the origin-web-console image is updated.
+extensionDevelopment: false
+extensionProperties: null
+extensionScripts: null
+extensionStylesheets: null
+extensions: null
+loggingPublicURL: ""
+logoutURL: ""
+masterPublicURL: https://127.0.0.1:8443
+metricsPublicURL: ""
+publicURL: https://127.0.0.1:8443/console/
+# END deprecated properties
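
This file is not applied to the cluster directly; judging from console-template.yaml below, the installer passes its contents in through the template's API_SERVER_CONFIG parameter, which lands in the webconsole-config ConfigMap. Assuming the defaults above, the rendered object would look roughly like this sketch:

apiVersion: v1
kind: ConfigMap
metadata:
  namespace: openshift-web-console
  name: webconsole-config
data:
  webconsole-config.yaml: |
    apiVersion: webconsole.config.openshift.io/v1
    kind: WebConsoleConfiguration
    # ...the remainder of the file above, verbatim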
diff --git a/files/origin-components/console-rbac-template.yaml b/files/origin-components/console-rbac-template.yaml
new file mode 100644
index 000000000..9ee117199
--- /dev/null
+++ b/files/origin-components/console-rbac-template.yaml
@@ -0,0 +1,38 @@
+apiVersion: template.openshift.io/v1
+kind: Template
+metadata:
+ name: web-console-server-rbac
+parameters:
+- name: NAMESPACE
+ # This namespace cannot be changed. Only `openshift-web-console` is supported.
+ value: openshift-web-console
+objects:
+
+
+# allow grant powers to the webconsole server for cluster inspection
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: ClusterRole
+ metadata:
+ name: system:openshift:web-console-server
+ rules:
+ - apiGroups:
+ - "servicecatalog.k8s.io"
+ resources:
+ - clusterservicebrokers
+ verbs:
+ - get
+ - list
+ - watch
+
+# Grant the service account for the web console
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: ClusterRoleBinding
+ metadata:
+ name: system:openshift:web-console-server
+ roleRef:
+ kind: ClusterRole
+ name: system:openshift:web-console-server
+ subjects:
+ - kind: ServiceAccount
+ namespace: ${NAMESPACE}
+ name: webconsole
diff --git a/files/origin-components/console-template.yaml b/files/origin-components/console-template.yaml
new file mode 100644
index 000000000..7bf2d0cf4
--- /dev/null
+++ b/files/origin-components/console-template.yaml
@@ -0,0 +1,121 @@
+apiVersion: template.openshift.io/v1
+kind: Template
+metadata:
+ name: openshift-web-console
+ annotations:
+ openshift.io/display-name: OpenShift Web Console
+ description: The server for the OpenShift web console.
+ iconClass: icon-openshift
+ tags: openshift,infra
+ openshift.io/documentation-url: https://github.com/openshift/origin-web-console-server
+ openshift.io/support-url: https://access.redhat.com
+ openshift.io/provider-display-name: Red Hat, Inc.
+parameters:
+- name: IMAGE
+ value: openshift/origin-web-console:latest
+- name: NAMESPACE
+ # This namespace cannot be changed. Only `openshift-web-console` is supported.
+ value: openshift-web-console
+- name: LOGLEVEL
+ value: "0"
+- name: API_SERVER_CONFIG
+- name: NODE_SELECTOR
+ value: "{}"
+- name: REPLICA_COUNT
+ value: "1"
+objects:
+
+# to create the web console server
+- apiVersion: apps/v1beta1
+ kind: Deployment
+ metadata:
+ namespace: ${NAMESPACE}
+ name: webconsole
+ labels:
+ app: openshift-web-console
+ webconsole: "true"
+ spec:
+ replicas: "${{REPLICA_COUNT}}"
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: webconsole
+ labels:
+ webconsole: "true"
+ spec:
+ serviceAccountName: webconsole
+ containers:
+ - name: webconsole
+ image: ${IMAGE}
+ imagePullPolicy: IfNotPresent
+ command:
+ - "/usr/bin/origin-web-console"
+ - "--audit-log-path=-"
+ - "-v=${LOGLEVEL}"
+ - "--config=/var/webconsole-config/webconsole-config.yaml"
+ ports:
+ - containerPort: 8443
+ volumeMounts:
+ - mountPath: /var/serving-cert
+ name: serving-cert
+ - mountPath: /var/webconsole-config
+ name: webconsole-config
+ readinessProbe:
+ httpGet:
+ path: /healthz
+ port: 8443
+ scheme: HTTPS
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 8443
+ scheme: HTTPS
+ nodeSelector: "${{NODE_SELECTOR}}"
+ volumes:
+ - name: serving-cert
+ secret:
+ defaultMode: 400
+ secretName: webconsole-serving-cert
+ - name: webconsole-config
+ configMap:
+ defaultMode: 440
+ name: webconsole-config
+
+# to create the config for the web console
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ namespace: ${NAMESPACE}
+ name: webconsole-config
+ labels:
+ app: openshift-web-console
+ data:
+ webconsole-config.yaml: ${API_SERVER_CONFIG}
+
+# to be able to assign powers to the process
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ namespace: ${NAMESPACE}
+ name: webconsole
+ labels:
+ app: openshift-web-console
+
+# to be able to expose web console inside the cluster
+- apiVersion: v1
+ kind: Service
+ metadata:
+ namespace: ${NAMESPACE}
+ name: webconsole
+ labels:
+ app: openshift-web-console
+ annotations:
+ service.alpha.openshift.io/serving-cert-secret-name: webconsole-serving-cert
+ spec:
+ selector:
+ webconsole: "true"
+ ports:
+ - name: https
+ port: 443
+ targetPort: 8443
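
Note the two substitution syntaxes in this template: ${PARAM} always expands to a string, while ${{PARAM}} substitutes the parameter's raw value, which is why replicas and nodeSelector (an integer and an object, respectively) use the latter. A minimal standalone illustration, not part of this diff:

# hypothetical fragment showing non-string parameter substitution
parameters:
- name: REPLICA_COUNT
  value: "3"
objects:
- apiVersion: apps/v1beta1
  kind: Deployment
  spec:
    replicas: "${{REPLICA_COUNT}}"  # rendered as the integer 3, not the string "3"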
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
deleted file mode 100644
index 3eaf2aed5..000000000
--- a/filter_plugins/oo_filters.py
+++ /dev/null
@@ -1,1001 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# pylint: disable=too-many-lines
-"""
-Custom filters for use in openshift-ansible
-"""
-import json
-import os
-import pdb
-import random
-import re
-
-from base64 import b64encode
-from collections import Mapping
-# pylint no-name-in-module and import-error disabled here because pylint
-# fails to properly detect the packages when installed in a virtualenv
-from distutils.util import strtobool # pylint:disable=no-name-in-module,import-error
-from distutils.version import LooseVersion # pylint:disable=no-name-in-module,import-error
-from operator import itemgetter
-
-import pkg_resources
-import yaml
-
-from ansible import errors
-from ansible.parsing.yaml.dumper import AnsibleDumper
-
-# ansible.compat.six goes away with Ansible 2.4
-try:
- from ansible.compat.six import string_types, u
- from ansible.compat.six.moves.urllib.parse import urlparse
-except ImportError:
- from ansible.module_utils.six import string_types, u
- from ansible.module_utils.six.moves.urllib.parse import urlparse
-
-HAS_OPENSSL = False
-try:
- import OpenSSL.crypto
- HAS_OPENSSL = True
-except ImportError:
- pass
-
-
-def oo_pdb(arg):
- """ This pops you into a pdb instance where arg is the data passed in
- from the filter.
- Ex: "{{ hostvars | oo_pdb }}"
- """
- pdb.set_trace()
- return arg
-
-
-def get_attr(data, attribute=None):
- """ This looks up dictionary attributes of the form a.b.c and returns
- the value.
-
- If the key isn't present, None is returned.
- Ex: data = {'a': {'b': {'c': 5}}}
- attribute = "a.b.c"
- returns 5
- """
- if not attribute:
- raise errors.AnsibleFilterError("|failed expects attribute to be set")
-
- ptr = data
- for attr in attribute.split('.'):
- if attr in ptr:
- ptr = ptr[attr]
- else:
- ptr = None
- break
-
- return ptr
-
-
-def oo_flatten(data):
- """ This filter plugin will flatten a list of lists
- """
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects to flatten a List")
-
- return [item for sublist in data for item in sublist]
-
-
-def oo_merge_dicts(first_dict, second_dict):
- """ Merge two dictionaries where second_dict values take precedence.
- Ex: first_dict={'a': 1, 'b': 2}
- second_dict={'b': 3, 'c': 4}
- returns {'a': 1, 'b': 3, 'c': 4}
- """
- if not isinstance(first_dict, dict) or not isinstance(second_dict, dict):
- raise errors.AnsibleFilterError("|failed expects to merge two dicts")
- merged = first_dict.copy()
- merged.update(second_dict)
- return merged
-
-
-def oo_merge_hostvars(hostvars, variables, inventory_hostname):
- """ Merge host and play variables.
-
- When ansible version is greater than or equal to 2.0.0,
- merge hostvars[inventory_hostname] with variables (ansible vars);
- otherwise merge hostvars[inventory_hostname] with hostvars.
-
- Ex: hostvars={'master1.example.com': {'openshift_variable': '3'},
- 'openshift_other_variable': '7'}
- variables={'openshift_other_variable': '6'}
- inventory_hostname='master1.example.com'
- returns {'openshift_variable': '3', 'openshift_other_variable': '7'}
-
- hostvars=<ansible.vars.hostvars.HostVars object> (Mapping)
- variables={'openshift_other_variable': '6'}
- inventory_hostname='master1.example.com'
- returns {'openshift_variable': '3', 'openshift_other_variable': '6'}
- """
- if not isinstance(hostvars, Mapping):
- raise errors.AnsibleFilterError("|failed expects hostvars is dictionary or object")
- if not isinstance(variables, dict):
- raise errors.AnsibleFilterError("|failed expects variables is a dictionary")
- if not isinstance(inventory_hostname, string_types):
- raise errors.AnsibleFilterError("|failed expects inventory_hostname is a string")
- ansible_version = pkg_resources.get_distribution("ansible").version # pylint: disable=maybe-no-member
- merged_hostvars = {}
- if LooseVersion(ansible_version) >= LooseVersion('2.0.0'):
- merged_hostvars = oo_merge_dicts(
- hostvars[inventory_hostname], variables)
- else:
- merged_hostvars = oo_merge_dicts(
- hostvars[inventory_hostname], hostvars)
- return merged_hostvars
-
-
-def oo_collect(data_list, attribute=None, filters=None):
- """ This takes a list of dict and collects all attributes specified into a
- list. If filter is specified then we will include all items that
- match _ALL_ of filters. If a dict entry is missing the key in a
- filter it will be excluded from the match.
- Ex: data_list = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
- {'a':2, 'z': 'z'}, # True, return
- {'a':3, 'z': 'z'}, # True, return
- {'a':4, 'z': 'b'}, # FAILED, obj['z'] != filters['z']
- ]
- attribute = 'a'
- filters = {'z': 'z'}
- returns [1, 2, 3]
-
- This also deals with lists of lists with dict as elements.
- Ex: data_list = [
- [ {'a':1, 'b':5, 'z': 'z'}, # True, return
- {'a':2, 'b':6, 'z': 'z'} # True, return
- ],
- [ {'a':3, 'z': 'z'}, # True, return
- {'a':4, 'z': 'b'} # FAILED, obj['z'] != filters['z']
- ],
- {'a':5, 'z': 'z'}, # True, return
- ]
- attribute = 'a'
- filters = {'z': 'z'}
- returns [1, 2, 3, 5]
- """
- if not isinstance(data_list, list):
- raise errors.AnsibleFilterError("oo_collect expects to filter on a List")
-
- if not attribute:
- raise errors.AnsibleFilterError("oo_collect expects attribute to be set")
-
- data = []
- retval = []
-
- for item in data_list:
- if isinstance(item, list):
- retval.extend(oo_collect(item, attribute, filters))
- else:
- data.append(item)
-
- if filters is not None:
- if not isinstance(filters, dict):
- raise errors.AnsibleFilterError(
- "oo_collect expects filter to be a dict")
- retval.extend([get_attr(d, attribute) for d in data if (
- all([d.get(key, None) == filters[key] for key in filters]))])
- else:
- retval.extend([get_attr(d, attribute) for d in data])
-
- retval = [val for val in retval if val is not None]
-
- return retval
-
-
-def oo_select_keys_from_list(data, keys):
- """ This returns a list, which contains the value portions for the keys
- Ex: data = { 'a':1, 'b':2, 'c':3 }
- keys = ['a', 'c']
- returns [1, 3]
- """
-
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|oo_select_keys_from_list failed expects to filter on a list")
-
- if not isinstance(keys, list):
- raise errors.AnsibleFilterError("|oo_select_keys_from_list failed expects first param is a list")
-
- # Gather up the values for the list of keys passed in
- retval = [oo_select_keys(item, keys) for item in data]
-
- return oo_flatten(retval)
-
-
-def oo_select_keys(data, keys):
- """ This returns a list, which contains the value portions for the keys
- Ex: data = { 'a':1, 'b':2, 'c':3 }
- keys = ['a', 'c']
- returns [1, 3]
- """
-
- if not isinstance(data, Mapping):
- raise errors.AnsibleFilterError("|oo_select_keys failed expects to filter on a dict or object")
-
- if not isinstance(keys, list):
- raise errors.AnsibleFilterError("|oo_select_keys failed expects first param is a list")
-
- # Gather up the values for the list of keys passed in
- retval = [data[key] for key in keys if key in data]
-
- return retval
-
-
-def oo_prepend_strings_in_list(data, prepend):
- """ This takes a list of strings and prepends a string to each item in the
- list
- Ex: data = ['cart', 'tree']
- prepend = 'apple-'
- returns ['apple-cart', 'apple-tree']
- """
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
- if not all(isinstance(x, string_types) for x in data):
- raise errors.AnsibleFilterError("|failed expects first param is a list"
- " of strings")
- retval = [prepend + s for s in data]
- return retval
-
-
-def oo_combine_key_value(data, joiner='='):
- """Take a list of dict in the form of { 'key': 'value'} and
- arrange them as a list of strings ['key=value']
- """
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
-
- rval = []
- for item in data:
- rval.append("%s%s%s" % (item['key'], joiner, item['value']))
-
- return rval
-
-
-def oo_combine_dict(data, in_joiner='=', out_joiner=' '):
- """Take a dict in the form of { 'key': 'value', 'key': 'value' } and
- arrange them as a string 'key=value key=value'
- """
- if not isinstance(data, dict):
- # pylint: disable=line-too-long
- raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_combine_dict]. Got %s. Type: %s" % (str(data), str(type(data))))
-
- return out_joiner.join([in_joiner.join([k, str(v)]) for k, v in data.items()])
-
-
-def oo_dict_to_list_of_dict(data, key_title='key', value_title='value'):
- """Take a dict and arrange them as a list of dicts
-
- Input data:
- {'region': 'infra', 'test_k': 'test_v'}
-
- Return data:
- [{'key': 'region', 'value': 'infra'}, {'key': 'test_k', 'value': 'test_v'}]
-
- Written for use of the oc_label module
- """
- if not isinstance(data, dict):
- # pylint: disable=line-too-long
- raise errors.AnsibleFilterError("|failed expects first param is a dict. Got %s. Type: %s" % (str(data), str(type(data))))
-
- rval = []
- for label in data.items():
- rval.append({key_title: label[0], value_title: label[1]})
-
- return rval
-
-
-def oo_ami_selector(data, image_name):
- """ This takes a list of amis and an image name and attempts to return
- the latest ami.
- """
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
-
- if not data:
- return None
- else:
- if image_name is None or not image_name.endswith('_*'):
- ami = sorted(data, key=itemgetter('name'), reverse=True)[0]
- return ami['ami_id']
- else:
- ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data]
- ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0]
- return ami['ami_id']
-
-
-def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False):
- """ This takes a dictionary of volume definitions and returns a valid ec2
- volume definition based on the host_type and the values in the
- dictionary.
- The dictionary should look similar to this:
- { 'master':
- { 'root':
- { 'volume_size': 10, 'device_type': 'gp2',
- 'iops': 500
- },
- 'docker':
- { 'volume_size': 40, 'device_type': 'gp2',
- 'iops': 500, 'ephemeral': 'true'
- }
- },
- 'node':
- { 'root':
- { 'volume_size': 10, 'device_type': 'io1',
- 'iops': 1000
- },
- 'docker':
- { 'volume_size': 40, 'device_type': 'gp2',
- 'iops': 500, 'ephemeral': 'true'
- }
- }
- }
- """
- if not isinstance(data, dict):
- # pylint: disable=line-too-long
- raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_ec2_volume_def]. Got %s. Type: %s" % (str(data), str(type(data))))
- if host_type not in ['master', 'node', 'etcd']:
- raise errors.AnsibleFilterError("|failed expects etcd, master or node"
- " as the host type")
-
- root_vol = data[host_type]['root']
- root_vol['device_name'] = '/dev/sda1'
- root_vol['delete_on_termination'] = True
- if root_vol['device_type'] != 'io1':
- root_vol.pop('iops', None)
- if host_type in ['master', 'node'] and 'docker' in data[host_type]:
- docker_vol = data[host_type]['docker']
- docker_vol['device_name'] = '/dev/xvdb'
- docker_vol['delete_on_termination'] = True
- if docker_vol['device_type'] != 'io1':
- docker_vol.pop('iops', None)
- if docker_ephemeral:
- docker_vol.pop('device_type', None)
- docker_vol.pop('delete_on_termination', None)
- docker_vol['ephemeral'] = 'ephemeral0'
- return [root_vol, docker_vol]
- elif host_type == 'etcd' and 'etcd' in data[host_type]:
- etcd_vol = data[host_type]['etcd']
- etcd_vol['device_name'] = '/dev/xvdb'
- etcd_vol['delete_on_termination'] = True
- if etcd_vol['device_type'] != 'io1':
- etcd_vol.pop('iops', None)
- return [root_vol, etcd_vol]
- return [root_vol]
-
-
-def oo_split(string, separator=','):
- """ This splits the input string into a list. If the input string is
- already a list we will return it as is.
- """
- if isinstance(string, list):
- return string
- return string.split(separator)
-
-
-def oo_list_to_dict(lst, separator='='):
- """ This converts a list of ["k=v"] to a dictionary {k: v}.
- """
- kvs = [i.split(separator) for i in lst]
- return {k: v for k, v in kvs}
-
-
-def oo_haproxy_backend_masters(hosts, port):
- """ This takes an array of dicts and returns an array of dicts
- to be used as a backend for the haproxy role
- """
- servers = []
- for idx, host_info in enumerate(hosts):
- server = dict(name="master%s" % idx)
- server_ip = host_info['openshift']['common']['ip']
- server['address'] = "%s:%s" % (server_ip, port)
- server['opts'] = 'check'
- servers.append(server)
- return servers
-
-
-def oo_filter_list(data, filter_attr=None):
- """ This returns a list, which contains all items where filter_attr
- evaluates to true
- Ex: data = [ { a: 1, b: True },
- { a: 3, b: False },
- { a: 5, b: True } ]
- filter_attr = 'b'
- returns [ { a: 1, b: True },
- { a: 5, b: True } ]
- """
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects to filter on a list")
-
- if not isinstance(filter_attr, string_types):
- raise errors.AnsibleFilterError("|failed expects filter_attr is a str or unicode")
-
- # Gather up the values for the list of keys passed in
- return [x for x in data if filter_attr in x and x[filter_attr]]
-
-
-def oo_nodes_with_label(nodes, label, value=None):
- """ Filters a list of nodes by label and value (if provided)
-
- It handles labels that are in the following variables by priority:
- openshift_node_labels, cli_openshift_node_labels, openshift['node']['labels']
-
- Examples:
- data = [{'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
- {'openshift_node_labels': {'color': 'green', 'size': 'L'}},
- {'openshift_node_labels': {'size': 'S'}}]
- label = 'color'
- returns = [{'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
- {'openshift_node_labels': {'color': 'green', 'size': 'L'}}]
-
- data = [{'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
- {'openshift_node_labels': {'color': 'green', 'size': 'L'}},
- {'openshift_node_labels': {'size': 'S'}}]
- label = 'color'
- value = 'green'
- returns = [{'openshift_node_labels': {'color': 'green', 'size': 'L'}}]
-
- Args:
- nodes (list[dict]): list of node variable dicts, one per node
- label (str): label to filter `nodes` by
- value (Optional[str]): value of `label` to filter by. Defaults
- to None.
-
- Returns:
- list[dict]: nodes filtered by label and value (if provided)
- """
- if not isinstance(nodes, list):
- raise errors.AnsibleFilterError("failed expects to filter on a list")
- if not isinstance(label, string_types):
- raise errors.AnsibleFilterError("failed expects label to be a string")
- if value is not None and not isinstance(value, string_types):
- raise errors.AnsibleFilterError("failed expects value to be a string")
-
- def label_filter(node):
- """ filter function for testing if node should be returned """
- if not isinstance(node, dict):
- raise errors.AnsibleFilterError("failed expects to filter on a list of dicts")
- if 'openshift_node_labels' in node:
- labels = node['openshift_node_labels']
- elif 'cli_openshift_node_labels' in node:
- labels = node['cli_openshift_node_labels']
- elif 'openshift' in node and 'node' in node['openshift'] and 'labels' in node['openshift']['node']:
- labels = node['openshift']['node']['labels']
- else:
- return False
-
- if isinstance(labels, string_types):
- labels = yaml.safe_load(labels)
- if not isinstance(labels, dict):
- raise errors.AnsibleFilterError(
- "failed expected node labels to be a dict or serializable to a dict"
- )
- return label in labels and (value is None or labels[label] == value)
-
- return [n for n in nodes if label_filter(n)]
-
-
-def oo_parse_heat_stack_outputs(data):
- """ Formats the HEAT stack output into a usable form
-
- The goal is to transform something like this:
-
- +---------------+-------------------------------------------------+
- | Property | Value |
- +---------------+-------------------------------------------------+
- | capabilities | [] | |
- | creation_time | 2015-06-26T12:26:26Z | |
- | description | OpenShift cluster | |
- | … | … |
- | outputs | [ |
- | | { |
- | | "output_value": "value_A" |
- | | "description": "This is the value of Key_A" |
- | | "output_key": "Key_A" |
- | | }, |
- | | { |
- | | "output_value": [ |
- | | "value_B1", |
- | | "value_B2" |
- | | ], |
- | | "description": "This is the value of Key_B" |
- | | "output_key": "Key_B" |
- | | }, |
- | | ] |
- | parameters | { |
- | … | … |
- +---------------+-------------------------------------------------+
-
- into something like this:
-
- {
- "Key_A": "value_A",
- "Key_B": [
- "value_B1",
- "value_B2"
- ]
- }
- """
-
- # Extract the "outputs" JSON snippet from the pretty-printed array
- in_outputs = False
- outputs = ''
-
- line_regex = re.compile(r'\|\s*(.*?)\s*\|\s*(.*?)\s*\|')
- for line in data['stdout_lines']:
- match = line_regex.match(line)
- if match:
- if match.group(1) == 'outputs':
- in_outputs = True
- elif match.group(1) != '':
- in_outputs = False
- if in_outputs:
- outputs += match.group(2)
-
- outputs = json.loads(outputs)
-
- # Revamp the "outputs" to put it in the form of a "Key: value" map
- revamped_outputs = {}
- for output in outputs:
- revamped_outputs[output['output_key']] = output['output_value']
-
- return revamped_outputs
-
-
-# pylint: disable=too-many-branches
-def oo_parse_named_certificates(certificates, named_certs_dir, internal_hostnames):
- """ Parses names from list of certificate hashes.
-
- Ex: certificates = [{ "certfile": "/root/custom1.crt",
- "keyfile": "/root/custom1.key",
- "cafile": "/root/custom-ca1.crt" },
- { "certfile": "custom2.crt",
- "keyfile": "custom2.key",
- "cafile": "custom-ca2.crt" }]
-
- returns [{ "certfile": "/etc/origin/master/named_certificates/custom1.crt",
- "keyfile": "/etc/origin/master/named_certificates/custom1.key",
- "cafile": "/etc/origin/master/named_certificates/custom-ca1.crt",
- "names": [ "public-master-host.com",
- "other-master-host.com" ] },
- { "certfile": "/etc/origin/master/named_certificates/custom2.crt",
- "keyfile": "/etc/origin/master/named_certificates/custom2.key",
- "cafile": "/etc/origin/master/named_certificates/custom-ca-2.crt",
- "names": [ "some-hostname.com" ] }]
- """
- if not isinstance(named_certs_dir, string_types):
- raise errors.AnsibleFilterError("|failed expects named_certs_dir is str or unicode")
-
- if not isinstance(internal_hostnames, list):
- raise errors.AnsibleFilterError("|failed expects internal_hostnames is list")
-
- if not HAS_OPENSSL:
- raise errors.AnsibleFilterError("|missing OpenSSL python bindings")
-
- for certificate in certificates:
- if 'names' in certificate.keys():
- continue
- else:
- certificate['names'] = []
-
- if not os.path.isfile(certificate['certfile']) or not os.path.isfile(certificate['keyfile']):
- raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" %
- (certificate['certfile'], certificate['keyfile']))
-
- try:
- st_cert = open(certificate['certfile'], 'rt').read()
- cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert)
- certificate['names'].append(str(cert.get_subject().commonName.decode()))
- for i in range(cert.get_extension_count()):
- if cert.get_extension(i).get_short_name() == 'subjectAltName':
- for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '):
- certificate['names'].append(name)
- except Exception:
- raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] +
- "please specify certificate names in host inventory"))
-
- certificate['names'] = list(set(certificate['names']))
- if 'cafile' not in certificate:
- certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames]
- if not certificate['names']:
- raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] +
- "detected a collision with internal hostname, please specify " +
- "certificate names in host inventory"))
-
- for certificate in certificates:
- # Update paths for configuration
- certificate['certfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['certfile']))
- certificate['keyfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['keyfile']))
- if 'cafile' in certificate:
- certificate['cafile'] = os.path.join(named_certs_dir, os.path.basename(certificate['cafile']))
- return certificates
-
-
-def oo_pretty_print_cluster(data, prefix='tag_'):
- """ Read a subset of hostvars and build a summary of the cluster
- in the following layout:
-
-"c_id": {
-"master": {
-"default": [
- { "name": "c_id-master-12345", "public IP": "172.16.0.1", "private IP": "192.168.0.1" }
-]
-"node": {
-"infra": [
- { "name": "c_id-node-infra-23456", "public IP": "172.16.0.2", "private IP": "192.168.0.2" }
-],
-"compute": [
- { "name": "c_id-node-compute-23456", "public IP": "172.16.0.3", "private IP": "192.168.0.3" },
-...
-]
-}
- """
-
- def _get_tag_value(tags, key):
- """ Extract values of a map implemented as a set.
- Ex: tags = { 'tag_foo_value1', 'tag_bar_value2', 'tag_baz_value3' }
- key = 'bar'
- returns 'value2'
- """
- for tag in tags:
- if tag[:len(prefix) + len(key)] == prefix + key:
- return tag[len(prefix) + len(key) + 1:]
- raise KeyError(key)
-
- def _add_host(clusters,
- clusterid,
- host_type,
- sub_host_type,
- host):
- """ Add a new host in the clusters data structure """
- if clusterid not in clusters:
- clusters[clusterid] = {}
- if host_type not in clusters[clusterid]:
- clusters[clusterid][host_type] = {}
- if sub_host_type not in clusters[clusterid][host_type]:
- clusters[clusterid][host_type][sub_host_type] = []
- clusters[clusterid][host_type][sub_host_type].append(host)
-
- clusters = {}
- for host in data:
- try:
- _add_host(clusters=clusters,
- clusterid=_get_tag_value(host['group_names'], 'clusterid'),
- host_type=_get_tag_value(host['group_names'], 'host-type'),
- sub_host_type=_get_tag_value(host['group_names'], 'sub-host-type'),
- host={'name': host['inventory_hostname'],
- 'public IP': host['oo_public_ipv4'],
- 'private IP': host['oo_private_ipv4']})
- except KeyError:
- pass
- return clusters
-
-
-def oo_generate_secret(num_bytes):
- """ generate a session secret """
-
- if not isinstance(num_bytes, int):
- raise errors.AnsibleFilterError("|failed expects num_bytes is int")
-
- return b64encode(os.urandom(num_bytes)).decode('utf-8')
-
-
-def to_padded_yaml(data, level=0, indent=2, **kw):
- """ returns a yaml snippet padded to match the indent level you specify """
- if data in [None, ""]:
- return ""
-
- try:
- transformed = u(yaml.dump(data, indent=indent, allow_unicode=True,
- default_flow_style=False,
- Dumper=AnsibleDumper, **kw))
- padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()])
- return "\n{0}".format(padded)
- except Exception as my_e:
- raise errors.AnsibleFilterError('Failed to convert: %s' % my_e)
-
-
-def oo_31_rpm_rename_conversion(rpms, openshift_version=None):
- """ Filters a list of 3.0 rpms and return the corresponding 3.1 rpms
- names with proper version (if provided)
-
- If 3.1 rpms are passed in they will only be augmented with the
- correct version. This is important for hosts that are running both
- Masters and Nodes.
- """
- if not isinstance(rpms, list):
- raise errors.AnsibleFilterError("failed expects to filter on a list")
- if openshift_version is not None and not isinstance(openshift_version, string_types):
- raise errors.AnsibleFilterError("failed expects openshift_version to be a string")
-
- rpms_31 = []
- for rpm in rpms:
- if 'atomic' not in rpm:
- rpm = rpm.replace("openshift", "atomic-openshift")
- if openshift_version:
- rpm = rpm + openshift_version
- rpms_31.append(rpm)
-
- return rpms_31
-
-
-def oo_pods_match_component(pods, deployment_type, component):
- """ Filters a list of Pods and returns the ones matching the deployment_type and component
- """
- if not isinstance(pods, list):
- raise errors.AnsibleFilterError("failed expects to filter on a list")
- if not isinstance(deployment_type, string_types):
- raise errors.AnsibleFilterError("failed expects deployment_type to be a string")
- if not isinstance(component, string_types):
- raise errors.AnsibleFilterError("failed expects component to be a string")
-
- image_prefix = 'openshift/origin-'
- if deployment_type == 'openshift-enterprise':
- image_prefix = 'openshift3/ose-'
-
- matching_pods = []
- image_regex = image_prefix + component + r'.*'
- for pod in pods:
- for container in pod['spec']['containers']:
- if re.search(image_regex, container['image']):
- matching_pods.append(pod)
- break # stop here, don't add a pod more than once
-
- return matching_pods
-
-
-def oo_get_hosts_from_hostvars(hostvars, hosts):
- """ Return a list of hosts from hostvars """
- retval = []
- for host in hosts:
- try:
- retval.append(hostvars[host])
- except errors.AnsibleError:
- # host does not exist
- pass
-
- return retval
-
-
-def oo_image_tag_to_rpm_version(version, include_dash=False):
- """ Convert an image tag string to an RPM version if necessary
- Empty strings and strings that are already in rpm version format
- are ignored. Also remove non semantic version components.
-
- Ex. v3.2.0.10 -> -3.2.0.10
- v1.2.0-rc1 -> -1.2.0
- """
- if not isinstance(version, string_types):
- raise errors.AnsibleFilterError("|failed expects a string or unicode")
- if version.startswith("v"):
- version = version[1:]
- # Strip release from requested version, we no longer support this.
- version = version.split('-')[0]
-
- if include_dash and version and not version.startswith("-"):
- version = "-" + version
-
- return version
-
-
-def oo_hostname_from_url(url):
- """ Returns the hostname contained in a URL
-
- Ex: https://ose3-master.example.com/v1/api -> ose3-master.example.com
- """
- if not isinstance(url, string_types):
- raise errors.AnsibleFilterError("|failed expects a string or unicode")
- parse_result = urlparse(url)
- if parse_result.netloc != '':
- return parse_result.netloc
- else:
- # netloc wasn't parsed, assume url was missing scheme and path
- return parse_result.path
-
-
-# pylint: disable=invalid-name, unused-argument
-def oo_openshift_loadbalancer_frontends(
- api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
- """TODO: Document me."""
- loadbalancer_frontends = [{'name': 'atomic-openshift-api',
- 'mode': 'tcp',
- 'options': ['tcplog'],
- 'binds': ["*:{0}".format(api_port)],
- 'default_backend': 'atomic-openshift-api'}]
- if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
- loadbalancer_frontends.append({'name': 'nuage-monitor',
- 'mode': 'tcp',
- 'options': ['tcplog'],
- 'binds': ["*:{0}".format(nuage_rest_port)],
- 'default_backend': 'nuage-monitor'})
- return loadbalancer_frontends
-
-
-# pylint: disable=invalid-name
-def oo_openshift_loadbalancer_backends(
- api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
- """TODO: Document me."""
- loadbalancer_backends = [{'name': 'atomic-openshift-api',
- 'mode': 'tcp',
- 'option': 'tcplog',
- 'balance': 'source',
- 'servers': oo_haproxy_backend_masters(servers_hostvars, api_port)}]
- if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
- # pylint: disable=line-too-long
- loadbalancer_backends.append({'name': 'nuage-monitor',
- 'mode': 'tcp',
- 'option': 'tcplog',
- 'balance': 'source',
- 'servers': oo_haproxy_backend_masters(servers_hostvars, nuage_rest_port)})
- return loadbalancer_backends
-
-
-def oo_chomp_commit_offset(version):
- """Chomp any "+git.foo" commit offset string from the given `version`
- and return the modified version string.
-
-Ex:
-- chomp_commit_offset(None) => None
-- chomp_commit_offset(1337) => "1337"
-- chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
-- chomp_commit_offset("v3.4.0.15") => "v3.4.0.15"
-- chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0"
- """
- if version is None:
- return version
- else:
- # Stringify, just in case it's a Number type. Split by '+' and
- # return the first split. No concerns about strings without a
- # '+': .split() returns a single-element list containing the original string.
- return str(version).split('+')[0]
-
-
-def oo_random_word(length, source='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
- """Generates a random string of given length from a set of alphanumeric characters.
- The default source uses [a-z][A-Z][0-9]
- Ex:
- - oo_random_word(3) => aB9
- - oo_random_word(4, source='012') => 0120
- """
- return ''.join(random.choice(source) for i in range(length))
-
-
-def oo_contains_rule(source, apiGroups, resources, verbs):
- '''Return true if the specified rule is contained within the provided source'''
-
- rules = source['rules']
-
- if rules:
- for rule in rules:
- if set(rule['apiGroups']) == set(apiGroups):
- if set(rule['resources']) == set(resources):
- if set(rule['verbs']) == set(verbs):
- return True
-
- return False
-
-
-def oo_selector_to_string_list(user_dict):
- """Convert a dict of selectors to a key=value list of strings
-
-Given input of {'region': 'infra', 'zone': 'primary'} returns a list
-of items as ['region=infra', 'zone=primary']
- """
- selectors = []
- for key in user_dict:
- selectors.append("{}={}".format(key, user_dict[key]))
- return selectors
-
-
-def oo_filter_sa_secrets(sa_secrets, secret_hint='-token-'):
- """Parse the Service Account Secrets list, `sa_secrets`, (as from
-oc_serviceaccount_secret:state=list) and return the name of the secret
-containing the `secret_hint` string. For example, by default this will
-return the name of the secret holding the SA bearer token.
-
-Only provide the 'results' object to this filter. This filter expects
-to receive a list like this:
-
- [
- {
- "name": "management-admin-dockercfg-p31s2"
- },
- {
- "name": "management-admin-token-bnqsh"
- }
- ]
-
-
-Returns:
-
-* `secret_name` [string] - The name of the secret matching the
- `secret_hint` parameter. By default this is the secret holding the
- SA's bearer token.
-
-Example playbook usage:
-
-Register the return value from oc_serviceaccount_secret and pass
-that result to this filter plugin.
-
- - name: Get all SA Secrets
- oc_serviceaccount_secret:
- state: list
- service_account: management-admin
- namespace: management-infra
- register: sa
-
- - name: Save the SA bearer token secret name
- set_fact:
- management_token: "{{ sa.results | oo_filter_sa_secrets }}"
-
- - name: Get the SA bearer token value
- oc_secret:
- state: list
- name: "{{ management_token }}"
- namespace: management-infra
- decode: true
- register: sa_secret
-
- - name: Print the bearer token value
- debug:
- var: sa_secret.results.decoded.token
-
- """
- secret_name = None
-
- for secret in sa_secrets:
- # each secret is a hash
- if secret['name'].find(secret_hint) == -1:
- continue
- else:
- secret_name = secret['name']
- break
-
- return secret_name
-
-
-class FilterModule(object):
- """ Custom ansible filter mapping """
-
- # pylint: disable=no-self-use, too-few-public-methods
- def filters(self):
- """ returns a mapping of filters to methods """
- return {
- "oo_select_keys": oo_select_keys,
- "oo_select_keys_from_list": oo_select_keys_from_list,
- "oo_chomp_commit_offset": oo_chomp_commit_offset,
- "oo_collect": oo_collect,
- "oo_flatten": oo_flatten,
- "oo_pdb": oo_pdb,
- "oo_prepend_strings_in_list": oo_prepend_strings_in_list,
- "oo_ami_selector": oo_ami_selector,
- "oo_ec2_volume_definition": oo_ec2_volume_definition,
- "oo_combine_key_value": oo_combine_key_value,
- "oo_combine_dict": oo_combine_dict,
- "oo_dict_to_list_of_dict": oo_dict_to_list_of_dict,
- "oo_split": oo_split,
- "oo_list_to_dict": oo_list_to_dict,
- "oo_filter_list": oo_filter_list,
- "oo_parse_heat_stack_outputs": oo_parse_heat_stack_outputs,
- "oo_parse_named_certificates": oo_parse_named_certificates,
- "oo_haproxy_backend_masters": oo_haproxy_backend_masters,
- "oo_pretty_print_cluster": oo_pretty_print_cluster,
- "oo_generate_secret": oo_generate_secret,
- "oo_nodes_with_label": oo_nodes_with_label,
- "oo_31_rpm_rename_conversion": oo_31_rpm_rename_conversion,
- "oo_pods_match_component": oo_pods_match_component,
- "oo_get_hosts_from_hostvars": oo_get_hosts_from_hostvars,
- "oo_image_tag_to_rpm_version": oo_image_tag_to_rpm_version,
- "oo_merge_dicts": oo_merge_dicts,
- "oo_hostname_from_url": oo_hostname_from_url,
- "oo_merge_hostvars": oo_merge_hostvars,
- "oo_openshift_loadbalancer_frontends": oo_openshift_loadbalancer_frontends,
- "oo_openshift_loadbalancer_backends": oo_openshift_loadbalancer_backends,
- "to_padded_yaml": to_padded_yaml,
- "oo_random_word": oo_random_word,
- "oo_contains_rule": oo_contains_rule,
- "oo_selector_to_string_list": oo_selector_to_string_list,
- "oo_filter_sa_secrets": oo_filter_sa_secrets,
- }
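
[Editor's illustration] The filters deleted above were relocated rather than dropped (see "Relocate filter plugins to lib_utils" and "Move more plugins to lib_utils" in the changelog below). For readers tracing behavior, a minimal self-contained sketch of two of the most widely used filters, re-implemented from their docstrings above (illustrative only, not the relocated lib_utils code):

    # Merge semantics of oo_merge_dicts: the second dict's values take precedence.
    def merge_dicts(first, second):
        merged = first.copy()
        merged.update(second)
        return merged

    # Core semantics of oo_collect: gather `attribute` from dicts matching all
    # filters, recursing into nested lists (dotted attribute paths omitted here).
    def collect(data_list, attribute, filters=None):
        results = []
        for item in data_list:
            if isinstance(item, list):
                results.extend(collect(item, attribute, filters))
            elif filters is None or all(item.get(k) == v for k, v in filters.items()):
                if item.get(attribute) is not None:
                    results.append(item[attribute])
        return results

    assert merge_dicts({'a': 1, 'b': 2}, {'b': 3, 'c': 4}) == {'a': 1, 'b': 3, 'c': 4}
    assert collect([{'a': 1, 'z': 'z'}, [{'a': 2, 'z': 'z'}], {'a': 4, 'z': 'b'}],
                   'a', {'z': 'z'}) == [1, 2]
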
diff --git a/filter_plugins/openshift_version.py b/filter_plugins/openshift_version.py
deleted file mode 100644
index 7a70b158b..000000000
--- a/filter_plugins/openshift_version.py
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-"""
-Custom version comparison filters for use in openshift-ansible
-"""
-
-# pylint can't locate distutils.version within virtualenv
-# https://github.com/PyCQA/pylint/issues/73
-# pylint: disable=no-name-in-module, import-error
-from distutils.version import LooseVersion
-
-
-def gte_function_builder(name, gte_version):
- """
- Build and return a version comparison function.
-
- Ex: name = 'oo_version_gte_3_6'
- gte_version = '3.6'
-
- returns oo_version_gte_3_6, a function which returns True if the
- provided version is greater than or equal to the function's
- gte_version
- """
- def _gte_function(version):
- """
- Dynamic function created by gte_function_builder.
-
- Ex: version = '3.1'
- returns True/False
- """
- version_gte = False
- if LooseVersion(str(version)) >= LooseVersion(gte_version):
- version_gte = True
- return version_gte
- _gte_function.__name__ = name
- return _gte_function
-
-
-# pylint: disable=too-few-public-methods
-class FilterModule(object):
- """
- Filters for version checking.
- """
- # Each element of versions is composed of (major, minor_start, minor_end)
- # Origin began versioning 3.x with 3.6, so begin 3.x with 3.6.
- versions = [(3, 6, 10)]
-
- def __init__(self):
- """
- Creates a new FilterModule for ose version checking.
- """
- self._filters = {}
-
- # For each set of (major, minor_start, minor_end)
- for major, minor_start, minor_end in self.versions:
- # For each minor version in the range
- for minor in range(minor_start, minor_end):
- # Create the function name
- func_name = 'oo_version_gte_{}_{}'.format(major, minor)
- # Create the function with the builder
- func = gte_function_builder(func_name, "{}.{}.0".format(major, minor))
- # Add the function to the mapping
- self._filters[func_name] = func
-
- def filters(self):
- """
- Return the filters mapping.
- """
- return self._filters
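
[Editor's illustration] The version filters removed here were generated dynamically; a compact sketch of the whole module's behavior, using the same (major, minor_start, minor_end) table and normalizing both sides of the comparison to LooseVersion:

    from distutils.version import LooseVersion

    def gte_function_builder(name, gte_version):
        def _gte(version):
            return LooseVersion(str(version)) >= LooseVersion(gte_version)
        _gte.__name__ = name
        return _gte

    # (3, 6, 10) expands to oo_version_gte_3_6 through oo_version_gte_3_9,
    # since range() excludes the end value.
    filters = {}
    for major, minor_start, minor_end in [(3, 6, 10)]:
        for minor in range(minor_start, minor_end):
            name = 'oo_version_gte_{}_{}'.format(major, minor)
            filters[name] = gte_function_builder(name, '{}.{}.0'.format(major, minor))

    assert filters['oo_version_gte_3_7']('3.9')
    assert not filters['oo_version_gte_3_7']('3.6.1')
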
diff --git a/images/installer/Dockerfile b/images/installer/Dockerfile
index 0d977d48f..b1390480a 100644
--- a/images/installer/Dockerfile
+++ b/images/installer/Dockerfile
@@ -10,7 +10,7 @@ COPY images/installer/origin-extra-root /
# install ansible and deps
RUN INSTALL_PKGS="python-lxml pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless python2-passlib httpd-tools openssh-clients origin-clients" \
&& yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
- && EPEL_PKGS="ansible python2-boto" \
+ && EPEL_PKGS="ansible python2-boto python2-boto3 google-cloud-sdk-183.0.0 which" \
&& yum install -y epel-release \
&& yum install -y --setopt=tsflags=nodocs $EPEL_PKGS \
&& rpm -V $INSTALL_PKGS $EPEL_PKGS \
diff --git a/images/installer/Dockerfile.rhel7 b/images/installer/Dockerfile.rhel7
index 5245771d0..05df6b43a 100644
--- a/images/installer/Dockerfile.rhel7
+++ b/images/installer/Dockerfile.rhel7
@@ -5,7 +5,7 @@ MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
USER root
# Playbooks, roles, and their dependencies are installed from packages.
-RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto openssl java-1.8.0-openjdk-headless httpd-tools" \
+RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \
&& yum repolist > /dev/null \
&& yum-config-manager --enable rhel-7-server-ose-3.7-rpms \
&& yum-config-manager --enable rhel-7-server-rh-common-rpms \
diff --git a/images/installer/origin-extra-root/etc/yum.repos.d/google-cloud-sdk.repo b/images/installer/origin-extra-root/etc/yum.repos.d/google-cloud-sdk.repo
new file mode 100644
index 000000000..7bb8502e9
--- /dev/null
+++ b/images/installer/origin-extra-root/etc/yum.repos.d/google-cloud-sdk.repo
@@ -0,0 +1,8 @@
+[google-cloud-sdk]
+name=google-cloud-sdk
+baseurl=https://packages.cloud.google.com/yum/repos/cloud-sdk-el7-x86_64
+enabled=1
+gpgcheck=1
+repo_gpgcheck=1
+gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+ https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
diff --git a/inventory/hosts.example b/inventory/hosts.example
index d857cd1a7..da60b63e6 100644
--- a/inventory/hosts.example
+++ b/inventory/hosts.example
@@ -84,6 +84,9 @@ openshift_release=v3.7
# Configure extensions in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
+#openshift_master_oauth_templates:
+# login: /path/to/login-template.html
+# openshift_master_oauth_template is deprecated. Use openshift_master_oauth_templates instead.
#openshift_master_oauth_template=/path/to/login-template.html
# Configure imagePolicyConfig in the master config
@@ -125,7 +128,7 @@ openshift_release=v3.7
#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest"
# NOTE: The following crio docker-gc items are tech preview and likely shouldn't be used
# unless you know what you are doing!!
-# The following two variables are used when opneshift_use_crio is True
+# The following two variables are used when openshift_use_crio is True
# and cleans up after builds that pass through docker.
# Enable docker garbage collection when using cri-o
#openshift_crio_enable_docker_gc=false
@@ -194,6 +197,10 @@ openshift_release=v3.7
#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
#openshift_repos_enable_testing=false
+# If the etcd image needs to be pulled from somewhere other than registry.access.redhat.com, e.g. in
+# a disconnected and containerized installation, use osm_etcd_image to specify the image to use:
+#osm_etcd_image=rhel7/etcd
+
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
# Defining htpasswd users
@@ -279,8 +286,21 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_cloudprovider_openstack_region=region
#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id
#
+# Note: If you're getting a "BS API version autodetection failed" when provisioning cinder volumes you may need this setting
+#openshift_cloudprovider_openstack_blockstorage_version=v2
+#
# GCE
#openshift_cloudprovider_kind=gce
+#
+# vSphere
+#openshift_cloudprovider_kind=vsphere
+#openshift_cloudprovider_vsphere_username=username
+#openshift_cloudprovider_vsphere_password=password
+#openshift_cloudprovider_vsphere_host=vcenter_host or vsphere_host
+#openshift_cloudprovider_vsphere_datacenter=datacenter
+#openshift_cloudprovider_vsphere_datastore=datastore
+#openshift_cloudprovider_vsphere_folder=optional_folder_name
+
# Project Configuration
#osm_project_request_message=''
@@ -892,6 +912,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}
+#openshift_buildoverrides_tolerations=[{'key':'mykey1','value':'myvalue1','effect':'NoSchedule','operator':'Equal'}]
# Or you may optionally define your own build overrides configuration serialized as json
#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
@@ -941,7 +962,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
# Enable origin repos that point at Centos PAAS SIG, defaults to true, only used
-# by deployment_type=origin
+# by openshift_deployment_type=origin
#openshift_enable_origin_repo=false
# Validity of the auto-generated OpenShift certificates in days.
@@ -988,6 +1009,14 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# whereas this would not
# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
#
+# A timeout to wait for nodes to drain pods can be specified to ensure that the
+# upgrade continues even if nodes fail to drain pods in the allowed time. The
+# default value of 0 will wait indefinitely, allowing the admin to investigate
+# the root cause and ensuring that disruption budgets are respected. If a
+# timeout of 0 is used there will also be one attempt to re-try draining the
+# node. If a non-zero timeout is specified there will be no attempt to retry.
+#openshift_upgrade_nodes_drain_timeout=0
+#
# Multiple data migrations take place and if they fail they will fail the upgrade
# You may wish to disable these or make them non fatal
#
diff --git a/inventory/hosts.grafana.example b/inventory/hosts.grafana.example
new file mode 100644
index 000000000..a5999578f
--- /dev/null
+++ b/inventory/hosts.grafana.example
@@ -0,0 +1,12 @@
+[OSEv3:children]
+masters
+nodes
+
+[OSEv3:vars]
+# Grafana Configuration
+#gf_datasource_name="example"
+#gf_prometheus_namespace="openshift-metrics"
+#gf_oauth=true
+
+[masters]
+master
diff --git a/lookup_plugins/README.md b/lookup_plugins/README.md
deleted file mode 100644
index f05d608e5..000000000
--- a/lookup_plugins/README.md
+++ /dev/null
@@ -1 +0,0 @@
-openshift-ansible lookup plugins.
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 0d5964dda..a7943d5f4 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.9.0
-Release: 0.10.0%{?dist}
+Release: 0.21.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -24,9 +24,6 @@ Requires: tar
Requires: %{name}-docs = %{version}-%{release}
Requires: %{name}-playbooks = %{version}-%{release}
Requires: %{name}-roles = %{version}-%{release}
-Requires: %{name}-filter-plugins = %{version}-%{release}
-Requires: %{name}-lookup-plugins = %{version}-%{release}
-Requires: %{name}-callback-plugins = %{version}-%{release}
Requires: java-1.8.0-openjdk-headless
Requires: httpd-tools
Requires: libselinux-python
@@ -52,8 +49,6 @@ popd
# Base openshift-ansible install
mkdir -p %{buildroot}%{_datadir}/%{name}
mkdir -p %{buildroot}%{_datadir}/ansible/%{name}
-mkdir -p %{buildroot}%{_datadir}/ansible_plugins
-cp -rp library %{buildroot}%{_datadir}/ansible/%{name}/
# openshift-ansible-bin install
mkdir -p %{buildroot}%{_bindir}
@@ -88,31 +83,6 @@ rm -rf %{buildroot}%{_datadir}/ansible/%{name}/roles/contiv/*
# touch a file in contiv so that it can be added to SCM's
touch %{buildroot}%{_datadir}/ansible/%{name}/roles/contiv/.empty_dir
-# openshift_master_facts symlinks filter_plugins/oo_filters.py from ansible_plugins/filter_plugins
-pushd %{buildroot}%{_datadir}/ansible/%{name}/roles/openshift_master_facts/filter_plugins
-ln -sf ../../../../../ansible_plugins/filter_plugins/oo_filters.py oo_filters.py
-popd
-
-# openshift-ansible-filter-plugins install
-cp -rp filter_plugins %{buildroot}%{_datadir}/ansible_plugins/
-
-# openshift-ansible-lookup-plugins install
-cp -rp lookup_plugins %{buildroot}%{_datadir}/ansible_plugins/
-
-# openshift-ansible-callback-plugins install
-cp -rp callback_plugins %{buildroot}%{_datadir}/ansible_plugins/
-
-# create symlinks from /usr/share/ansible/plugins/lookup ->
-# /usr/share/ansible_plugins/lookup_plugins
-pushd %{buildroot}%{_datadir}
-mkdir -p ansible/plugins
-pushd ansible/plugins
-ln -s ../../ansible_plugins/lookup_plugins lookup
-ln -s ../../ansible_plugins/filter_plugins filter
-ln -s ../../ansible_plugins/callback_plugins callback
-popd
-popd
-
# atomic-openshift-utils install
pushd utils
%{__python} setup.py install --skip-build --root %{buildroot}
@@ -131,7 +101,6 @@ popd
%license LICENSE
%dir %{_datadir}/ansible/%{name}
%{_datadir}/ansible/%{name}/files
-%{_datadir}/ansible/%{name}/library
%ghost %{_datadir}/ansible/%{name}/playbooks/common/openshift-master/library.rpmmoved
# ----------------------------------------------------------------------------------
@@ -155,9 +124,6 @@ BuildArch: noarch
Summary: Openshift and Atomic Enterprise Ansible Playbooks
Requires: %{name} = %{version}-%{release}
Requires: %{name}-roles = %{version}-%{release}
-Requires: %{name}-lookup-plugins = %{version}-%{release}
-Requires: %{name}-filter-plugins = %{version}-%{release}
-Requires: %{name}-callback-plugins = %{version}-%{release}
BuildArch: noarch
%description playbooks
@@ -198,9 +164,9 @@ end
# ----------------------------------------------------------------------------------
Summary: Openshift and Atomic Enterprise Ansible roles
Requires: %{name} = %{version}-%{release}
-Requires: %{name}-lookup-plugins = %{version}-%{release}
-Requires: %{name}-filter-plugins = %{version}-%{release}
-Requires: %{name}-callback-plugins = %{version}-%{release}
+Obsoletes: %{name}-lookup-plugins
+Obsoletes: %{name}-filter-plugins
+Obsoletes: %{name}-callback-plugins
BuildArch: noarch
%description roles
@@ -209,55 +175,6 @@ BuildArch: noarch
%files roles
%{_datadir}/ansible/%{name}/roles
-
-# ----------------------------------------------------------------------------------
-# openshift-ansible-filter-plugins subpackage
-# ----------------------------------------------------------------------------------
-%package filter-plugins
-Summary: Openshift and Atomic Enterprise Ansible filter plugins
-Requires: %{name} = %{version}-%{release}
-BuildArch: noarch
-Requires: pyOpenSSL
-
-%description filter-plugins
-%{summary}.
-
-%files filter-plugins
-%{_datadir}/ansible_plugins/filter_plugins
-%{_datadir}/ansible/plugins/filter
-
-
-# ----------------------------------------------------------------------------------
-# openshift-ansible-lookup-plugins subpackage
-# ----------------------------------------------------------------------------------
-%package lookup-plugins
-Summary: Openshift and Atomic Enterprise Ansible lookup plugins
-Requires: %{name} = %{version}-%{release}
-BuildArch: noarch
-
-%description lookup-plugins
-%{summary}.
-
-%files lookup-plugins
-%{_datadir}/ansible_plugins/lookup_plugins
-%{_datadir}/ansible/plugins/lookup
-
-
-# ----------------------------------------------------------------------------------
-# openshift-ansible-callback-plugins subpackage
-# ----------------------------------------------------------------------------------
-%package callback-plugins
-Summary: Openshift and Atomic Enterprise Ansible callback plugins
-Requires: %{name} = %{version}-%{release}
-BuildArch: noarch
-
-%description callback-plugins
-%{summary}.
-
-%files callback-plugins
-%{_datadir}/ansible_plugins/callback_plugins
-%{_datadir}/ansible/plugins/callback
-
# ----------------------------------------------------------------------------------
# atomic-openshift-utils subpackage
# ----------------------------------------------------------------------------------
@@ -285,6 +202,214 @@ Atomic OpenShift Utilities includes
%changelog
+* Wed Jan 17 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.21.0
+- Add call to 3.8 playbook in 3.9 upgrade (sdodson@redhat.com)
+- Remove 3.8 and 3.9 specific steps right now (sdodson@redhat.com)
+- Exclude 3.9 packages during 3.8 upgrade (sdodson@redhat.com)
+- fix typos (sdodson@redhat.com)
+- Ensure openshift_client_binary is set (sdodson@redhat.com)
+- Add init/main.yml to etc-upgrade (mgugino@redhat.com)
+- Fix a typo in "Determine if growpart is installed" (vrutkovs@redhat.com)
+- Check rc for commands with openshift_client_binary and failed_when
+ (vrutkovs@redhat.com)
+- Update console config for API changes (spadgett@redhat.com)
+- include elasticsearch container name (jvallejo@redhat.com)
+- openshift_checks: repair adhoc list-checks mode (lmeyer@redhat.com)
+- Remove tuned-profiles from list of master packages upgraded
+ (sdodson@redhat.com)
+- Add missing task that got dropped in a refactor (sdodson@redhat.com)
+- Web Console: use a different var for asset config (vrutkovs@redhat.com)
+- Document the inventory change (tomas@sedovic.cz)
+- Move the OpenStack dynamic inventory from sample (tomas@sedovic.cz)
+- fix bug 1534271 (wmeng@redhat.com)
+- Don't use from ansible.module_utils.six as it's no longer available in Ansible
+ 2.4 (vrutkovs@redhat.com)
+- Add console RBAC template (spadgett@redhat.com)
+- Setup master groups in order to use the master group's ansible_ssh_user to
+ pull bootstrap kubeconfig. (abutcher@redhat.com)
+- adding ability to add network policy objects. (shawn.hurley21@gmail.com)
+- add python2-boto3 package for centos-based origin-ansible container image
+ (jdiaz@redhat.com)
+- adding ability to interact with network resources. (shawn.hurley21@gmail.com)
+- Adding .ini to inventory_ignore_extensions (bedin@redhat.com)
+
+* Mon Jan 15 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.20.0
+- Adjust openstack provider dependencies versions (bdobreli@redhat.com)
+- Fix openstack provider playbook name in docs (bdobreli@redhat.com)
+- Install web console on upgrade (spadgett@redhat.com)
+- Add var for controller to enable async bindings (jpeeler@redhat.com)
+- Add cluster-operator playbook directory. (abutcher@redhat.com)
+- Move s3 & elb provisioning into their own playbooks s.t. they are applied
+ outside of the openshift_aws master provisioning tasks. (abutcher@redhat.com)
+- Update to AWS EC2 root vol size so that Health Check tasks pass
+ (mazzystr@gmail.com)
+- Configure Kuryr CNI daemon (mdulko@redhat.com)
+- Clean up host-local IPAM data while nodes are drained (danw@redhat.com)
+
+* Fri Jan 12 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.19.0
+-
+
+* Fri Jan 12 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.18.0
+-
+
+* Fri Jan 12 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.17.0
+- Update latest image streams and templates (sdodson@redhat.com)
+- Use webconsole.config.openshift.io/v1 API group (spadgett@redhat.com)
+- Add missing v3.9 gluster templates (sdodson@redhat.com)
+- Spelling and grammar changes to the advanced-configuration.md file.
+ (mbruzek@gmail.com)
+- Fixing openshift_hosted variable. (kwoodson@redhat.com)
+- Update deployment and apiserver with new certs (jpeeler@redhat.com)
+- Move more plugins to lib_utils (mgugino@redhat.com)
+- Add the ability to specify a timeout for node drain operations
+ (sdodson@redhat.com)
+- Add defaults for openshift_pkg_version (mgugino@redhat.com)
+- Fix typo in the advanced config docs (tomas@sedovic.cz)
+- Write guide on setting up PVs with Cinder (tomas@sedovic.cz)
+- Allow using server names in openstack dynamic inv (tomas@sedovic.cz)
+- Specify the Cinder version in the inventory (tomas@sedovic.cz)
+- Add documentation example (joel.pearson@gmail.com)
+- Add blockstorage version for openstack (joel.pearson@gmail.com)
+- logging: fix jinja filters to support py3 (vrutkovs@redhat.com)
+- Ability to specify override tolerations via the buildconfig overrider
+ (cdaley@redhat.com)
+- Chmod temp dirs created on localhost (mgugino@redhat.com)
+- Bug 1532787 - Add empty node selector to openshift-web-console namespace
+ (spadgett@redhat.com)
+- Remove become statements (mgugino@redhat.com)
+- Bug 1527178 - installation of logging stack failed: Invalid version specified
+ for Elasticsearch (nhosoi@redhat.com)
+- Limit host group scope on control-plane upgrades (mgugino@redhat.com)
+- Refactor version and move some checks into sanity_checks.py
+ (mgugino@redhat.com)
+- Updating tsb image names and template (ewolinet@redhat.com)
+- Ensure that openshift_facts role is imported whenever we rely on
+ openshift_client_binary (sdodson@redhat.com)
+- Add key check for facts_for_clusterrolebindings (nakayamakenjiro@gmail.com)
+- Update web console template (spadgett@redhat.com)
+- Use openshift_node_use_openshift_sdn when doing a containerized node upgrade
+ (vrutkovs@redhat.com)
+- Add iptables save handler (ichavero@redhat.com)
+- Fix: change import_role to include_role (mgugino@redhat.com)
+- docker storage setup for ami building (jdiaz@redhat.com)
+- ensure containerized bools are cast (mgugino@redhat.com)
+- Properly cast crio boolean variables to bool (mgugino@redhat.com)
+- Build containerized host group dynamically (mgugino@redhat.com)
+- install base_packages on oo_all_hosts (mgugino@redhat.com)
+- Add key existing check to collect facts for rolebidings
+ (nakayamakenjiro@gmail.com)
+- 3.9 upgrade: remove openshift.common.service_type (vrutkovs@redhat.com)
+- container-engine: move registry_auth.yml before pull (gscrivan@redhat.com)
+- Fix error in variable in comment (mscherer@users.noreply.github.com)
+- Switch back to dynamic include_role in logging loops (sdodson@redhat.com)
+- Use Contiv version 1.2.0 (flamingo@2thebatcave.com)
+- Contiv multi-master and other fixes (flamingo@2thebatcave.com)
+- Add missing dependency on openshift_facts (sdodson@redhat.com)
+- upgrades: set openshift_client_binary fact when running on oo_first_master
+ host (vrutkovs@redhat.com)
+- Install web console server (spadgett@redhat.com)
+- Remove become=no from various roles and tasks (mgugino@redhat.com)
+- Don't overwrite node's systemd units for containerized install
+ (vrutkovs@redhat.com)
+- Migrate to import_role for static role inclusion (sdodson@redhat.com)
+- docker_upgrade_check: skip repoquery calls on containerized setups
+ (vrutkovs@redhat.com)
+- Adding logic to disable and reenable external communication to ES during full
+ restart (ewolinet@redhat.com)
+- Provide example on how to use osm_etcd_image in a disconnected and
+ containerized installation (tkarlsso@redhat.com)
+- crio: create /etc/sysconfig/crio-storage (gscrivan@redhat.com)
+- crio: configure proxy variables (gscrivan@redhat.com)
+- Fix docker_image_availability checks (mgugino@redhat.com)
+- Install node packages in one task instead of 3 (mgugino@redhat.com)
+- Don't hardcode the network interface in the openshift_logging_mux role
+ (nkinder@redhat.com)
+- failure_summary: make sure msg is always a string (vrutkovs@redhat.com)
+- Adding logic to do a full cluster restart if we are incrementing our major
+ versions of ES (ewolinet@redhat.com)
+- test_oc_scale: add more scale test cases (vrutkovs@redhat.com)
+- test_oc_scale: fix test docstrings (vrutkovs@redhat.com)
+- Import prerequisites.yml for OpenStack (tomas@sedovic.cz)
+- Set the correct path to the openstack.conf file (tomas@sedovic.cz)
+- Return a openshift_node_labels as a dict (tomas@sedovic.cz)
+- Remove last of openshift_node role meta-depends (mgugino@redhat.com)
+- OpenStack provisioning -- support cns. (jmencak@redhat.com)
+- Fix yaml syntax error in the sample inventory (tomas@sedovic.cz)
+- Adding ability to update ami drive size. (kwoodson@redhat.com)
+- Add origin- prefix to ASB image (fabian@fabianism.us)
+- lint issues (davis.phillips@gmail.com)
+- add vsphere examples in hosts.example (davis.phillips@gmail.com)
+- add template and vsphere.conf (davis.phillips@gmail.com)
+- add vsphere cloud providers (davis.phillips@gmail.com)
+- Fix wrong indentation (ichavero@redhat.com)
+- Fix yaml indentation (ichavero@redhat.com)
+- Add iptables rules for flannel (ichavero@redhat.com)
+
+* Wed Jan 03 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.16.0
+- Add gluster 3.9 templates (sdodson@redhat.com)
+- Add in-tree CI scripts (mgugino@redhat.com)
+
+* Wed Jan 03 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.15.0
+-
+
+* Wed Jan 03 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.14.0
+- Cast openshift_docker_use_system_container to bool (mgugino@redhat.com)
+- Correct kubelet_args cloud-provider directories (mgugino@redhat.com)
+- Updating logging_facts to be able to pull values from config maps yaml files,
+ use diffs to keep custom changes, white list certain settings when creating
+ diffs (ewolinet@redhat.com)
+- Add docker auth credentials to system container install (mgugino@redhat.com)
+- Move wait_for_pods to its own play openshift_hosted (mgugino@redhat.com)
+- Remove oauth_template bits from openshift_facts (mgugino@redhat.com)
+
+* Tue Jan 02 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.13.0
+- Bug 1527178 - installation of logging stack failed: Invalid version specified
+ for Elasticsearch (nhosoi@redhat.com)
+- Remove bootstrap.yml from main.yml in openshift_node role
+ (mgugino@redhat.com)
+
+* Tue Jan 02 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.12.0
+-
+
+* Mon Jan 01 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.11.0
+- aws: Fix misnamed variable in provisioning_vars.yml.example
+ (mbarnes@fedoraproject.org)
+- Fix container_runtime openshift_containerized_host_groups
+ (mgugino@redhat.com)
+- Remove references to deployment_type (mgugino@redhat.com)
+- Must directly specify google-cloud-sdk version (ccoleman@redhat.com)
+- daemonset config role. (kwoodson@redhat.com)
+- Move validate_hosts to prerequisites.yml (mgugino@redhat.com)
+- Move sanity_checks into custom action plugin (mgugino@redhat.com)
+- Remove openshift.common.{is_atomic|is_containerized} (mgugino@redhat.com)
+- Adding support for docker-storage-setup on overlay (kwoodson@redhat.com)
+- Add gcloud to the installer image (ccoleman@redhat.com)
+- Remove some small items from openshift_facts (mgugino@redhat.com)
+- Relocate filter plugins to lib_utils (mgugino@redhat.com)
+- Fix hosted_reg_router selectors (mgugino@redhat.com)
+- set repos after registration: convert to match task -> import_role model.
+ (markllama@gmail.com)
+- Remove openshift_node_facts role (mgugino@redhat.com)
+- Move node group tags to openshift_aws_{master,node}_group.
+ (abutcher@redhat.com)
+- Add CentOS-OpenShift-Origin37 repo template. (abutcher@redhat.com)
+- Adding no_log to registry_auth. (kwoodson@redhat.com)
+- Fix rhel_repos disable command (mazzystr@gmail.com)
+- Fix rhel_subscribe boolean (mgugino@redhat.com)
+- Move repo and subscribe to prerequisites (mgugino@redhat.com)
+- Deprecate using Ansible tests as filters (rteague@redhat.com)
+- Removing config trigger for ES DC, updating to use a handler to rollout ES at
+ the end of a deployment, allowing for override with variable
+ (ewolinet@redhat.com)
+- openshift_logging_{fluentd,mux}_file_buffer_limit mismatch
+ (nhosoi@redhat.com)
+- Update version check to Ansible 2.4.1 (rteague@redhat.com)
+- Remove openshift_node_facts part 1 (mgugino@redhat.com)
+- Validate node hostname and IP address (rteague@redhat.com)
+- Add missing openshift_service_type (mgugino@redhat.com)
+- prevent TSB pods from spinning on inappropriate nodes (jminter@redhat.com)
+- Add readiness probe to kuryr controller pod (ltomasbo@redhat.com)
+
* Thu Dec 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.10.0
- Bump requirements.txt to Ansible 2.4.1 (rteague@redhat.com)
- Commit to stabilize RHSM operations. This code is derived from contrib
@@ -426,7 +551,7 @@ Atomic OpenShift Utilities includes
- Update prometheus to 2.0.0 GA (zgalor@redhat.com)
- remove schedulable from openshift_facts (mgugino@redhat.com)
- inventory: Add example for service catalog vars (smilner@redhat.com)
-- Correct usage of include_role (rteague@redhat.com)
+- Correct usage of import_role (rteague@redhat.com)
- Remove openshift.common.cli_image (mgugino@redhat.com)
- Fix openshift_env fact creation within openshift_facts. (abutcher@redhat.com)
- Combine openshift_node and openshift_node_dnsmasq (mgugino@redhat.com)
@@ -1019,7 +1144,7 @@ Atomic OpenShift Utilities includes
- Renaming csr to bootstrap for consistency. (kwoodson@redhat.com)
- Add master config upgrade hook to upgrade-all plays (mgugino@redhat.com)
- Remove 'Not Started' status from playbook checkpoint (rteague@redhat.com)
-- Force include_role to static for loading openshift_facts module
+- Force import_role to static for loading openshift_facts module
(rteague@redhat.com)
- Make openshift-ansible depend on all subpackages (sdodson@redhat.com)
- Refactor health check playbooks (rteague@redhat.com)
@@ -3747,9 +3872,9 @@ Atomic OpenShift Utilities includes
- run node upgrade if master is node as part of the control plane upgrade only
(jchaloup@redhat.com)
- Appease yamllint (sdodson@redhat.com)
-- Adding include_role to block to resolve when eval (ewolinet@redhat.com)
+- Adding import_role to block to resolve when eval (ewolinet@redhat.com)
- Updating oc_apply to use command instead of shell (ewolinet@redhat.com)
-- Wrap openshift_hosted_logging include_role within a block.
+- Wrap openshift_hosted_logging import_role within a block.
(abutcher@redhat.com)
- Adding unit test. Fixed redudant calls to get. (kwoodson@redhat.com)
- Fixing doc and generating new label with updated base. (kwoodson@redhat.com)
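
Most of the hunks below swap include_role for import_role. A minimal sketch of the practical difference, using a hypothetical role name "myrole": import_role is static, resolved at parse time, and copies its when: onto every task inside the role, while include_role is dynamic, resolved at runtime, with the when: evaluated once before the role is included at all.

    ---
    # Sketch only; the role name "myrole" is hypothetical.
    - hosts: localhost
      gather_facts: no
      tasks:
        # Static: parsed up front; this `when` is applied to every
        # task inside the role.
        - import_role:
            name: myrole
            tasks_from: main.yml
          when: run_myrole | default(true) | bool

        # Dynamic: resolved only when execution reaches this task;
        # the `when` gates the include itself.
        - include_role:
            name: myrole
            tasks_from: main.yml
          when: run_myrole | default(true) | bool
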
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
index 69b2541bb..faeb332ad 100644
--- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml
+++ b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
@@ -10,7 +10,7 @@
- set_fact:
      openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}"
tasks:
- - include_role:
+ - import_role:
name: openshift_logging
tasks_from: update_master_config
when: openshift_hosted_logging_deploy | default(false) | bool
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 584117e6b..0e0e2b425 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -18,9 +18,8 @@
# Since we're not calling openshift_facts we'll do this for now
- set_fact:
- is_atomic: "{{ ostree_output.rc == 0 }}"
- - set_fact:
- is_containerized: "{{ is_atomic or containerized | default(false) | bool }}"
+ openshift_is_atomic: "{{ ostree_output.rc == 0 }}"
+ openshift_is_containerized: "{{ ostree_output.rc == 0 or containerized | default(false) | bool }}"
# Stop services on all hosts prior to removing files.
- hosts: nodes
@@ -133,7 +132,7 @@
when: openshift_use_flannel | default(false) | bool
register: result
until: result is succeeded
- when: not is_atomic | bool
+ when: not openshift_is_atomic | bool
- shell: systemctl reset-failed
changed_when: False
@@ -363,7 +362,7 @@
- name: Remove packages
package: name={{ item }} state=absent
- when: not is_atomic | bool and openshift_remove_all | default(True) | bool
+ when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
with_items:
- atomic-openshift
- atomic-openshift-clients
@@ -487,14 +486,14 @@
- name: Stop additional atomic services
service: name={{ item }} state=stopped
- when: is_containerized | bool
+ when: openshift_is_containerized | bool
with_items:
- etcd_container
failed_when: false
- name: Remove packages
package: name={{ item }} state=absent
- when: not is_atomic | bool and openshift_remove_all | default(True) | bool
+ when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
with_items:
- etcd
- etcd3
@@ -554,7 +553,7 @@
- name: Remove packages
package: name={{ item }} state=absent
- when: not is_atomic | bool and openshift_remove_all | default(True) | bool
+ when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
with_items:
- haproxy
register: result
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
index b03fb0b7f..a3fc82f9a 100644
--- a/playbooks/aws/openshift-cluster/install.yml
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -2,7 +2,7 @@
- name: Setup the master node group
hosts: localhost
tasks:
- - include_role:
+ - import_role:
name: openshift_aws
tasks_from: setup_master_group.yml
@@ -11,7 +11,7 @@
gather_facts: no
remote_user: root
tasks:
- - include_role:
+ - import_role:
name: openshift_aws
tasks_from: master_facts.yml
diff --git a/playbooks/aws/openshift-cluster/provision.yml b/playbooks/aws/openshift-cluster/provision.yml
index 4b5bd22ea..d538b862d 100644
--- a/playbooks/aws/openshift-cluster/provision.yml
+++ b/playbooks/aws/openshift-cluster/provision.yml
@@ -1,8 +1,7 @@
---
-- name: Setup the elb and the master node group
+- name: Alert user to variables needed
hosts: localhost
tasks:
-
- name: Alert user to variables needed - clusterid
debug:
msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
@@ -11,7 +10,14 @@
debug:
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
+- import_playbook: provision_s3.yml
+
+- import_playbook: provision_elb.yml
+
+- name: Create the master node group
+ hosts: localhost
+ tasks:
- name: provision cluster
- include_role:
+ import_role:
name: openshift_aws
tasks_from: provision.yml
diff --git a/playbooks/aws/openshift-cluster/provision_elb.yml b/playbooks/aws/openshift-cluster/provision_elb.yml
new file mode 100644
index 000000000..9f27dca3b
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_elb.yml
@@ -0,0 +1,9 @@
+---
+- name: Create elb
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: provision elb
+ include_role:
+ name: openshift_aws
+ tasks_from: provision_elb.yml
diff --git a/playbooks/aws/openshift-cluster/provision_instance.yml b/playbooks/aws/openshift-cluster/provision_instance.yml
index 6e843453c..6c7c1f069 100644
--- a/playbooks/aws/openshift-cluster/provision_instance.yml
+++ b/playbooks/aws/openshift-cluster/provision_instance.yml
@@ -7,6 +7,6 @@
gather_facts: no
tasks:
- name: create an instance and prepare for ami
- include_role:
+ import_role:
name: openshift_aws
tasks_from: provision_instance.yml
diff --git a/playbooks/aws/openshift-cluster/provision_nodes.yml b/playbooks/aws/openshift-cluster/provision_nodes.yml
index 44c686e08..82f147865 100644
--- a/playbooks/aws/openshift-cluster/provision_nodes.yml
+++ b/playbooks/aws/openshift-cluster/provision_nodes.yml
@@ -13,6 +13,6 @@
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
- name: create the node groups
- include_role:
+ import_role:
name: openshift_aws
tasks_from: provision_nodes.yml
diff --git a/playbooks/aws/openshift-cluster/provision_s3.yml b/playbooks/aws/openshift-cluster/provision_s3.yml
new file mode 100644
index 000000000..45b439083
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_s3.yml
@@ -0,0 +1,10 @@
+---
+- name: Create s3 bucket
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: create s3 bucket
+ include_role:
+ name: openshift_aws
+ tasks_from: s3.yml
+ when: openshift_aws_create_s3 | default(true) | bool
diff --git a/playbooks/aws/openshift-cluster/provision_sec_group.yml b/playbooks/aws/openshift-cluster/provision_sec_group.yml
index 7d74a691a..a0d4ec728 100644
--- a/playbooks/aws/openshift-cluster/provision_sec_group.yml
+++ b/playbooks/aws/openshift-cluster/provision_sec_group.yml
@@ -7,7 +7,7 @@
gather_facts: no
tasks:
- name: create security groups
- include_role:
+ import_role:
name: openshift_aws
tasks_from: security_group.yml
when: openshift_aws_create_security_groups | default(True) | bool
diff --git a/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml
index 3ec683958..d86ff9f9b 100644
--- a/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml
+++ b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml
@@ -4,7 +4,7 @@
gather_facts: no
tasks:
- name: create an instance and prepare for ami
- include_role:
+ import_role:
name: openshift_aws
tasks_from: ssh_keys.yml
vars:
diff --git a/playbooks/aws/openshift-cluster/provision_vpc.yml b/playbooks/aws/openshift-cluster/provision_vpc.yml
index 0a23a6d32..cf72f6c87 100644
--- a/playbooks/aws/openshift-cluster/provision_vpc.yml
+++ b/playbooks/aws/openshift-cluster/provision_vpc.yml
@@ -4,7 +4,7 @@
gather_facts: no
tasks:
- name: create a vpc
- include_role:
+ import_role:
name: openshift_aws
tasks_from: vpc.yml
when: openshift_aws_create_vpc | default(True) | bool
diff --git a/playbooks/aws/openshift-cluster/seal_ami.yml b/playbooks/aws/openshift-cluster/seal_ami.yml
index 8239a64fb..f315db604 100644
--- a/playbooks/aws/openshift-cluster/seal_ami.yml
+++ b/playbooks/aws/openshift-cluster/seal_ami.yml
@@ -7,6 +7,6 @@
become: no
tasks:
- name: seal the ami
- include_role:
+ import_role:
name: openshift_aws
tasks_from: seal_ami.yml
diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example
index 1491fb868..f6b1a6b5d 100644
--- a/playbooks/aws/provisioning_vars.yml.example
+++ b/playbooks/aws/provisioning_vars.yml.example
@@ -46,7 +46,7 @@ openshift_pkg_version: # -3.7.0
# Name of the subnet in the vpc to use. Needs to be set if using a pre-existing
# vpc + subnet.
-#openshift_aws_subnet_name:
+#openshift_aws_subnet_az:
# -------------- #
# Security Group #
@@ -93,6 +93,11 @@ openshift_aws_ssh_key_name: # myuser_key
# --------- #
# Variables in this section apply to building a node AMI for use in your
# openshift cluster.
+# openshift-ansible will perform the container runtime storage setup when specified
+# The current storage setup will require a drive if using a separate storage device
+# for the container runtime.
+container_runtime_docker_storage_type: overlay2
+container_runtime_docker_storage_setup_device: /dev/xvdb
# must specify a base_ami when building an AMI
openshift_aws_base_ami: # ami-12345678
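
As a hedged illustration only, the storage variables added above might sit alongside the other example settings in a completed provisioning_vars.yml roughly as follows; the device path assumes a second EBS volume is attached to the AMI build instance.

    ---
    # Hypothetical filled-in excerpt of provisioning_vars.yml.
    openshift_aws_region: us-east-1
    openshift_aws_base_ami: ami-12345678   # placeholder base AMI
    # Container runtime storage: overlay2 on a dedicated device.
    container_runtime_docker_storage_type: overlay2
    container_runtime_docker_storage_setup_device: /dev/xvdb
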
diff --git a/playbooks/byo/filter_plugins b/playbooks/byo/filter_plugins
deleted file mode 120000
index a4f518f07..000000000
--- a/playbooks/byo/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../filter_plugins \ No newline at end of file
diff --git a/playbooks/byo/lookup_plugins b/playbooks/byo/lookup_plugins
deleted file mode 120000
index c528bcd1d..000000000
--- a/playbooks/byo/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/filter_plugins b/playbooks/byo/openshift-cluster/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/byo/openshift-cluster/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/lookup_plugins b/playbooks/byo/openshift-cluster/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/byo/openshift-cluster/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index 3cb11a457..f70f05bac 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -6,9 +6,9 @@
roles:
- role: rhel_subscribe
when:
- - deployment_type == 'openshift-enterprise'
+ - openshift_deployment_type == 'openshift-enterprise'
- ansible_distribution == "RedHat"
- - rhsub_user | default(False)
- - rhsub_pass | default(False)
+ - rhsub_user is defined
+ - rhsub_pass is defined
- role: openshift_repos
- role: os_update_latest
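
The conditional change above is more than cosmetic: rhsub_user | default(False) only passes when the variable is set to a truthy (non-empty) value, whereas rhsub_user is defined passes for any defined value, empty string included. A small standalone sketch of the distinction, with a hypothetical variable:

    ---
    - hosts: localhost
      gather_facts: no
      vars:
        empty_var: ""
      tasks:
        # Skipped: an empty string is falsy, so the default(False)
        # form evaluates false.
        - debug:
            msg: truthy check passed
          when: empty_var | default(False)

        # Runs: the variable exists, even though its value is empty.
        - debug:
            msg: is defined check passed
          when: empty_var is defined
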
diff --git a/playbooks/cluster-operator/aws/infrastructure.yml b/playbooks/cluster-operator/aws/infrastructure.yml
new file mode 100644
index 000000000..9669820fb
--- /dev/null
+++ b/playbooks/cluster-operator/aws/infrastructure.yml
@@ -0,0 +1,21 @@
+---
+- name: Alert user to variables needed
+ hosts: localhost
+ tasks:
+ - name: Alert user to variables needed - clusterid
+ debug:
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
+
+ - name: Alert user to variables needed - region
+ debug:
+ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
+
+- import_playbook: ../../aws/openshift-cluster/provision_vpc.yml
+
+- import_playbook: ../../aws/openshift-cluster/provision_ssh_keypair.yml
+
+- import_playbook: ../../aws/openshift-cluster/provision_sec_group.yml
+
+- import_playbook: ../../aws/openshift-cluster/provision_s3.yml
+
+- import_playbook: ../../aws/openshift-cluster/provision_elb.yml
diff --git a/playbooks/cluster-operator/aws/roles b/playbooks/cluster-operator/aws/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/cluster-operator/aws/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/filter_plugins b/playbooks/common/openshift-cluster/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-cluster/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/library b/playbooks/common/openshift-cluster/library
deleted file mode 120000
index d0b7393d3..000000000
--- a/playbooks/common/openshift-cluster/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../library/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/lookup_plugins b/playbooks/common/openshift-cluster/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/common/openshift-cluster/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
index 23cf8cf76..6d82fa928 100644
--- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
+++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
@@ -2,7 +2,6 @@
- name: Create local temp directory for syncing certs
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- name: Create local temp directory for syncing certs
@@ -11,8 +10,15 @@
changed_when: false
when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
+ - name: Chmod local temp directory
+ local_action: command chmod 777 "{{ local_cert_sync_tmpdir.stdout }}"
+ changed_when: false
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
+
- name: Create service signer certificate
hosts: oo_first_master
+ roles:
+ - openshift_facts
tasks:
- name: Create remote temp directory for creating certs
command: mktemp -d /tmp/openshift-ansible-XXXXXXX
@@ -22,7 +28,7 @@
- name: Create service signer certificate
command: >
- {{ openshift.common.client_binary }} adm ca create-signer-cert
+ {{ openshift_client_binary }} adm ca create-signer-cert
--cert="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.crt
--key="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.key
--name="{{ remote_cert_create_tmpdir.stdout }}/"openshift-service-serving-signer
@@ -65,7 +71,6 @@
- name: Delete local temp directory
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- name: Delete local temp directory
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 42cd51bd9..8392e21ee 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -12,14 +12,11 @@
roles:
- openshift_facts
tasks:
- - set_fact:
- repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
-
- fail:
msg: Cannot upgrade Docker on Atomic operating systems.
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- - include_role:
+ - import_role:
name: container_runtime
tasks_from: docker_upgrade_check.yml
when: docker_upgrade is not defined or docker_upgrade | bool
@@ -54,13 +51,19 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
register: l_docker_upgrade_drain_result
until: not (l_docker_upgrade_drain_result is failed)
- retries: 60
- delay: 60
+ retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+ delay: 5
+ failed_when:
+ - l_docker_upgrade_drain_result is failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
- include_tasks: tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
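
The drain change above follows a pattern repeated in several upgrade plays in this diff: with no openshift_upgrade_nodes_drain_timeout set (0, meaning oc adm drain waits indefinitely), a failed drain is retried once and then fails the play, while a non-zero timeout makes the failure non-fatal via failed_when. A standalone sketch of the same shape, with a placeholder command:

    ---
    - hosts: localhost
      gather_facts: no
      vars:
        drain_timeout: 30   # hypothetical; with 0, failure is fatal
      tasks:
        - name: drain with timeout-aware retry semantics
          command: /bin/false   # placeholder for the real drain command
          register: drain_result
          until: not (drain_result is failed)
          retries: "{{ 1 if (drain_timeout | int) == 0 else 0 }}"
          delay: 5
          # Failure is fatal only when no timeout was requested.
          failed_when:
            - drain_result is failed
            - (drain_timeout | int) == 0
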
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index 385a141ea..3b47a11e0 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
@@ -15,7 +15,7 @@
- "{{ openshift_service_type }}-master-controllers"
- "{{ openshift_service_type }}-node"
failed_when: false
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
- name: Wait for master API to come back online
wait_for:
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index b5000d3a1..54eeb2ef5 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
@@ -10,7 +10,7 @@
- etcd_container
- openvswitch
failed_when: false
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
- name: Check Docker image count
shell: "docker images -aq | wc -l"
diff --git a/playbooks/common/openshift-cluster/upgrades/filter_plugins b/playbooks/common/openshift-cluster/upgrades/filter_plugins
deleted file mode 120000
index b1213dedb..000000000
--- a/playbooks/common/openshift-cluster/upgrades/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/lookup_plugins
deleted file mode 120000
index aff753026..000000000
--- a/playbooks/common/openshift-cluster/upgrades/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 50df8a890..f790fd98d 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -1,7 +1,13 @@
---
-###############################################################################
-# Post upgrade - Upgrade default router, default registry and examples
-###############################################################################
+####################################################################################
+# Post upgrade - Upgrade web console, default router, default registry, and examples
+####################################################################################
+- name: Upgrade web console
+ hosts: oo_first_master
+ roles:
+ - role: openshift_web_console
+ when: openshift_web_console_install | default(true) | bool
+
- name: Upgrade default router and default registry
hosts: oo_first_master
vars:
@@ -27,8 +33,8 @@
- set_fact:
haproxy_routers: "{{ all_routers.results.results[0]['items'] |
- oo_pods_match_component(openshift_deployment_type, 'haproxy-router') |
- oo_select_keys_from_list(['metadata']) }}"
+ lib_utils_oo_pods_match_component(openshift_deployment_type, 'haproxy-router') |
+ lib_utils_oo_select_keys_from_list(['metadata']) }}"
when:
- all_routers.results.returncode == 0
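
The filter renames above (oo_* to lib_utils_oo_*) correspond to the changelog entry relocating the filter plugins into the lib_utils role. A hedged sketch of the renamed call shape, with made-up input data; it assumes the lib_utils role's plugins are on the filter path:

    ---
    - hosts: localhost
      gather_facts: no
      vars:
        fake_hostvars:
          host-a: {openshift: {common: {hostname: a.example.com}}}
          host-b: {openshift: {common: {hostname: b.example.com}}}
      tasks:
        # Select a subset of hosts, then collect a nested attribute.
        - debug:
            msg: "{{ fake_hostvars
                     | lib_utils_oo_select_keys(['host-a'])
                     | lib_utils_oo_collect('openshift.common.hostname') }}"
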
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
index d5b82d9a0..da63450b8 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -1,4 +1,6 @@
---
+# for control-plane upgrade, several variables may be passed in to this play
+# which may affect the tasks here and in imported playbooks.
# Pre-upgrade
- import_playbook: ../initialize_nodes_to_upgrade.yml
@@ -14,10 +16,10 @@
hosts: "{{ l_upgrade_no_proxy_hosts }}"
tasks:
- set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
| union(groups['oo_masters_to_config'])
| union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
when:
- openshift_http_proxy is defined or openshift_https_proxy is defined
@@ -48,6 +50,8 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
+ # l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
+ # l_openshift_version_check_hosts is passed via upgrade_control_plane.yml
# If we're only upgrading nodes, we need to ensure masters are already upgraded
- name: Verify masters are already upgraded
@@ -72,6 +76,6 @@
- name: Verify docker upgrade targets
hosts: "{{ l_upgrade_docker_target_hosts }}"
tasks:
- - include_role:
+ - import_role:
name: container_runtime
tasks_from: docker_upgrade_check.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
index 3fc18c9b7..693ab2d96 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -5,11 +5,6 @@
hosts: oo_first_master
gather_facts: no
tasks:
- - fail:
- msg: >
- This upgrade is only supported for origin and openshift-enterprise
- deployment types
- when: deployment_type not in ['origin','openshift-enterprise']
# Error out in situations where the user has older versions specified in their
# inventory in any of the openshift_release, openshift_image_tag, and
@@ -71,7 +66,7 @@
local_facts:
ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- - when: openshift.common.is_containerized | bool
+ - when: openshift_is_containerized | bool
block:
- set_fact:
master_services:
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 065a9a8ab..45ddf7eea 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -5,7 +5,7 @@
when: openshift.common.version is not defined
- name: Update oreg_auth docker login credentials if necessary
- include_role:
+ import_role:
name: container_runtime
tasks_from: registry_auth.yml
when: oreg_auth_user is defined
@@ -15,13 +15,13 @@
docker pull {{ openshift_cli_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
-- when: not openshift.common.is_containerized | bool
+- when: not openshift_is_containerized | bool
block:
- name: Check latest available OpenShift RPM version
repoquery:
- name: "{{ openshift_service_type }}"
+ name: "{{ openshift_service_type }}{{ '-' ~ openshift_release ~ '*' if openshift_release is defined else '' }}"
ignore_excluders: true
register: repoquery_out
@@ -49,5 +49,5 @@
fail:
msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
when:
- - deployment_type == 'origin'
+ - openshift_deployment_type == 'origin'
- openshift.common.version is version_compare(openshift_upgrade_min,'<')
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 44724e979..e89f06f17 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -22,10 +22,12 @@
# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
- name: Pre master upgrade - Upgrade all storage
hosts: oo_first_master
+ roles:
+ - openshift_facts
tasks:
- name: Upgrade all storage
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
migrate storage --include=* --confirm
register: l_pb_upgrade_control_plane_pre_upgrade_storage
when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
@@ -49,10 +51,9 @@
vars:
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
+ roles:
+ - openshift_facts
tasks:
- - include_role:
- name: openshift_facts
-
# Run the pre-upgrade hook if defined:
- debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
@@ -60,7 +61,7 @@
- include_tasks: "{{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
- - include_role:
+ - import_role:
name: openshift_master
tasks_from: upgrade.yml
@@ -86,7 +87,7 @@
- name: Post master upgrade - Upgrade clusterpolicies storage
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
migrate storage --include=clusterpolicies --confirm
register: l_pb_upgrade_control_plane_post_upgrade_storage
when:
@@ -108,12 +109,11 @@
- name: Gate on master update
hosts: localhost
connection: local
- become: no
tasks:
- set_fact:
master_update_completed: "{{ hostvars
- | oo_select_keys(groups.oo_masters_to_config)
- | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+ | lib_utils_oo_select_keys(groups.oo_masters_to_config)
+ | lib_utils_oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
- set_fact:
master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) | list }}"
- fail:
@@ -128,12 +128,13 @@
hosts: oo_masters_to_config
roles:
- { role: openshift_cli }
+ - { role: openshift_facts }
vars:
__master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
tasks:
- name: Reconcile Cluster Roles
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-roles --additive-only=true --confirm -o name
register: reconcile_cluster_role_result
when: openshift_version is version_compare('3.7','<')
@@ -144,7 +145,7 @@
- name: Reconcile Cluster Role Bindings
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-role-bindings
--exclude-groups=system:authenticated
--exclude-groups=system:authenticated:oauth
@@ -160,7 +161,7 @@
- name: Reconcile Jenkins Pipeline Role Bindings
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name
run_once: true
register: reconcile_jenkins_role_binding_result
changed_when:
@@ -214,7 +215,7 @@
- name: Reconcile Security Context Constraints
command: >
- {{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
+ {{ openshift_client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
register: reconcile_scc_result
changed_when:
- reconcile_scc_result.stdout != ''
@@ -223,7 +224,7 @@
- name: Migrate storage post policy reconciliation
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
migrate storage --include=* --confirm
run_once: true
register: l_pb_upgrade_control_plane_post_upgrade_storage
@@ -242,12 +243,11 @@
- name: Gate on reconcile
hosts: localhost
connection: local
- become: no
tasks:
- set_fact:
reconcile_completed: "{{ hostvars
- | oo_select_keys(groups.oo_masters_to_config)
- | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+ | lib_utils_oo_select_keys(groups.oo_masters_to_config)
+ | lib_utils_oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
- set_fact:
reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) | list }}"
- fail:
@@ -262,7 +262,7 @@
- openshift_facts
tasks:
- include_tasks: docker/tasks/upgrade.yml
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift_is_atomic | bool
- name: Drain and upgrade master nodes
hosts: oo_masters_to_config:&oo_nodes_to_upgrade
@@ -291,21 +291,25 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_control_plane_drain_result
until: not (l_upgrade_control_plane_drain_result is failed)
- retries: 60
- delay: 60
+ retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+ delay: 5
+ failed_when:
+ - l_upgrade_control_plane_drain_result is failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
roles:
- openshift_facts
post_tasks:
- - include_role:
+ - import_role:
name: openshift_node
tasks_from: upgrade.yml
- vars:
- openshift_node_upgrade_in_progress: True
- name: Set node schedulability
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 956ad0d53..850442b3b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -4,11 +4,9 @@
roles:
- role: openshift_facts
tasks:
- - include_role:
+ - import_role:
name: openshift_node
tasks_from: upgrade_pre.yml
- vars:
- openshift_node_upgrade_in_progress: True
- name: Drain and upgrade nodes
hosts: oo_nodes_to_upgrade:!oo_masters_to_config
@@ -35,19 +33,23 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not (l_upgrade_nodes_drain_result is failed)
- retries: 60
- delay: 60
+ retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+ delay: 5
+ failed_when:
+ - l_upgrade_nodes_drain_result is failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
post_tasks:
- - include_role:
+ - import_role:
name: openshift_node
tasks_from: upgrade.yml
- vars:
- openshift_node_upgrade_in_progress: True
- name: Set node schedulability
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
@@ -62,7 +64,7 @@
- name: Re-enable excluders
hosts: oo_nodes_to_upgrade:!oo_masters_to_config
tasks:
- - include_role:
+ - import_role:
name: openshift_excluder
vars:
r_openshift_excluder_action: enable
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index e8c0f361a..e259b5d09 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -3,7 +3,7 @@
hosts: localhost
tasks:
- name: build upgrade scale groups
- include_role:
+ import_role:
name: openshift_aws
tasks_from: upgrade_node_group.yml
@@ -43,24 +43,24 @@
tasks:
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }}
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
--config={{ openshift.common.config_base }}/master/admin.kubeconfig
--force --delete-local-data --ignore-daemonsets
--timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not (l_upgrade_nodes_drain_result is failed)
- retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0 | int }}"
+ retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
delay: 5
failed_when:
- l_upgrade_nodes_drain_result is failed
- - openshift_upgrade_nodes_drain_timeout | default(0) == '0'
+ - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
# Alright, let's clean up!
- name: clean up the old scale group
hosts: localhost
tasks:
- name: clean up scale group
- include_role:
+ import_role:
name: openshift_aws
tasks_from: remove_scale_group.yml
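
The retries correction above fixes a Jinja precedence pitfall: in the old expression, | int bound only to the trailing 0, and the timeout was compared unconverted against the string '0'. A minimal illustration under hypothetical values:

    ---
    - hosts: localhost
      gather_facts: no
      vars:
        timeout: "0"   # a string, as inventory values often are
      tasks:
        # Parsed as: 1 if (timeout == 0) else (0 | int) -> 0 here,
        # because the string "0" does not equal the integer 0.
        - debug:
            msg: "{{ 1 if timeout == 0 else 0 | int }}"

        # Casting before comparing yields the intended result -> 1 here.
        - debug:
            msg: "{{ 1 if (timeout | int) == 0 else 0 }}"
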
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins
deleted file mode 120000
index 7de3c1dd7..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index a5ad3801d..d520c6aee 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -13,7 +13,7 @@
tasks:
- set_fact:
openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
- import_playbook: ../pre/config.yml
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 1498db4c5..eb5f07ae0 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -14,16 +14,21 @@
- import_playbook: ../init.yml
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
- import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+ # them by default.
vars:
+ l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
l_upgrade_no_proxy_hosts: "oo_masters_to_config"
l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 6958652d8..4febe76ee 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -15,7 +15,7 @@
tasks:
- set_fact:
openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
- import_playbook: ../pre/config.yml
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins
deleted file mode 120000
index 7de3c1dd7..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 1750148d4..8d42e4c91 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -14,6 +14,7 @@
- import_playbook: ../init.yml
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -23,7 +24,11 @@
openshift_upgrade_min: '3.6'
- import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+ # them by default.
vars:
+ l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
l_upgrade_no_proxy_hosts: "oo_masters_to_config"
l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
index c8c87a9c3..9c7688981 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -7,6 +7,7 @@
hosts: oo_first_master
roles:
- { role: lib_openshift }
+ - { role: openshift_facts }
tasks:
- name: Check for invalid namespaces and SDN errors
@@ -14,7 +15,7 @@
# DO NOT DISABLE THIS, YOUR UPGRADE WILL FAIL IF YOU DO SO
- name: Confirm OpenShift authorization objects are in sync
command: >
- {{ openshift.common.client_binary }} adm migrate authorization
+ {{ openshift_client_binary }} adm migrate authorization
when:
- openshift_currently_installed_version is version_compare('3.7','<')
- openshift_upgrade_pre_authorization_migration_enabled | default(true) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins
deleted file mode 120000
index 7de3c1dd7..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index 0f74e0137..a9bf354cc 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -35,8 +35,6 @@
# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_7/master_config_upgrade.yml"
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index 08bfd239f..51da45311 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -14,6 +14,8 @@
- import_playbook: ../init.yml
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ when: not skip_version_info | default(false)
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -23,7 +25,11 @@
openshift_upgrade_min: '3.7'
- import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+ # them by default.
vars:
+ l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
l_upgrade_no_proxy_hosts: "oo_masters_to_config"
l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
@@ -42,8 +48,6 @@
# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_7/master_config_upgrade.yml"
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins
deleted file mode 120000
index 7de3c1dd7..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
index 1d4d1919c..ed97d539c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
@@ -1,20 +1 @@
---
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.election.lockName'
- yaml_value: 'openshift-master-controllers'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
- yaml_value: service-signer.crt
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
- yaml_value: service-signer.key
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: servingInfo.clientCA
- yaml_value: ca.crt
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index 0aea5069d..20e0c165e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -10,6 +10,7 @@
- set_fact:
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.7'
+ openshift_release: '3.9'
- import_playbook: ../pre/config.yml
vars:
@@ -31,8 +32,6 @@
# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_7/master_config_upgrade.yml"
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
@@ -41,13 +40,13 @@
roles:
- role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 05aa737c6..384eeed4c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -14,37 +14,84 @@
- import_playbook: ../init.yml
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-- name: Configure the upgrade target for the common upgrade tasks
+## Check to see if they're running 3.7 and, if so, upgrade them to 3.8 on the control plane.
+## If they've specified pkg_version or image_tag, preserve that for later use.
+- name: Configure the upgrade target for the common upgrade tasks 3.8
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
- openshift_upgrade_target: '3.9'
+ openshift_upgrade_target: '3.8'
openshift_upgrade_min: '3.7'
+ openshift_release: '3.8'
+    _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}"
+    _requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}"
+ when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
- import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+ # them by default.
vars:
+ l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
l_upgrade_no_proxy_hosts: "oo_masters_to_config"
l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
l_upgrade_verify_targets_hosts: "oo_masters_to_config"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
+ when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
-- import_playbook: validator.yml
-
-- name: Flag pre-upgrade checks complete for hosts without errors
+- name: Flag pre-upgrade checks complete for hosts without errors 3.8
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- set_fact:
pre_upgrade_complete: True
+ when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
# Pre-upgrade completed
+- import_playbook: ../upgrade_control_plane.yml
+ vars:
+ openshift_release: '3.8'
+ when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+
+## 3.8 upgrade complete we should now be able to upgrade to 3.9
+
+- name: Configure the upgrade target for the common upgrade tasks 3.9
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tasks:
+ - meta: clear_facts
+ - set_fact:
+ openshift_upgrade_target: '3.9'
+ openshift_upgrade_min: '3.8'
+ openshift_release: '3.9'
+    openshift_pkg_version: "{{ _requested_pkg_version | default('-3.9*') }}"
+ openshift_image_tag: "{{ _requested_image_tag | default('v3.9') }}"
+
+- import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+ # them by default.
+ vars:
+ l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ pre_upgrade_complete: True
- import_playbook: ../upgrade_control_plane.yml
vars:
- master_config_hook: "v3_7/master_config_upgrade.yml"
+ openshift_release: '3.9'
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
@@ -53,13 +100,13 @@
roles:
- role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
- import_playbook: ../post_control_plane.yml
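
The restructured play above is a double hop: when the first master still reports a version below 3.8, the 3.8 pre-checks and control-plane upgrade run first, each play gated on version_compare, before the facts are cleared and the 3.9 pass begins. A compressed, hypothetical sketch of that gating pattern:

    ---
    # Condensed shape of a two-phase (3.7 -> 3.8 -> 3.9) upgrade; the
    # version value and play contents are illustrative only.
    - hosts: localhost
      gather_facts: no
      vars:
        current_version: '3.7.23'
      tasks:
        - set_fact:
            openshift_upgrade_target: '3.8'
            openshift_release: '3.8'
          # Hop through 3.8 only when the cluster is still below it.
          when: current_version is version_compare('3.8', '<')

    - hosts: localhost
      gather_facts: no
      tasks:
        - meta: clear_facts   # drop the 3.8 facts before re-targeting
        - set_fact:
            openshift_upgrade_target: '3.9'
            openshift_release: '3.9'
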
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
index 1d1b255c1..859b1d88b 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -12,6 +12,7 @@
- set_fact:
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.7'
+ openshift_release: '3.9'
- import_playbook: ../pre/config.yml
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
index 4bd2d87b1..d8540abfb 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
@@ -1,5 +1,5 @@
---
-- name: Verify 3.9 specific upgrade checks
+- name: Verify 3.8 specific upgrade checks
hosts: oo_first_master
roles:
- { role: lib_openshift }
diff --git a/playbooks/container-runtime/private/build_container_groups.yml b/playbooks/container-runtime/private/build_container_groups.yml
new file mode 100644
index 000000000..7fd60743c
--- /dev/null
+++ b/playbooks/container-runtime/private/build_container_groups.yml
@@ -0,0 +1,6 @@
+---
+- name: create oo_hosts_containerized_managed_true host group
+ hosts: oo_all_hosts:!oo_nodes_to_config
+ tasks:
+ - group_by:
+ key: oo_hosts_containerized_managed_{{ (containerized | default(False)) | ternary('true','false') }}
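
The new play above relies on group_by to build host groups at runtime; the plays in config.yml and setup_storage.yml below then target the generated oo_hosts_containerized_managed_true group directly. A self-contained sketch of the mechanism:

    ---
    - hosts: localhost
      gather_facts: no
      vars:
        containerized: true   # would normally come from inventory
      tasks:
        # Creates the group "managed_true" (or "managed_false") on the fly.
        - group_by:
            key: managed_{{ (containerized | default(False)) | ternary('true', 'false') }}

    - hosts: managed_true
      gather_facts: no
      tasks:
        - debug:
            msg: "{{ inventory_hostname }} was sorted into managed_true"
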
diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml
index 67445edeb..7a49adcf0 100644
--- a/playbooks/container-runtime/private/config.yml
+++ b/playbooks/container-runtime/private/config.yml
@@ -1,26 +1,23 @@
---
-- hosts: "{{ l_containerized_host_groups }}"
- vars:
- l_chg_temp: "{{ openshift_containerized_host_groups | default([]) }}"
- l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
- # role: container_runtime is necessary here to bring role default variables
- # into the play scope.
+- import_playbook: build_container_groups.yml
+
+- hosts: oo_nodes_to_config:oo_hosts_containerized_managed_true
roles:
- role: container_runtime
tasks:
- - include_role:
+ - import_role:
name: container_runtime
tasks_from: package_docker.yml
when:
- not openshift_docker_use_system_container | bool
- not openshift_use_crio_only | bool
- - include_role:
+ - import_role:
name: container_runtime
tasks_from: systemcontainer_docker.yml
when:
- openshift_docker_use_system_container | bool
- not openshift_use_crio_only | bool
- - include_role:
+ - import_role:
name: container_runtime
tasks_from: systemcontainer_crio.yml
when:
diff --git a/playbooks/container-runtime/private/setup_storage.yml b/playbooks/container-runtime/private/setup_storage.yml
new file mode 100644
index 000000000..a6d396270
--- /dev/null
+++ b/playbooks/container-runtime/private/setup_storage.yml
@@ -0,0 +1,18 @@
+---
+- import_playbook: build_container_groups.yml
+
+- hosts: oo_nodes_to_config:oo_hosts_containerized_managed_true
+ vars:
+ l_chg_temp: "{{ hostvars[groups['oo_first_master'][0]]['openshift_containerized_host_groups'] | default([]) }}"
+ l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
+ # role: container_runtime is necessary here to bring role default variables
+ # into the play scope.
+ roles:
+ - role: container_runtime
+ tasks:
+ - import_role:
+ name: container_runtime
+ tasks_from: docker_storage_setup_overlay.yml
+ when:
+ - container_runtime_docker_storage_type|default('') == "overlay2"
+ - openshift_docker_is_node_or_master | bool
diff --git a/playbooks/container-runtime/setup_storage.yml b/playbooks/container-runtime/setup_storage.yml
new file mode 100644
index 000000000..98e876b2c
--- /dev/null
+++ b/playbooks/container-runtime/setup_storage.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: ../init/main.yml
+ vars:
+ skip_verison: True
+
+- import_playbook: private/setup_storage.yml
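Note that skip_verison is the variable's actual spelling in this tree: the condition in init/main.yml (below) tests the same misspelled name, so any override must match it verbatim. For example:

- import_playbook: ../init/main.yml
  vars:
    skip_verison: True  # sic: the consuming when: clause is spelled this way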
diff --git a/playbooks/deploy_cluster.yml b/playbooks/deploy_cluster.yml
index 0e6bde09a..5efdc486a 100644
--- a/playbooks/deploy_cluster.yml
+++ b/playbooks/deploy_cluster.yml
@@ -22,6 +22,9 @@
- import_playbook: openshift-hosted/private/config.yml
+- import_playbook: openshift-web-console/private/config.yml
+ when: openshift_web_console_install | default(true) | bool
+
- import_playbook: openshift-metrics/private/config.yml
when: openshift_metrics_install_metrics | default(false) | bool
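The web console playbook is imported with a default-on switch, so existing inventories pick it up automatically. To opt out, set the flag false; a sketch of an illustrative group_vars entry:

# group_vars/OSEv3.yml (location illustrative)
openshift_web_console_install: false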
diff --git a/playbooks/gcp/provision.yml b/playbooks/gcp/provision.yml
index 6016e6a78..b6edf9961 100644
--- a/playbooks/gcp/provision.yml
+++ b/playbooks/gcp/provision.yml
@@ -6,7 +6,7 @@
tasks:
- name: provision a GCP cluster in the specified project
- include_role:
+ import_role:
name: openshift_gcp
- name: run the cluster deploy
diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml
new file mode 100644
index 000000000..15b3dd492
--- /dev/null
+++ b/playbooks/init/base_packages.yml
@@ -0,0 +1,37 @@
+---
+- name: Install packages necessary for installer
+ hosts: oo_all_hosts
+ any_errors_fatal: true
+ tasks:
+ - when:
+ - not openshift_is_atomic | bool
+ block:
+ - name: Ensure openshift-ansible installer package deps are installed
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - iproute
+ - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
+ - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
+ - yum-utils
+ register: result
+ until: result is succeeded
+
+ - name: Ensure various deps for running system containers are installed
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - atomic
+ - ostree
+ - runc
+ when:
+ - >
+ (openshift_use_system_containers | default(False)) | bool
+ or (openshift_use_etcd_system_container | default(False)) | bool
+ or (openshift_use_openvswitch_system_container | default(False)) | bool
+ or (openshift_use_node_system_container | default(False)) | bool
+ or (openshift_use_master_system_container | default(False)) | bool
+ register: result
+ until: result is succeeded
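Both package tasks use the register/until retry idiom so a transient repository hiccup does not abort the run; Ansible retries 3 times with a 5-second delay by default. A sketch with the knobs made explicit (the retries/delay values are illustrative):

- name: Install a dependency, retrying transient repo errors
  package:
    name: iproute
    state: present
  register: result
  until: result is succeeded
  retries: 5
  delay: 10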
diff --git a/playbooks/init/evaluate_groups.yml b/playbooks/init/evaluate_groups.yml
index 8087f6ffc..c4cd226c9 100644
--- a/playbooks/init/evaluate_groups.yml
+++ b/playbooks/init/evaluate_groups.yml
@@ -2,7 +2,6 @@
- name: Populate config host groups
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- name: Load group name mapping variables
diff --git a/playbooks/init/facts.yml b/playbooks/init/facts.yml
index 4d40e472c..8e4206948 100644
--- a/playbooks/init/facts.yml
+++ b/playbooks/init/facts.yml
@@ -5,7 +5,9 @@
tasks:
- name: Initialize host facts
- hosts: oo_all_hosts
+ # l_upgrade_non_node_hosts is passed in via play during control-plane-only
+ # upgrades; otherwise oo_all_hosts is used.
+ hosts: "{{ l_upgrade_non_node_hosts | default('oo_all_hosts') }}"
tasks:
- name: load openshift_facts module
import_role:
@@ -13,7 +15,7 @@
# TODO: Should this role be refactored into health_checks??
- name: Run openshift_sanitize_inventory to set variables
- include_role:
+ import_role:
name: openshift_sanitize_inventory
- name: Detecting Operating System from ostree_booted
@@ -21,40 +23,24 @@
path: /run/ostree-booted
register: ostree_booted
- # Locally setup containerized facts for now
- - name: initialize_facts set fact l_is_atomic
- set_fact:
- l_is_atomic: "{{ ostree_booted.stat.exists }}"
-
- - name: initialize_facts set fact for containerized and l_is_*_system_container
+ # TODO(michaelgugino) remove this line once CI is updated.
+ - name: set openshift_deployment_type if unset
set_fact:
- l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
-
- # TODO: Should this be moved into health checks??
- # Seems as though any check that happens with a corresponding fail should move into health_checks
- - name: Validate python version - ans_dist is fedora and python is v3
- fail:
- msg: |
- openshift-ansible requires Python 3 for {{ ansible_distribution }};
- For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html
+ openshift_deployment_type: "{{ deployment_type }}"
when:
- - ansible_distribution == 'Fedora'
- - ansible_python['version']['major'] != 3
+ - openshift_deployment_type is undefined
+ - deployment_type is defined
- # TODO: Should this be moved into health checks??
- # Seems as though any check that happens with a corresponding fail should move into health_checks
- - name: Validate python version - ans_dist not Fedora and python must be v2
- fail:
- msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}"
- when:
- - ansible_distribution != 'Fedora'
- - ansible_python['version']['major'] != 2
+ - name: initialize_facts set fact openshift_is_atomic and openshift_is_containerized
+ set_fact:
+ openshift_is_atomic: "{{ ostree_booted.stat.exists }}"
+ openshift_is_containerized: "{{ ostree_booted.stat.exists or (containerized | default(false) | bool) }}"
# TODO: Should this be moved into health checks??
# Seems as though any check that happens with a corresponding fail should move into health_checks
# Fail as early as possible if Atomic and old version of Docker
- when:
- - l_is_atomic | bool
+ - openshift_is_atomic | bool
block:
# See https://access.redhat.com/articles/2317361
@@ -72,40 +58,7 @@
- l_atomic_docker_version.stdout | replace('"', '') is version_compare('1.12','>=')
msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
- - when:
- - not l_is_atomic | bool
- block:
- - name: Ensure openshift-ansible installer package deps are installed
- package:
- name: "{{ item }}"
- state: present
- with_items:
- - iproute
- - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
- - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
- - yum-utils
- register: result
- until: result is succeeded
-
- - name: Ensure various deps for running system containers are installed
- package:
- name: "{{ item }}"
- state: present
- with_items:
- - atomic
- - ostree
- - runc
- when:
- - >
- (openshift_use_system_containers | default(False)) | bool
- or (openshift_use_etcd_system_container | default(False)) | bool
- or (openshift_use_openvswitch_system_container | default(False)) | bool
- or (openshift_use_node_system_container | default(False)) | bool
- or (openshift_use_master_system_container | default(False)) | bool
- register: result
- until: result is succeeded
-
- - name: Gather Cluster facts and set is_containerized if needed
+ - name: Gather Cluster facts
openshift_facts:
role: common
local_facts:
@@ -113,7 +66,6 @@
deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
hostname: "{{ openshift_hostname | default(None) }}"
ip: "{{ openshift_ip | default(None) }}"
- is_containerized: "{{ l_is_containerized | default(None) }}"
public_hostname: "{{ openshift_public_hostname | default(None) }}"
public_ip: "{{ openshift_public_ip | default(None) }}"
portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
@@ -126,10 +78,10 @@
openshift_facts:
role: common
local_facts:
- no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
| union(groups['oo_masters_to_config'])
| union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
when:
- openshift_http_proxy is defined or openshift_https_proxy is defined
@@ -141,7 +93,14 @@
local_facts:
sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
- - name: initialize_facts set_fact repoquery command
- set_fact:
- repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
- repoquery_installed: "{{ 'dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins --installed' }}"
+- name: Initialize special first-master variables
+ hosts: oo_first_master
+ roles:
+ - role: openshift_facts
+ tasks:
+ - set_fact:
+ # We need to setup openshift_client_binary here for special uses of delegate_to in
+ # later roles and plays.
+ first_master_client_binary: "{{ openshift_client_binary }}"
+ #Some roles may require this to be set for first master
+ openshift_client_binary: "{{ openshift_client_binary }}"
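first_master_client_binary exists so tasks that run elsewhere but delegate to the first master can find the correct client executable. A hedged usage sketch (the task itself is illustrative, not from the repository):

- name: Query nodes through the first master's client binary
  command: >
    {{ hostvars[groups.oo_first_master.0].first_master_client_binary }} get nodes
  delegate_to: "{{ groups.oo_first_master.0 }}"
  run_once: true
  changed_when: false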
diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml
index 06e8ba504..8a3f4682d 100644
--- a/playbooks/init/main.yml
+++ b/playbooks/init/main.yml
@@ -17,15 +17,12 @@
- import_playbook: facts.yml
-- import_playbook: sanity_checks.yml
- when: not (skip_sanity_checks | default(False))
-
-- import_playbook: validate_hostnames.yml
- when: not (skip_validate_hostnames | default(False))
-
- import_playbook: version.yml
when: not (skip_verison | default(False))
+- import_playbook: sanity_checks.yml
+ when: not (skip_sanity_checks | default(False))
+
- name: Initialization Checkpoint End
hosts: all
gather_facts: false
diff --git a/playbooks/init/repos.yml b/playbooks/init/repos.yml
index 048b09e60..667f38ddd 100644
--- a/playbooks/init/repos.yml
+++ b/playbooks/init/repos.yml
@@ -4,13 +4,13 @@
gather_facts: no
tasks:
- name: subscribe instances to Red Hat Subscription Manager
- include_role:
+ import_role:
name: rhel_subscribe
when:
- ansible_distribution == 'RedHat'
- - deployment_type == 'openshift-enterprise'
- - rhsub_user | default(False)
- - rhsub_pass | default(False)
+ - openshift_deployment_type == 'openshift-enterprise'
+ - rhsub_user is defined
+ - rhsub_pass is defined
- name: initialize openshift repos
- include_role:
+ import_role:
name: openshift_repos
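Note the semantic shift in the subscription gate: rhsub_user | default(False) skipped the role when the variable was unset, empty, or otherwise falsy, while rhsub_user is defined fires whenever the variable exists at all, even with an empty value. Contrast:

# Old gate: truthiness, so an empty rhsub_user skipped the role
when: rhsub_user | default(False)

# New gate: presence only, so an empty rhsub_user still subscribes
when: rhsub_user is defined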
diff --git a/playbooks/init/sanity_checks.yml b/playbooks/init/sanity_checks.yml
index 26716a92d..52bcf42c0 100644
--- a/playbooks/init/sanity_checks.yml
+++ b/playbooks/init/sanity_checks.yml
@@ -1,51 +1,15 @@
---
- name: Verify Requirements
- hosts: oo_all_hosts
+ hosts: oo_first_master
+ roles:
+ - role: lib_utils
tasks:
- - fail:
- msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
-
- - fail:
- msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool
-
- - fail:
- msg: Nuage sdn can not be used with flannel
- when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
-
- - fail:
- msg: Contiv can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool
-
- - fail:
- msg: Contiv can not be used with flannel
- when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool
-
- - fail:
- msg: Contiv can not be used with nuage
- when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool
-
- - fail:
- msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool
-
- - fail:
- msg: The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both.
- when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool
-
- - fail:
- msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both
- when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool
-
- - fail:
- msg: Calico can not be used with Contiv in Openshift. Set either openshift_use_calico or openshift_use_contiv, but not both
- when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool
-
- - fail:
- msg: openshift_hostname must be 63 characters or less
- when: openshift_hostname is defined and openshift_hostname | length > 63
-
- - fail:
- msg: openshift_public_hostname must be 63 characters or less
- when: openshift_public_hostname is defined and openshift_public_hostname | length > 63
+ # sanity_checks is a custom action plugin defined in lib_utils.
+ # This module will loop through all the hostvars for each host
+ # specified in check_hosts.
+ # Since sanity_checks is an action_plugin, it executes on the control host.
+ # Thus, sanity_checks cannot gather new information about any hosts.
+ - name: Run variable sanity checks
+ sanity_checks:
+ check_hosts: "{{ groups['oo_all_hosts'] }}"
+ run_once: True
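Because sanity_checks is an action plugin, one control-host task now validates hostvars for every host in check_hosts, replacing a fail task per rule. The existing skip flag still short-circuits the play; an illustrative invocation:

# Bypass the checks for debugging (they exist for a reason):
# ansible-playbook playbooks/deploy_cluster.yml -e skip_sanity_checks=True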
diff --git a/playbooks/init/version.yml b/playbooks/init/version.yml
index 37a5284d5..962ee7220 100644
--- a/playbooks/init/version.yml
+++ b/playbooks/init/version.yml
@@ -2,20 +2,32 @@
# NOTE: requires openshift_facts be run
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
- roles:
- - openshift_version
+ tasks:
+ - include_role:
+ name: openshift_version
+ tasks_from: first_master.yml
+ - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version | default('') }}"
# NOTE: We set this even on etcd hosts as they may also later run as masters,
# and we don't want to install wrong version of docker and have to downgrade
# later.
- name: Set openshift_version for etcd, node, and master hosts
- hosts: oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master
+ hosts: "{{ l_openshift_version_set_hosts | default(l_default_version_set_hosts) }}"
vars:
- openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
- pre_tasks:
+ l_default_version_set_hosts: "oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master"
+ l_first_master_openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
+ l_first_master_openshift_pkg_version: "{{ hostvars[groups.oo_first_master.0].openshift_pkg_version | default('') }}"
+ l_first_master_openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag}}"
+ tasks:
- set_fact:
- openshift_pkg_version: -{{ openshift_version }}
- when: openshift_pkg_version is not defined
- - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version }}"
- roles:
- - openshift_version
+ openshift_version: "{{ l_first_master_openshift_version }}"
+ openshift_pkg_version: "{{ l_first_master_openshift_pkg_version }}"
+ openshift_image_tag: "{{ l_first_master_openshift_image_tag }}"
+
+# NOTE: These steps should only be run against masters and nodes.
+- name: Ensure the requested version packages are available.
+ hosts: "{{ l_openshift_version_check_hosts | default('oo_nodes_to_config:oo_masters_to_config:!oo_first_master') }}"
+ tasks:
+ - include_role:
+ name: openshift_version
+ tasks_from: masters_and_nodes.yml
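The rewritten play copies openshift_version, openshift_pkg_version, and openshift_image_tag from the first master to all other masters, nodes, and etcd hosts, replacing the old in-place derivation of openshift_pkg_version. Callers may also narrow the target set via l_openshift_version_set_hosts; a hedged sketch of how an upgrade entry point might scope it:

- import_playbook: ../init/main.yml
  vars:
    # Illustrative override: pin versions on the control plane only.
    l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"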
diff --git a/playbooks/openshift-checks/adhoc.yml b/playbooks/openshift-checks/adhoc.yml
index 414090733..249222ae4 100644
--- a/playbooks/openshift-checks/adhoc.yml
+++ b/playbooks/openshift-checks/adhoc.yml
@@ -11,6 +11,7 @@
# usage. Running this play only in localhost speeds up execution.
hosts: localhost
connection: local
+ gather_facts: false
roles:
- openshift_health_checker
vars:
diff --git a/playbooks/openshift-etcd/private/ca.yml b/playbooks/openshift-etcd/private/ca.yml
index f3bb3c2d1..72c39d546 100644
--- a/playbooks/openshift-etcd/private/ca.yml
+++ b/playbooks/openshift-etcd/private/ca.yml
@@ -5,7 +5,7 @@
- role: openshift_clock
- role: openshift_etcd_facts
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: ca.yml
vars:
diff --git a/playbooks/openshift-etcd/private/certificates-backup.yml b/playbooks/openshift-etcd/private/certificates-backup.yml
index ce21a1f96..2f9bef799 100644
--- a/playbooks/openshift-etcd/private/certificates-backup.yml
+++ b/playbooks/openshift-etcd/private/certificates-backup.yml
@@ -3,10 +3,10 @@
hosts: oo_first_etcd
any_errors_fatal: true
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: backup_generated_certificates.yml
- - include_role:
+ - import_role:
name: etcd
tasks_from: remove_generated_certificates.yml
@@ -14,6 +14,6 @@
hosts: oo_etcd_to_config
any_errors_fatal: true
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: backup_server_certificates.yml
diff --git a/playbooks/openshift-etcd/private/embedded2external.yml b/playbooks/openshift-etcd/private/embedded2external.yml
index be177b714..674bd5088 100644
--- a/playbooks/openshift-etcd/private/embedded2external.yml
+++ b/playbooks/openshift-etcd/private/embedded2external.yml
@@ -18,7 +18,7 @@
- role: openshift_facts
tasks:
- name: Check the master API is ready
- include_role:
+ import_role:
name: openshift_master
tasks_from: check_master_api_is_ready.yml
- set_fact:
@@ -31,8 +31,8 @@
name: "{{ master_service }}"
state: stopped
# 2. backup embedded etcd
- # Can't use with_items with include_role: https://github.com/ansible/ansible/issues/21285
- - include_role:
+ # Can't use with_items with import_role: https://github.com/ansible/ansible/issues/21285
+ - import_role:
name: etcd
tasks_from: backup.yml
vars:
@@ -40,7 +40,7 @@
r_etcd_common_embedded_etcd: "{{ true }}"
r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}"
- - include_role:
+ - import_role:
name: etcd
tasks_from: backup.archive.yml
vars:
@@ -56,7 +56,7 @@
- name: Backup etcd client certificates for master host
hosts: oo_first_master
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: backup_master_etcd_certificates.yml
@@ -73,10 +73,10 @@
hosts: oo_etcd_to_config[0]
gather_facts: no
pre_tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: disable_etcd.yml
- - include_role:
+ - import_role:
name: etcd
tasks_from: clean_data.yml
@@ -89,9 +89,12 @@
local_action: command mktemp -d /tmp/etcd_backup-XXXXXXX
register: g_etcd_client_mktemp
changed_when: False
- become: no
- - include_role:
+ - name: Chmod local temp directory for syncing etcd backup
+ local_action: command chmod 777 "{{ g_etcd_client_mktemp.stdout }}"
+ changed_when: False
+
+ - import_role:
name: etcd
tasks_from: backup.fetch.yml
vars:
@@ -101,7 +104,7 @@
r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
delegate_to: "{{ groups.oo_first_master[0] }}"
- - include_role:
+ - import_role:
name: etcd
tasks_from: backup.copy.yml
vars:
@@ -116,20 +119,19 @@
- name: Delete temporary directory
local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent
changed_when: False
- become: no
# 7. force new cluster from the backup
- name: Force new etcd cluster
hosts: oo_etcd_to_config[0]
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: backup.unarchive.yml
vars:
r_etcd_common_backup_tag: pre-migrate
r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
- - include_role:
+ - import_role:
name: etcd
tasks_from: backup.force_new_cluster.yml
vars:
@@ -143,7 +145,7 @@
- name: Configure master to use external etcd
hosts: oo_first_master
tasks:
- - include_role:
+ - import_role:
name: openshift_master
tasks_from: configure_external_etcd.yml
vars:
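A pattern that recurs in this file and in redeploy-ca.yml below: with become: no removed, the localhost temp directory is created as root, so it is chmod 777'd to let the unprivileged fetch/copy steps that stage backups through it keep writing. The pair in isolation (the register variable name is illustrative):

- name: Create local temp directory
  local_action: command mktemp -d /tmp/etcd_backup-XXXXXXX
  register: g_backup_mktemp
  changed_when: False

- name: Open the directory to the non-root sync steps
  local_action: command chmod 777 "{{ g_backup_mktemp.stdout }}"
  changed_when: False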
diff --git a/playbooks/openshift-etcd/private/filter_plugins b/playbooks/openshift-etcd/private/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/openshift-etcd/private/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/openshift-etcd/private/lookup_plugins b/playbooks/openshift-etcd/private/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/openshift-etcd/private/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/openshift-etcd/private/migrate.yml b/playbooks/openshift-etcd/private/migrate.yml
index 313ed8bec..3f8b44032 100644
--- a/playbooks/openshift-etcd/private/migrate.yml
+++ b/playbooks/openshift-etcd/private/migrate.yml
@@ -2,7 +2,6 @@
- name: Check if the master has embedded etcd
hosts: localhost
connection: local
- become: no
gather_facts: no
tags:
- always
@@ -15,7 +14,7 @@
- name: Run pre-checks
hosts: oo_etcd_to_migrate
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: migrate.pre_check.yml
vars:
@@ -43,7 +42,7 @@
roles:
- role: openshift_facts
post_tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: backup.yml
vars:
@@ -53,12 +52,11 @@
- name: Gate on etcd backup
hosts: localhost
connection: local
- become: no
tasks:
- set_fact:
etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.oo_etcd_to_migrate)
- | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
+ | lib_utils_oo_select_keys(groups.oo_etcd_to_migrate)
+ | lib_utils_oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
- set_fact:
etcd_backup_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_backup_completed) | list }}"
- fail:
@@ -70,7 +68,7 @@
hosts: oo_etcd_to_migrate
gather_facts: no
pre_tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: disable_etcd.yml
@@ -78,7 +76,7 @@
hosts: oo_etcd_to_migrate[0]
gather_facts: no
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: migrate.yml
vars:
@@ -90,7 +88,7 @@
hosts: oo_etcd_to_migrate[1:]
gather_facts: no
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: clean_data.yml
vars:
@@ -118,15 +116,15 @@
tasks:
- set_fact:
etcd_migration_completed: "{{ hostvars
- | oo_select_keys(groups.oo_etcd_to_migrate)
- | oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}"
+ | lib_utils_oo_select_keys(groups.oo_etcd_to_migrate)
+ | lib_utils_oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}"
- set_fact:
etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) | list }}"
- name: Add TTLs on the first master
hosts: oo_first_master[0]
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: migrate.add_ttls.yml
vars:
@@ -138,7 +136,7 @@
- name: Configure masters if etcd data migration is successful
hosts: oo_masters_to_config
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: migrate.configure_master.yml
when: etcd_migration_failed | length == 0
diff --git a/playbooks/openshift-etcd/private/redeploy-ca.yml b/playbooks/openshift-etcd/private/redeploy-ca.yml
index 158bcb849..a3acf6945 100644
--- a/playbooks/openshift-etcd/private/redeploy-ca.yml
+++ b/playbooks/openshift-etcd/private/redeploy-ca.yml
@@ -14,10 +14,10 @@
- name: Backup existing etcd CA certificate directories
hosts: oo_etcd_to_config
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: backup_ca_certificates.yml
- - include_role:
+ - import_role:
name: etcd
tasks_from: remove_ca_certificates.yml
@@ -26,7 +26,6 @@
- name: Create temp directory for syncing certs
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- name: Create local temp directory for syncing certs
@@ -34,10 +33,14 @@
register: g_etcd_mktemp
changed_when: false
+ - name: Chmod local temp directory for syncing certs
+ local_action: command chmod 777 "{{ g_etcd_mktemp.stdout }}"
+ changed_when: false
+
- name: Distribute etcd CA to etcd hosts
hosts: oo_etcd_to_config
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: distribute_ca.yml
vars:
@@ -47,14 +50,14 @@
- import_playbook: restart.yml
# Do not restart etcd when etcd certificates were previously expired.
when: ('expired' not in (hostvars
- | oo_select_keys(groups['etcd'])
- | oo_collect('check_results.check_results.etcd')
- | oo_collect('health')))
+ | lib_utils_oo_select_keys(groups['etcd'])
+ | lib_utils_oo_collect('check_results.check_results.etcd')
+ | lib_utils_oo_collect('health')))
- name: Retrieve etcd CA certificate
hosts: oo_first_etcd
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: retrieve_ca_certificates.yml
vars:
@@ -74,7 +77,6 @@
- name: Delete temporary directory on localhost
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- file:
@@ -87,15 +89,15 @@
when:
# masters
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ | lib_utils_oo_select_keys(groups['oo_masters_to_config'])
+ | lib_utils_oo_collect('check_results.check_results.ocp_certs')
+ | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ | lib_utils_oo_select_keys(groups['oo_masters_to_config'])
+ | lib_utils_oo_collect('check_results.check_results.ocp_certs')
+ | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
# etcd
- ('expired' not in (hostvars
- | oo_select_keys(groups['etcd'])
- | oo_collect('check_results.check_results.etcd')
- | oo_collect('health')))
+ | lib_utils_oo_select_keys(groups['etcd'])
+ | lib_utils_oo_collect('check_results.check_results.etcd')
+ | lib_utils_oo_collect('health')))
diff --git a/playbooks/openshift-etcd/private/restart.yml b/playbooks/openshift-etcd/private/restart.yml
index 0751480e2..a2a53651b 100644
--- a/playbooks/openshift-etcd/private/restart.yml
+++ b/playbooks/openshift-etcd/private/restart.yml
@@ -3,7 +3,7 @@
hosts: oo_etcd_to_config
serial: 1
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: restart.yml
when:
@@ -12,7 +12,7 @@
- name: Restart etcd
hosts: oo_etcd_to_config
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: restart.yml
when:
diff --git a/playbooks/openshift-etcd/private/scaleup.yml b/playbooks/openshift-etcd/private/scaleup.yml
index 3ef043ec8..8a9811a25 100644
--- a/playbooks/openshift-etcd/private/scaleup.yml
+++ b/playbooks/openshift-etcd/private/scaleup.yml
@@ -30,7 +30,7 @@
retries: 3
delay: 10
until: etcd_add_check.rc == 0
- - include_role:
+ - import_role:
name: etcd
tasks_from: server_certificates.yml
vars:
@@ -69,13 +69,13 @@
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
- | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'] | default([]) ))
- | oo_collect('openshift.common.hostname')
+ | lib_utils_oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'] | default([]) ))
+ | lib_utils_oo_collect('openshift.common.hostname')
| default(none, true) }}"
openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
roles:
- role: openshift_master_facts
post_tasks:
- - include_role:
+ - import_role:
name: openshift_master
tasks_from: update_etcd_client_urls.yml
diff --git a/playbooks/openshift-etcd/private/server_certificates.yml b/playbooks/openshift-etcd/private/server_certificates.yml
index 695b53990..ebcf4a5ff 100644
--- a/playbooks/openshift-etcd/private/server_certificates.yml
+++ b/playbooks/openshift-etcd/private/server_certificates.yml
@@ -5,7 +5,7 @@
roles:
- role: openshift_etcd_facts
post_tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: server_certificates.yml
vars:
diff --git a/playbooks/openshift-etcd/private/upgrade_backup.yml b/playbooks/openshift-etcd/private/upgrade_backup.yml
index 7dfea07f1..081c024fc 100644
--- a/playbooks/openshift-etcd/private/upgrade_backup.yml
+++ b/playbooks/openshift-etcd/private/upgrade_backup.yml
@@ -4,7 +4,7 @@
roles:
- role: openshift_etcd_facts
post_tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: backup.yml
vars:
@@ -14,12 +14,11 @@
- name: Gate on etcd backup
hosts: localhost
connection: local
- become: no
tasks:
- set_fact:
etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.oo_etcd_hosts_to_backup)
- | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
+ | lib_utils_oo_select_keys(groups.oo_etcd_hosts_to_backup)
+ | lib_utils_oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
- set_fact:
etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) | list }}"
- fail:
diff --git a/playbooks/openshift-etcd/private/upgrade_image_members.yml b/playbooks/openshift-etcd/private/upgrade_image_members.yml
index 339fc6b74..f9e50e748 100644
--- a/playbooks/openshift-etcd/private/upgrade_image_members.yml
+++ b/playbooks/openshift-etcd/private/upgrade_image_members.yml
@@ -1,12 +1,12 @@
---
# INPUT etcd_upgrade_version
# INPUT etcd_container_version
-# INPUT openshift.common.is_containerized
+# INPUT openshift_is_containerized
- name: Upgrade containerized hosts to {{ etcd_upgrade_version }}
hosts: oo_etcd_hosts_to_upgrade
serial: 1
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: upgrade_image.yml
vars:
@@ -14,4 +14,4 @@
etcd_peer: "{{ openshift.common.hostname }}"
when:
- etcd_container_version | default('99') is version_compare(etcd_upgrade_version,'<')
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
diff --git a/playbooks/openshift-etcd/private/upgrade_main.yml b/playbooks/openshift-etcd/private/upgrade_main.yml
index e373a4a4c..8997680f9 100644
--- a/playbooks/openshift-etcd/private/upgrade_main.yml
+++ b/playbooks/openshift-etcd/private/upgrade_main.yml
@@ -14,7 +14,7 @@
- name: Drop etcdctl profiles
hosts: oo_etcd_hosts_to_upgrade
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: drop_etcdctl.yml
diff --git a/playbooks/openshift-etcd/private/upgrade_rpm_members.yml b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
index 327a35b09..e78cc5826 100644
--- a/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
+++ b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
@@ -1,12 +1,12 @@
---
# INPUT etcd_upgrade_version
# INPUT etcd_rpm_version
-# INPUT openshift.common.is_containerized
+# INPUT openshift_is_containerized
- name: Upgrade to {{ etcd_upgrade_version }}
hosts: oo_etcd_hosts_to_upgrade
serial: 1
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: upgrade_rpm.yml
vars:
@@ -15,4 +15,4 @@
when:
- etcd_rpm_version.stdout | default('99') is version_compare(etcd_upgrade_version, '<')
- ansible_distribution == 'RedHat'
- - not openshift.common.is_containerized | bool
+ - not openshift_is_containerized | bool
diff --git a/playbooks/openshift-etcd/private/upgrade_step.yml b/playbooks/openshift-etcd/private/upgrade_step.yml
index 60127fc68..6aec838d4 100644
--- a/playbooks/openshift-etcd/private/upgrade_step.yml
+++ b/playbooks/openshift-etcd/private/upgrade_step.yml
@@ -2,7 +2,7 @@
- name: Determine etcd version
hosts: oo_etcd_hosts_to_upgrade
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: version_detect.yml
@@ -54,11 +54,11 @@
hosts: oo_etcd_hosts_to_upgrade
serial: 1
tasks:
- - include_role:
+ - import_role:
name: etcd
tasks_from: upgrade_image.yml
vars:
etcd_peer: "{{ openshift.common.hostname }}"
when:
- ansible_distribution == 'Fedora'
- - not openshift.common.is_containerized | bool
+ - not openshift_is_containerized | bool
diff --git a/playbooks/openshift-etcd/redeploy-certificates.yml b/playbooks/openshift-etcd/redeploy-certificates.yml
index 753878d70..8ea1994f7 100644
--- a/playbooks/openshift-etcd/redeploy-certificates.yml
+++ b/playbooks/openshift-etcd/redeploy-certificates.yml
@@ -5,6 +5,6 @@
- import_playbook: private/restart.yml
vars:
- g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}"
+ g_etcd_certificates_expired: "{{ ('expired' in (hostvars | lib_utils_oo_select_keys(groups['etcd']) | lib_utils_oo_collect('check_results.check_results.etcd') | lib_utils_oo_collect('health'))) | bool }}"
- import_playbook: ../openshift-master/private/restart.yml
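The renamed filters behave like their oo_* predecessors but now ship in the lib_utils role. What the chain evaluates, step by step (the data shape shown is illustrative):

# hostvars | lib_utils_oo_select_keys(groups['etcd'])
#   -> only the hostvars entries for etcd hosts
# ... | lib_utils_oo_collect('check_results.check_results.etcd')
#   -> each host's stored etcd certificate check result
# ... | lib_utils_oo_collect('health')
#   -> a flat list such as ['ok', 'expired']; any 'expired' entry sets
#      g_etcd_certificates_expired and gates the master restart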
diff --git a/playbooks/openshift-etcd/upgrade.yml b/playbooks/openshift-etcd/upgrade.yml
index ccc797527..71606e7e4 100644
--- a/playbooks/openshift-etcd/upgrade.yml
+++ b/playbooks/openshift-etcd/upgrade.yml
@@ -1,4 +1,7 @@
---
-- import_playbook: ../init/evaluate_groups.yml
+- import_playbook: ../init/main.yml
+ vars:
+ skip_verison: True
+ l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- import_playbook: private/upgrade_main.yml
diff --git a/playbooks/openshift-glusterfs/README.md b/playbooks/openshift-glusterfs/README.md
index 107bbfff6..19c381490 100644
--- a/playbooks/openshift-glusterfs/README.md
+++ b/playbooks/openshift-glusterfs/README.md
@@ -63,7 +63,7 @@ glusterfs
[OSEv3:vars]
ansible_ssh_user=root
-deployment_type=origin
+openshift_deployment_type=origin
[masters]
master
diff --git a/playbooks/openshift-glusterfs/private/config.yml b/playbooks/openshift-glusterfs/private/config.yml
index 19e14ab3e..9a5bc143d 100644
--- a/playbooks/openshift-glusterfs/private/config.yml
+++ b/playbooks/openshift-glusterfs/private/config.yml
@@ -14,12 +14,12 @@
- name: Open firewall ports for GlusterFS nodes
hosts: glusterfs
tasks:
- - include_role:
+ - import_role:
name: openshift_storage_glusterfs
tasks_from: firewall.yml
when:
- openshift_storage_glusterfs_is_native | default(True) | bool
- - include_role:
+ - import_role:
name: openshift_storage_glusterfs
tasks_from: kernel_modules.yml
when:
@@ -28,12 +28,12 @@
- name: Open firewall ports for GlusterFS registry nodes
hosts: glusterfs_registry
tasks:
- - include_role:
+ - import_role:
name: openshift_storage_glusterfs
tasks_from: firewall.yml
when:
- openshift_storage_glusterfs_registry_is_native | default(True) | bool
- - include_role:
+ - import_role:
name: openshift_storage_glusterfs
tasks_from: kernel_modules.yml
when:
@@ -43,7 +43,7 @@
hosts: oo_first_master
tasks:
- name: setup glusterfs
- include_role:
+ import_role:
name: openshift_storage_glusterfs
when: groups.oo_glusterfs_to_config | default([]) | count > 0
diff --git a/playbooks/openshift-glusterfs/private/filter_plugins b/playbooks/openshift-glusterfs/private/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/openshift-glusterfs/private/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/openshift-glusterfs/private/lookup_plugins b/playbooks/openshift-glusterfs/private/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/openshift-glusterfs/private/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/openshift-grafana/config.yml b/playbooks/openshift-grafana/config.yml
new file mode 100644
index 000000000..c7814207c
--- /dev/null
+++ b/playbooks/openshift-grafana/config.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
diff --git a/playbooks/openshift-grafana/private/config.yml b/playbooks/openshift-grafana/private/config.yml
new file mode 100644
index 000000000..ac753d63b
--- /dev/null
+++ b/playbooks/openshift-grafana/private/config.yml
@@ -0,0 +1,6 @@
+---
+- name: Deploy grafana server
+ hosts: masters
+ tasks:
+ - include_role:
+ name: openshift_grafana
diff --git a/playbooks/aws/openshift-cluster/filter_plugins b/playbooks/openshift-grafana/private/filter_plugins
index 99a95e4ca..99a95e4ca 120000
--- a/playbooks/aws/openshift-cluster/filter_plugins
+++ b/playbooks/openshift-grafana/private/filter_plugins
diff --git a/playbooks/aws/openshift-cluster/lookup_plugins b/playbooks/openshift-grafana/private/lookup_plugins
index ac79701db..ac79701db 120000
--- a/playbooks/aws/openshift-cluster/lookup_plugins
+++ b/playbooks/openshift-grafana/private/lookup_plugins
diff --git a/playbooks/openshift-grafana/private/roles b/playbooks/openshift-grafana/private/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/openshift-grafana/private/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/playbooks/openshift-hosted/private/config.yml b/playbooks/openshift-hosted/private/config.yml
index 036fe654d..4e7b98da2 100644
--- a/playbooks/openshift-hosted/private/config.yml
+++ b/playbooks/openshift-hosted/private/config.yml
@@ -21,6 +21,10 @@
- import_playbook: openshift_hosted_registry.yml
+- import_playbook: openshift_hosted_wait_for_pods.yml
+
+- import_playbook: openshift_hosted_registry_storage.yml
+
- import_playbook: cockpit-ui.yml
- import_playbook: install_docker_gc.yml
diff --git a/playbooks/openshift-hosted/private/install_docker_gc.yml b/playbooks/openshift-hosted/private/install_docker_gc.yml
index 1e3dfee07..03eb542d3 100644
--- a/playbooks/openshift-hosted/private/install_docker_gc.yml
+++ b/playbooks/openshift-hosted/private/install_docker_gc.yml
@@ -3,5 +3,5 @@
hosts: oo_first_master
gather_facts: false
tasks:
- - include_role:
+ - import_role:
name: openshift_docker_gc
diff --git a/playbooks/openshift-hosted/private/openshift_hosted_create_projects.yml b/playbooks/openshift-hosted/private/openshift_hosted_create_projects.yml
index d5ca5185c..b09432da2 100644
--- a/playbooks/openshift-hosted/private/openshift_hosted_create_projects.yml
+++ b/playbooks/openshift-hosted/private/openshift_hosted_create_projects.yml
@@ -2,6 +2,6 @@
- name: Create Hosted Resources - openshift projects
hosts: oo_first_master
tasks:
- - include_role:
+ - import_role:
name: openshift_hosted
tasks_from: create_projects.yml
diff --git a/playbooks/openshift-hosted/private/openshift_hosted_registry.yml b/playbooks/openshift-hosted/private/openshift_hosted_registry.yml
index 2a91a827c..659c95eda 100644
--- a/playbooks/openshift-hosted/private/openshift_hosted_registry.yml
+++ b/playbooks/openshift-hosted/private/openshift_hosted_registry.yml
@@ -5,7 +5,7 @@
- set_fact:
openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
- - include_role:
+ - import_role:
name: openshift_hosted
tasks_from: registry.yml
when:
diff --git a/playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml b/playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml
new file mode 100644
index 000000000..cfc47c9b2
--- /dev/null
+++ b/playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml
@@ -0,0 +1,13 @@
+---
+# This playbook waits for registry and router pods after both have been
+# created. It is intended to allow the tasks of deploying both to complete
+# before polling to save time.
+- name: Poll for hosted pod deployments
+ hosts: oo_first_master
+ tasks:
+ - import_role:
+ name: openshift_hosted
+ tasks_from: registry_storage.yml
+ when:
+ - openshift_hosted_manage_registry | default(True) | bool
+ - openshift_hosted_registry_registryurl is defined
diff --git a/playbooks/openshift-hosted/private/openshift_hosted_router.yml b/playbooks/openshift-hosted/private/openshift_hosted_router.yml
index bcb5a34a4..353377189 100644
--- a/playbooks/openshift-hosted/private/openshift_hosted_router.yml
+++ b/playbooks/openshift-hosted/private/openshift_hosted_router.yml
@@ -5,7 +5,7 @@
- set_fact:
openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
- - include_role:
+ - import_role:
name: openshift_hosted
tasks_from: router.yml
when:
diff --git a/playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml b/playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml
new file mode 100644
index 000000000..1f6868c2a
--- /dev/null
+++ b/playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml
@@ -0,0 +1,26 @@
+---
+# This playbook waits for registry and router pods after both have been
+# created. It is intended to allow the tasks of deploying both to complete
+# before polling to save time.
+- name: Poll for hosted pod deployments
+ hosts: oo_first_master
+ tasks:
+ - import_role:
+ name: openshift_hosted
+ tasks_from: wait_for_pod.yml
+ vars:
+ l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_router_wait }}"
+ l_openshift_hosted_wfp_items: "{{ openshift_hosted_routers }}"
+ when:
+ - openshift_hosted_manage_router | default(True) | bool
+ - openshift_hosted_router_registryurl is defined
+
+ - import_role:
+ name: openshift_hosted
+ tasks_from: wait_for_pod.yml
+ vars:
+ l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_registry_wait }}"
+ l_openshift_hosted_wfp_items: "{{ r_openshift_hosted_registry_list }}"
+ when:
+ - openshift_hosted_manage_registry | default(True) | bool
+ - openshift_hosted_registry_registryurl is defined
diff --git a/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml
index 7e9363c5f..b817221b8 100644
--- a/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml
+++ b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml
@@ -17,7 +17,7 @@
- name: Determine if docker-registry exists
command: >
- {{ openshift.common.client_binary }} get dc/docker-registry -o json
+ {{ openshift_client_binary }} get dc/docker-registry -o json
--config={{ mktemp.stdout }}/admin.kubeconfig
-n default
register: l_docker_registry_dc
@@ -26,11 +26,11 @@
- set_fact:
docker_registry_env_vars: "{{ ((l_docker_registry_dc.stdout | from_json)['spec']['template']['spec']['containers'][0]['env']
- | oo_collect('name'))
+ | lib_utils_oo_collect('name'))
| default([]) }}"
docker_registry_secrets: "{{ ((l_docker_registry_dc.stdout | from_json)['spec']['template']['spec']['volumes']
- | oo_collect('secret')
- | oo_collect('secretName'))
+ | lib_utils_oo_collect('secret')
+ | lib_utils_oo_collect('secretName'))
| default([]) }}"
changed_when: false
when: l_docker_registry_dc.rc == 0
@@ -38,7 +38,7 @@
# Replace dc/docker-registry environment variable certificate data if set.
- name: Update docker-registry environment variables
shell: >
- {{ openshift.common.client_binary }} env dc/docker-registry
+ {{ openshift_client_binary }} env dc/docker-registry
OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)"
OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-registry.crt)"
OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-registry.key)"
@@ -62,7 +62,7 @@
- name: Generate registry certificate
command: >
- {{ openshift.common.client_binary }} adm ca create-server-cert
+ {{ openshift_client_binary }} adm ca create-server-cert
--signer-cert={{ openshift.common.config_base }}/master/ca.crt
--signer-key={{ openshift.common.config_base }}/master/ca.key
--signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt
@@ -88,7 +88,7 @@
- name: Redeploy docker registry
command: >
- {{ openshift.common.client_binary }} deploy dc/docker-registry
+ {{ openshift_client_binary }} deploy dc/docker-registry
--latest
--config={{ mktemp.stdout }}/admin.kubeconfig
-n default
diff --git a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
index 2116c745c..0df748f47 100644
--- a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
+++ b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
@@ -17,7 +17,7 @@
- name: Determine if router exists
command: >
- {{ openshift.common.client_binary }} get dc/router -o json
+ {{ openshift_client_binary }} get dc/router -o json
--config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
-n default
register: l_router_dc
@@ -26,7 +26,7 @@
- name: Determine if router service exists
command: >
- {{ openshift.common.client_binary }} get svc/router -o json
+ {{ openshift_client_binary }} get svc/router -o json
--config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
-n default
register: l_router_svc
@@ -36,11 +36,11 @@
- name: Collect router environment variables and secrets
set_fact:
router_env_vars: "{{ ((l_router_dc.stdout | from_json)['spec']['template']['spec']['containers'][0]['env']
- | oo_collect('name'))
+ | lib_utils_oo_collect('name'))
| default([]) }}"
router_secrets: "{{ ((l_router_dc.stdout | from_json)['spec']['template']['spec']['volumes']
- | oo_collect('secret')
- | oo_collect('secretName'))
+ | lib_utils_oo_collect('secret')
+ | lib_utils_oo_collect('secretName'))
| default([]) }}"
changed_when: false
when: l_router_dc.rc == 0
@@ -52,7 +52,7 @@
- name: Update router environment variables
shell: >
- {{ openshift.common.client_binary }} env dc/router
+ {{ openshift_client_binary }} env dc/router
OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)"
OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-router.crt)"
OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-router.key)"
@@ -78,7 +78,7 @@
- name: Remove router service annotations
command: >
- {{ openshift.common.client_binary }} annotate service/router
+ {{ openshift_client_binary }} annotate service/router
service.alpha.openshift.io/serving-cert-secret-name-
service.alpha.openshift.io/serving-cert-signed-by-
--config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
@@ -86,7 +86,7 @@
- name: Add serving-cert-secret annotation to router service
command: >
- {{ openshift.common.client_binary }} annotate service/router
+ {{ openshift_client_binary }} annotate service/router
service.alpha.openshift.io/serving-cert-secret-name=router-certs
--config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
-n default
@@ -115,7 +115,7 @@
- ('service.alpha.openshift.io/serving-cert-secret-name') not in router_service_annotations
- ('service.alpha.openshift.io/serving-cert-signed-by') not in router_service_annotations
- - include_role:
+ - import_role:
name: openshift_hosted
tasks_from: main
vars:
@@ -129,7 +129,7 @@
- name: Redeploy router
command: >
- {{ openshift.common.client_binary }} deploy dc/router
+ {{ openshift_client_binary }} deploy dc/router
--latest
--config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
-n default
diff --git a/playbooks/openshift-loadbalancer/private/config.yml b/playbooks/openshift-loadbalancer/private/config.yml
index 2636d857e..4a83dd955 100644
--- a/playbooks/openshift-loadbalancer/private/config.yml
+++ b/playbooks/openshift-loadbalancer/private/config.yml
@@ -15,16 +15,16 @@
hosts: oo_lb_to_config
vars:
openshift_loadbalancer_frontends: "{{ (openshift_master_api_port | default(8443)
- | oo_openshift_loadbalancer_frontends(hostvars | oo_select_keys(groups['oo_masters']),
+ | lib_utils_oo_loadbalancer_frontends(hostvars | lib_utils_oo_select_keys(groups['oo_masters']),
openshift_use_nuage | default(false),
nuage_mon_rest_server_port | default(none)))
+ openshift_loadbalancer_additional_frontends | default([]) }}"
openshift_loadbalancer_backends: "{{ (openshift_master_api_port | default(8443)
- | oo_openshift_loadbalancer_backends(hostvars | oo_select_keys(groups['oo_masters']),
+ | lib_utils_oo_loadbalancer_backends(hostvars | lib_utils_oo_select_keys(groups['oo_masters']),
openshift_use_nuage | default(false),
nuage_mon_rest_server_port | default(none)))
+ openshift_loadbalancer_additional_backends | default([]) }}"
- openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
+ openshift_image_tag: "{{ hostvars[groups.oo_masters_to_config.0].openshift_image_tag }}"
roles:
- role: openshift_loadbalancer
- role: tuned
diff --git a/playbooks/openshift-loadbalancer/private/filter_plugins b/playbooks/openshift-loadbalancer/private/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/openshift-loadbalancer/private/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/openshift-loadbalancer/private/lookup_plugins b/playbooks/openshift-loadbalancer/private/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/openshift-loadbalancer/private/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/openshift-logging/private/config.yml b/playbooks/openshift-logging/private/config.yml
index bc59bd95a..d6b26647c 100644
--- a/playbooks/openshift-logging/private/config.yml
+++ b/playbooks/openshift-logging/private/config.yml
@@ -16,11 +16,12 @@
roles:
- openshift_logging
+# TODO: Remove when master config property is removed
- name: Update Master configs
hosts: oo_masters:!oo_first_master
tasks:
- block:
- - include_role:
+ - import_role:
name: openshift_logging
tasks_from: update_master_config
diff --git a/playbooks/openshift-logging/private/filter_plugins b/playbooks/openshift-logging/private/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/openshift-logging/private/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/openshift-logging/private/library b/playbooks/openshift-logging/private/library
deleted file mode 120000
index ba40d2f56..000000000
--- a/playbooks/openshift-logging/private/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../library \ No newline at end of file
diff --git a/playbooks/openshift-logging/private/lookup_plugins b/playbooks/openshift-logging/private/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/openshift-logging/private/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/openshift-management/add_many_container_providers.yml b/playbooks/openshift-management/add_many_container_providers.yml
index 62fdb11c5..45231a495 100644
--- a/playbooks/openshift-management/add_many_container_providers.yml
+++ b/playbooks/openshift-management/add_many_container_providers.yml
@@ -27,7 +27,7 @@
register: results
# Include openshift_management for access to filter_plugins.
- - include_role:
+ - import_role:
name: openshift_management
tasks_from: noop
diff --git a/playbooks/openshift-management/private/add_container_provider.yml b/playbooks/openshift-management/private/add_container_provider.yml
index facb3a5b9..25d4058e5 100644
--- a/playbooks/openshift-management/private/add_container_provider.yml
+++ b/playbooks/openshift-management/private/add_container_provider.yml
@@ -3,6 +3,6 @@
hosts: oo_first_master
tasks:
- name: Run the Management Integration Tasks
- include_role:
+ import_role:
name: openshift_management
tasks_from: add_container_provider
diff --git a/playbooks/openshift-management/private/config.yml b/playbooks/openshift-management/private/config.yml
index 3f1cdf713..22f3ee8f3 100644
--- a/playbooks/openshift-management/private/config.yml
+++ b/playbooks/openshift-management/private/config.yml
@@ -21,7 +21,7 @@
tasks:
- name: Run the CFME Setup Role
- include_role:
+ import_role:
name: openshift_management
vars:
template_dir: "{{ hostvars[groups.masters.0].r_openshift_management_mktemp.stdout }}"
diff --git a/playbooks/openshift-management/private/filter_plugins b/playbooks/openshift-management/private/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/openshift-management/private/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/openshift-management/private/library b/playbooks/openshift-management/private/library
deleted file mode 120000
index ba40d2f56..000000000
--- a/playbooks/openshift-management/private/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../library \ No newline at end of file
diff --git a/playbooks/openshift-management/private/uninstall.yml b/playbooks/openshift-management/private/uninstall.yml
index 9f35cc276..6097ea45a 100644
--- a/playbooks/openshift-management/private/uninstall.yml
+++ b/playbooks/openshift-management/private/uninstall.yml
@@ -3,6 +3,6 @@
hosts: masters[0]
tasks:
- name: Run the CFME Uninstall Role Tasks
- include_role:
+ import_role:
name: openshift_management
tasks_from: uninstall
diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml
index a90cd6b22..85be0e600 100644
--- a/playbooks/openshift-master/private/additional_config.yml
+++ b/playbooks/openshift-master/private/additional_config.yml
@@ -30,8 +30,8 @@
when: openshift_use_manageiq | default(true) | bool
- role: cockpit
when:
- - not openshift.common.is_atomic | bool
- - deployment_type == 'openshift-enterprise'
+ - not openshift_is_atomic | bool
+ - openshift_deployment_type == 'openshift-enterprise'
- osm_use_cockpit is undefined or osm_use_cockpit | bool
- openshift.common.deployment_subtype != 'registry'
- role: flannel_register
diff --git a/playbooks/openshift-master/private/certificates-backup.yml b/playbooks/openshift-master/private/certificates-backup.yml
index 4dbc041b0..56af18ca7 100644
--- a/playbooks/openshift-master/private/certificates-backup.yml
+++ b/playbooks/openshift-master/private/certificates-backup.yml
@@ -28,6 +28,7 @@
path: "{{ openshift.common.config_base }}/master/{{ item }}"
state: absent
with_items:
+ # certificates_to_synchronize is a custom filter in lib_utils
- "{{ hostvars[inventory_hostname] | certificates_to_synchronize(include_keys=false, include_ca=false) }}"
- "etcd.server.crt"
- "etcd.server.key"
diff --git a/playbooks/openshift-master/private/certificates.yml b/playbooks/openshift-master/private/certificates.yml
index f6afbc36f..d42d4402b 100644
--- a/playbooks/openshift-master/private/certificates.yml
+++ b/playbooks/openshift-master/private/certificates.yml
@@ -9,6 +9,6 @@
- role: openshift_ca
- role: openshift_master_certificates
openshift_master_etcd_hosts: "{{ hostvars
- | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
- | oo_collect('openshift.common.hostname')
+ | lib_utils_oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+ | lib_utils_oo_collect('openshift.common.hostname')
| default(none, true) }}"
diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml
index 15d301ddb..153ea9993 100644
--- a/playbooks/openshift-master/private/config.yml
+++ b/playbooks/openshift-master/private/config.yml
@@ -47,7 +47,7 @@
state: absent
when:
- rpmgenerated_config.stat.exists == true
- - deployment_type == 'openshift-enterprise'
+ - openshift_deployment_type == 'openshift-enterprise'
with_items:
- master
- node
@@ -56,9 +56,9 @@
- set_fact:
openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
openshift_master_etcd_hosts: "{{ hostvars
- | oo_select_keys(groups['oo_etcd_to_config']
+ | lib_utils_oo_select_keys(groups['oo_etcd_to_config']
| default([]))
- | oo_collect('openshift.common.hostname')
+ | lib_utils_oo_collect('openshift.common.hostname')
| default(none, true) }}"
roles:
- openshift_facts
@@ -150,8 +150,8 @@
hosts: oo_first_master
vars:
g_session_secrets_present: "{{ (openshift.master.session_auth_secrets | default([])) | length > 0 and (openshift.master.session_encryption_secrets | default([])) | length > 0 }}"
- g_session_auth_secrets: "{{ [ 24 | oo_generate_secret ] }}"
- g_session_encryption_secrets: "{{ [ 24 | oo_generate_secret ] }}"
+ g_session_auth_secrets: "{{ [ 24 | lib_utils_oo_generate_secret ] }}"
+ g_session_encryption_secrets: "{{ [ 24 | lib_utils_oo_generate_secret ] }}"
roles:
- role: openshift_facts
tasks:
@@ -172,11 +172,11 @@
openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
- | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
- | oo_collect('openshift.common.hostname')
+ | lib_utils_oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+ | lib_utils_oo_collect('openshift.common.hostname')
| default(none, true) }}"
- openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
- | oo_collect('openshift.common.ip') | default([]) | join(',')
+ openshift_no_proxy_etcd_host_ips: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+ | lib_utils_oo_collect('openshift.common.ip') | default([]) | join(',')
}}"
roles:
- role: openshift_master_facts
@@ -185,9 +185,6 @@
- role: openshift_builddefaults
- role: openshift_buildoverrides
- role: nickhammond.logrotate
- - role: contiv
- contiv_role: netmaster
- when: openshift_use_contiv | default(False) | bool
- role: openshift_master
openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
@@ -206,13 +203,13 @@
- role: calico_master
when: openshift_use_calico | default(false) | bool
tasks:
- - include_role:
+ - import_role:
name: kuryr
tasks_from: master
when: openshift_use_kuryr | default(false) | bool
- name: Setup the node group config maps
- include_role:
+ import_role:
name: openshift_node_group
when: openshift_master_bootstrap_enabled | default(false) | bool
run_once: True
diff --git a/playbooks/openshift-master/private/filter_plugins b/playbooks/openshift-master/private/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/openshift-master/private/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/openshift-master/private/library b/playbooks/openshift-master/private/library
deleted file mode 120000
index d0b7393d3..000000000
--- a/playbooks/openshift-master/private/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../library/ \ No newline at end of file
diff --git a/playbooks/openshift-master/private/lookup_plugins b/playbooks/openshift-master/private/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/openshift-master/private/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/openshift-master/private/redeploy-openshift-ca.yml b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
index 9f5502141..663c39868 100644
--- a/playbooks/openshift-master/private/redeploy-openshift-ca.yml
+++ b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
@@ -125,7 +125,6 @@
- name: Create temp directory for syncing certs
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- name: Create local temp directory for syncing certs
@@ -133,6 +132,10 @@
register: g_master_mktemp
changed_when: false
+ - name: Chmod local temp directory for syncing certs
+ local_action: command chmod 777 "{{ g_master_mktemp.stdout }}"
+ changed_when: false
+
- name: Retrieve OpenShift CA
hosts: oo_first_master
vars:
@@ -212,18 +215,18 @@
when:
# masters
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ | lib_utils_oo_select_keys(groups['oo_masters_to_config'])
+ | lib_utils_oo_collect('check_results.check_results.ocp_certs')
+ | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ | lib_utils_oo_select_keys(groups['oo_masters_to_config'])
+ | lib_utils_oo_collect('check_results.check_results.ocp_certs')
+ | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
# etcd
- ('expired' not in (hostvars
- | oo_select_keys(groups['etcd'])
- | oo_collect('check_results.check_results.etcd')
- | oo_collect('health')))
+ | lib_utils_oo_select_keys(groups['etcd'])
+ | lib_utils_oo_collect('check_results.check_results.etcd')
+ | lib_utils_oo_collect('health')))
- name: Distribute OpenShift CA certificate to nodes
hosts: oo_nodes_to_config
@@ -264,7 +267,6 @@
- name: Delete temporary directory on localhost
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- file:
@@ -277,24 +279,24 @@
when:
# nodes
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"}))
+ | lib_utils_oo_select_keys(groups['oo_nodes_to_config'])
+ | lib_utils_oo_collect('check_results.check_results.ocp_certs')
+ | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"}))
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"}))
+ | lib_utils_oo_select_keys(groups['oo_nodes_to_config'])
+ | lib_utils_oo_collect('check_results.check_results.ocp_certs')
+ | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"}))
# masters
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ | lib_utils_oo_select_keys(groups['oo_masters_to_config'])
+ | lib_utils_oo_collect('check_results.check_results.ocp_certs')
+ | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ | lib_utils_oo_select_keys(groups['oo_masters_to_config'])
+ | lib_utils_oo_collect('check_results.check_results.ocp_certs')
+ | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
# etcd
- ('expired' not in (hostvars
- | oo_select_keys(groups['etcd'])
- | oo_collect('check_results.check_results.etcd')
- | oo_collect('health')))
+ | lib_utils_oo_select_keys(groups['etcd'])
+ | lib_utils_oo_collect('check_results.check_results.etcd')
+ | lib_utils_oo_collect('health')))
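Every `when` block in this playbook follows one idiom: select the group's `hostvars`, collect each host's certificate `check_results`, then collect the `health` values and proceed only if none is `'expired'`. A rough sketch of the per-host structure those filters walk (values illustrative):

```yaml
# Approximate shape of the data being scanned; only the `health` fields are tested.
check_results:
  check_results:
    ocp_certs:
      - path: /etc/origin/master/master.server.crt
        health: ok        # an 'expired' value here vetoes the conditional
      - path: /etc/origin/master/ca-bundle.crt
        health: ok
    etcd:
      - health: ok
```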
diff --git a/playbooks/openshift-master/private/tasks/restart_hosts.yml b/playbooks/openshift-master/private/tasks/restart_hosts.yml
index a5dbe0590..76e1ea5f3 100644
--- a/playbooks/openshift-master/private/tasks/restart_hosts.yml
+++ b/playbooks/openshift-master/private/tasks/restart_hosts.yml
@@ -27,7 +27,6 @@
delay=10
timeout=600
port="{{ ansible_port | default(ansible_ssh_port | default(22,boolean=True),boolean=True) }}"
- become: no
# Now that ssh is back up we can wait for API on the remote system,
# avoiding some potential connection issues from local system:
diff --git a/playbooks/openshift-master/private/tasks/restart_services.yml b/playbooks/openshift-master/private/tasks/restart_services.yml
index 4e1b3a3be..cf2c282e3 100644
--- a/playbooks/openshift-master/private/tasks/restart_services.yml
+++ b/playbooks/openshift-master/private/tasks/restart_services.yml
@@ -1,4 +1,4 @@
---
-- include_role:
+- import_role:
name: openshift_master
tasks_from: restart.yml
diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
index 4f55d5c82..59e2b515c 100644
--- a/playbooks/openshift-master/private/tasks/wire_aggregator.yml
+++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
@@ -21,7 +21,7 @@
# TODO: this currently has a bug where hostnames are required
- name: Creating First Master Aggregator signer certs
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm ca create-signer-cert
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm ca create-signer-cert
--cert=/etc/origin/master/front-proxy-ca.crt
--key=/etc/origin/master/front-proxy-ca.key
--serial=/etc/origin/master/ca.serial.txt
@@ -84,7 +84,7 @@
- block:
- name: Create first master api-client config for Aggregator
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm create-api-client-config
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm create-api-client-config
--certificate-authority=/etc/origin/master/front-proxy-ca.crt
--signer-cert=/etc/origin/master/front-proxy-ca.crt
--signer-key=/etc/origin/master/front-proxy-ca.key
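`first_master_client_binary` replaces the `openshift.common.client_binary` fact in these commands. This hunk does not show where that hostvar is seeded; presumably an init play records the client binary on the first master. A hypothetical sketch of that kind of fact bootstrap:

```yaml
# Hypothetical, not part of this commit: one way such a hostvar could be seeded.
- name: Record client binary for the first master
  set_fact:
    first_master_client_binary: "{{ openshift_client_binary | default('oc') }}"
  delegate_to: "{{ groups.oo_first_master.0 }}"
  delegate_facts: true
```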
diff --git a/playbooks/openshift-master/private/validate_restart.yml b/playbooks/openshift-master/private/validate_restart.yml
index 1077d0b9c..60b0e5bb6 100644
--- a/playbooks/openshift-master/private/validate_restart.yml
+++ b/playbooks/openshift-master/private/validate_restart.yml
@@ -21,7 +21,6 @@
- name: Create temp file on localhost
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- local_action: command mktemp
@@ -38,7 +37,6 @@
- name: Cleanup temp file on localhost
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent
diff --git a/playbooks/openshift-master/scaleup.yml b/playbooks/openshift-master/scaleup.yml
index f717cd0e9..7d31340a2 100644
--- a/playbooks/openshift-master/scaleup.yml
+++ b/playbooks/openshift-master/scaleup.yml
@@ -4,7 +4,6 @@
- name: Ensure there are new_masters or new_nodes
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- fail:
diff --git a/playbooks/openshift-metrics/private/config.yml b/playbooks/openshift-metrics/private/config.yml
index 80cd93e5f..1e237e3f0 100644
--- a/playbooks/openshift-metrics/private/config.yml
+++ b/playbooks/openshift-metrics/private/config.yml
@@ -16,12 +16,13 @@
roles:
- role: openshift_metrics
+# TODO: Remove when master config property is removed
- name: OpenShift Metrics
hosts: oo_masters:!oo_first_master
serial: 1
tasks:
- name: Setup the non-first masters configs
- include_role:
+ import_role:
name: openshift_metrics
tasks_from: update_master_config.yaml
diff --git a/playbooks/openshift-metrics/private/library b/playbooks/openshift-metrics/private/library
deleted file mode 120000
index ba40d2f56..000000000
--- a/playbooks/openshift-metrics/private/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../library \ No newline at end of file
diff --git a/playbooks/openshift-nfs/private/filter_plugins b/playbooks/openshift-nfs/private/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/openshift-nfs/private/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/openshift-nfs/private/lookup_plugins b/playbooks/openshift-nfs/private/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/openshift-nfs/private/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/openshift-node/private/additional_config.yml b/playbooks/openshift-node/private/additional_config.yml
index b86cb3cc2..0881121c9 100644
--- a/playbooks/openshift-node/private/additional_config.yml
+++ b/playbooks/openshift-node/private/additional_config.yml
@@ -47,17 +47,23 @@
- role: nuage_node
when: openshift_use_nuage | default(false) | bool
-- name: Additional node config
- hosts: oo_nodes_use_contiv
+- name: Configure Contiv masters
+ hosts: oo_masters_to_config
+ roles:
+ - role: contiv
+ contiv_master: true
+ when: openshift_use_contiv | default(false) | bool
+
+- name: Configure rest of Contiv nodes
+ hosts: "{{ groups.oo_nodes_use_contiv | default([]) | difference(groups.oo_masters_to_config) }}"
roles:
- role: contiv
- contiv_role: netplugin
when: openshift_use_contiv | default(false) | bool
- name: Configure Kuryr node
hosts: oo_nodes_use_kuryr
tasks:
- - include_role:
+ - import_role:
name: kuryr
tasks_from: node
when: openshift_use_kuryr | default(false) | bool
diff --git a/playbooks/openshift-node/private/configure_nodes.yml b/playbooks/openshift-node/private/configure_nodes.yml
index 32b288c8b..a13173e63 100644
--- a/playbooks/openshift-node/private/configure_nodes.yml
+++ b/playbooks/openshift-node/private/configure_nodes.yml
@@ -4,13 +4,14 @@
vars:
openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
| union(groups['oo_masters_to_config'])
| union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
roles:
- role: openshift_clock
+ - role: openshift_cloud_provider
- role: openshift_node
- role: tuned
- role: nickhammond.logrotate
diff --git a/playbooks/openshift-node/private/containerized_nodes.yml b/playbooks/openshift-node/private/containerized_nodes.yml
index ef07669cb..644e6a69c 100644
--- a/playbooks/openshift-node/private/containerized_nodes.yml
+++ b/playbooks/openshift-node/private/containerized_nodes.yml
@@ -5,14 +5,15 @@
vars:
openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
| union(groups['oo_masters_to_config'])
| union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
roles:
- role: openshift_clock
+ - role: openshift_cloud_provider
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- role: nickhammond.logrotate
diff --git a/playbooks/openshift-node/private/filter_plugins b/playbooks/openshift-node/private/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/openshift-node/private/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/openshift-node/private/image_prep.yml b/playbooks/openshift-node/private/image_prep.yml
index 6b517197d..adcbb0fdb 100644
--- a/playbooks/openshift-node/private/image_prep.yml
+++ b/playbooks/openshift-node/private/image_prep.yml
@@ -12,6 +12,13 @@
- name: run node config
import_playbook: configure_nodes.yml
+- name: node bootstrap config
+ hosts: oo_nodes_to_config:!oo_containerized_master_nodes
+ tasks:
+ - import_role:
+ name: openshift_node
+ tasks_from: bootstrap.yml
+
- name: Re-enable excluders
import_playbook: enable_excluders.yml
diff --git a/playbooks/openshift-node/private/lookup_plugins b/playbooks/openshift-node/private/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/openshift-node/private/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml
index c2092b23c..7249ced70 100644
--- a/playbooks/openshift-node/private/restart.yml
+++ b/playbooks/openshift-node/private/restart.yml
@@ -28,7 +28,7 @@
- "{{ openshift_service_type }}-master-controllers"
- "{{ openshift_service_type }}-node"
failed_when: false
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
- name: Wait for master API to come back online
wait_for:
diff --git a/playbooks/openshift-node/private/setup.yml b/playbooks/openshift-node/private/setup.yml
index 541913aef..41c323f2b 100644
--- a/playbooks/openshift-node/private/setup.yml
+++ b/playbooks/openshift-node/private/setup.yml
@@ -8,7 +8,6 @@
- name: Evaluate node groups
hosts: localhost
- become: no
connection: local
tasks:
- name: Evaluate oo_containerized_master_nodes
@@ -21,6 +20,6 @@
when:
- hostvars[item].openshift is defined
- hostvars[item].openshift.common is defined
- - hostvars[item].openshift.common.is_containerized | bool
+ - hostvars[item].openshift_is_containerized | bool
- (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
changed_when: False
diff --git a/playbooks/openshift-node/scaleup.yml b/playbooks/openshift-node/scaleup.yml
index bdfd3d3e6..cf13692ae 100644
--- a/playbooks/openshift-node/scaleup.yml
+++ b/playbooks/openshift-node/scaleup.yml
@@ -4,7 +4,6 @@
- name: Ensure there are new_nodes
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- fail:
diff --git a/playbooks/openshift-web-console/config.yml b/playbooks/openshift-web-console/config.yml
new file mode 100644
index 000000000..c7814207c
--- /dev/null
+++ b/playbooks/openshift-web-console/config.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
diff --git a/playbooks/openshift-web-console/private/config.yml b/playbooks/openshift-web-console/private/config.yml
new file mode 100644
index 000000000..ffd702d20
--- /dev/null
+++ b/playbooks/openshift-web-console/private/config.yml
@@ -0,0 +1,31 @@
+---
+- name: Web Console Install Checkpoint Start
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set Web Console install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_web_console:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- name: Web Console
+ hosts: oo_first_master
+ roles:
+ - openshift_web_console
+ vars:
+ first_master: "{{ groups.oo_first_master[0] }}"
+
+- name: Web Console Install Checkpoint End
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set Web Console install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_web_console:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
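The new playbook wraps the role in the installer's checkpoint convention: a `set_stats` play marks the phase "In Progress" before the work and a second marks it "Complete" afterwards, so the status callback can report per-phase timing. Reduced to its skeleton (the phase name is illustrative):

```yaml
# Skeleton of the checkpoint pattern used across the installer playbooks.
- hosts: all
  gather_facts: false
  tasks:
    - run_once: true
      set_stats:
        data:
          installer_phase_example:
            status: "In Progress"
            start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"

# ... the plays that do the actual work go here ...

- hosts: all
  gather_facts: false
  tasks:
    - run_once: true
      set_stats:
        data:
          installer_phase_example:
            status: "Complete"
            end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
```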
diff --git a/playbooks/openshift-web-console/private/roles b/playbooks/openshift-web-console/private/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/openshift-web-console/private/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
index f3b691790..2eb668dd1 100644
--- a/playbooks/openstack/README.md
+++ b/playbooks/openstack/README.md
@@ -184,7 +184,9 @@ Then run the provision + install playbook -- this will create the OpenStack
resources:
```bash
-$ ansible-playbook --user openshift -i inventory \
+$ ansible-playbook --user openshift \
+  -i openshift-ansible/playbooks/openstack/inventory.py \
+ -i inventory \
openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yml \
-e openshift_repos_enable_testing=true
```
@@ -192,6 +194,11 @@ $ ansible-playbook --user openshift -i inventory \
Note, you may want to use the testing repo for development purposes only.
Normally, `openshift_repos_enable_testing` should not be specified.
+In addition to *your* inventory with your OpenShift and OpenStack
+configuration, we also supply the [dynamic inventory][dynamic] at
+`openshift-ansible/playbooks/openstack/inventory.py`: a script that looks up
+the Nova servers and other resources the playbooks create and tells Ansible about them.
+
If you're using multiple inventories, make sure you pass the path to
the right one to `-i`.
@@ -235,3 +242,4 @@ advanced configuration:
[loadbalancer]: ./advanced-configuration.md#multi-master-configuration
[external-dns]: ./advanced-configuration.md#dns-configuration-variables
[cinder-registry]: ./advanced-configuration.md#creating-and-using-a-cinder-volume-for-the-openshift-registry
+[dynamic]: http://docs.ansible.com/ansible/latest/intro_dynamic_inventory.html
diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md
index 2c9b70b5f..e8f4cfc32 100644
--- a/playbooks/openstack/advanced-configuration.md
+++ b/playbooks/openstack/advanced-configuration.md
@@ -1,9 +1,8 @@
## Dependencies for localhost (ansible control/admin node)
-* [Ansible 2.3](https://pypi.python.org/pypi/ansible)
-* [Ansible-galaxy](https://pypi.python.org/pypi/ansible-galaxy-local-deps)
-* [jinja2](http://jinja.pocoo.org/docs/2.9/)
-* [shade](https://pypi.python.org/pypi/shade)
+* [Ansible](https://pypi.python.org/pypi/ansible) version >=2.4.0
+* [jinja2](http://jinja.pocoo.org/docs/2.9/) version >= 2.10
+* [shade](https://pypi.python.org/pypi/shade) version >= 1.26
* python-jmespath / [jmespath](https://pypi.python.org/pypi/jmespath)
* python-dns / [dnspython](https://pypi.python.org/pypi/dnspython)
* Become (sudo) is not required.
@@ -133,7 +132,7 @@ You can also access the OpenShift cluster with a web browser by going to:
https://master-0.openshift.example.com:8443
Note that for this to work, the OpenShift nodes must be accessible
-from your computer and it's DNS configuration must use the cruster's
+from your computer and its DNS configuration must use the cluster's
DNS.
@@ -153,7 +152,7 @@ openstack stack delete --wait --yes openshift.example.com
Pay special attention to the values in the first paragraph -- these
will depend on your OpenStack environment.
-Note that the provsisioning playbooks update the original Neutron subnet
+Note that the provisioning playbooks update the original Neutron subnet
created with the Heat stack to point to the configured DNS servers.
So the provisioned cluster nodes will start using those natively as
default nameservers. Technically, this allows deploying OpenShift clusters
@@ -162,7 +161,7 @@ without dnsmasq proxies.
The `openshift_openstack_clusterid` and `openshift_openstack_public_dns_domain`
will form the cluster's public DNS domain all your servers will be under. With
the default values, this will be `openshift.example.com`. For workloads, the
-default subdomain is 'apps'. That sudomain can be set as well by the
+default subdomain is 'apps'. That subdomain can be set as well by the
`openshift_openstack_app_subdomain` variable in the inventory.
If you want to use two sets of hostnames for public and private/prefixed DNS
@@ -334,7 +333,7 @@ or your trusted network. The most important is the `openshift_openstack_node_ing
that restricts public access to the deployed DNS server and cluster
nodes' ephemeral ports range.
-Note, the command ``curl https://api.ipify.org`` helps fiding an external
+Note, the command ``curl https://api.ipify.org`` helps find an external
IP address of your box (the ansible admin node).
There is also the `manage_packages` variable (defaults to True) you
@@ -372,6 +371,112 @@ In order to set a custom entrypoint, update `openshift_master_cluster_public_hos
Note that an empty hostname does not work, so if your domain is `openshift.example.com`,
you cannot set this value to simply `openshift.example.com`.
+
+## Using Cinder-backed Persistent Volumes
+
+You will need to set up OpenStack credentials. You can try putting this in your
+`inventory/group_vars/OSEv3.yml`:
+
+ openshift_cloudprovider_kind: openstack
+ openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
+ openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}"
+ openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
+ openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_PROJECT_NAME') }}"
+ openshift_cloudprovider_openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
+ openshift_cloudprovider_openstack_blockstorage_version: v2
+
+**NOTE**: you must specify the Block Storage version as v2, because OpenShift
+does not support the v3 API yet and the version detection is currently not
+working properly.
+
+For more information, consult the [Configuring for OpenStack page in the OpenShift documentation][openstack-credentials].
+
+[openstack-credentials]: https://docs.openshift.org/latest/install_config/configuring_openstack.html#install-config-configuring-openstack
+
+**NOTE**: the OpenStack integration currently requires DNS to be configured
+and running, and the `openshift_hostname` variable must match the Nova server
+name for each node; the cluster deployment will fail otherwise. If you use the
+provided OpenStack dynamic inventory and configure the
+`openshift_openstack_dns_nameservers` Ansible variable, this will be handled
+for you.
+
+After a successful deployment, the cluster is configured for Cinder persistent
+volumes.
+
+### Validation
+
+1. Log in and create a new project (with `oc login` and `oc new-project`)
+2. Create a file called `cinder-claim.yaml` with the following contents:
+
+```yaml
+apiVersion: "v1"
+kind: "PersistentVolumeClaim"
+metadata:
+ name: "claim1"
+spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+```
+3. Run `oc create -f cinder-claim.yaml` to create the Persistent Volume Claim object in OpenShift
+4. Run `oc describe pvc claim1` to verify that the claim was created and its Status is `Bound`
+5. Run `openstack volume list`
+ * A new volume called `kubernetes-dynamic-pvc-UUID` should be created
+ * Its size should be `1`
+ * It should not be attached to any server
+6. Create a file called `mysql-pod.yaml` with the following contents:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: mysql
+ labels:
+ name: mysql
+spec:
+ containers:
+ - resources:
+    limits:
+ cpu: 0.5
+ image: openshift/mysql-55-centos7
+ name: mysql
+ env:
+ - name: MYSQL_ROOT_PASSWORD
+ value: yourpassword
+ - name: MYSQL_USER
+ value: wp_user
+ - name: MYSQL_PASSWORD
+ value: wp_pass
+ - name: MYSQL_DATABASE
+ value: wp_db
+ ports:
+ - containerPort: 3306
+ name: mysql
+ volumeMounts:
+ - name: mysql-persistent-storage
+ mountPath: /var/lib/mysql/data
+ volumes:
+ - name: mysql-persistent-storage
+ persistentVolumeClaim:
+ claimName: claim1
+```
+
+7. Run `oc create -f mysql-pod.yaml` to create the pod
+8. Run `oc describe pod mysql`
+ * Its events should show that the pod has successfully attached the volume above
+ * It should show no errors
+ * `openstack volume list` should show the volume attached to an OpenShift app node
+ * NOTE: this can take several seconds
+9. After a while, `oc get pod` should show the `mysql` pod as running
+10. Run `oc delete pod mysql` to remove the pod
+ * The Cinder volume should no longer be attached
+11. Run `oc delete pvc claim1` to remove the volume claim
+ * The Cinder volume should be deleted
+
+
+
## Creating and using a Cinder volume for the OpenShift registry
You can optionally have the playbooks create a Cinder volume and set
@@ -415,7 +520,7 @@ OpenStack)[openstack] for more information.
[openstack]: https://docs.openshift.org/latest/install_config/configuring_openstack.html
-Next, we need to instruct OpenShift to use the Cinder volume for it's
+Next, we need to instruct OpenShift to use the Cinder volume for its
registry. Again in `OSEv3.yml`:
#openshift_hosted_registry_storage_kind: openstack
@@ -470,12 +575,12 @@ The **Cinder volume ID**, **filesystem** and **volume size** variables
must correspond to the values in your volume. The volume ID must be
the **UUID** of the Cinder volume, *not its name*.
-We can do formate the volume for you if you ask for it in
+The volume can also be formatted if you configure it in
`inventory/group_vars/all.yml`:
openshift_openstack_prepare_and_format_registry_volume: true
-**NOTE:** doing so **will destroy any data that's currently on the volume**!
+**NOTE:** Formatting **will destroy any data that's currently on the volume**!
You can also run the registry setup playbook directly:
diff --git a/playbooks/openstack/sample-inventory/inventory.py b/playbooks/openstack/inventory.py
index ad3fd936b..76e658eb7 100755
--- a/playbooks/openstack/sample-inventory/inventory.py
+++ b/playbooks/openstack/inventory.py
@@ -9,6 +9,7 @@ environment.
from __future__ import print_function
+from collections import Mapping
import json
import shade
@@ -42,7 +43,10 @@ def build_inventory():
if server.metadata['host-type'] == 'node' and
server.metadata['sub-host-type'] == 'app']
- nodes = list(set(masters + infra_hosts + app))
+ cns = [server.name for server in cluster_hosts
+ if server.metadata['host-type'] == 'cns']
+
+ nodes = list(set(masters + infra_hosts + app + cns))
dns = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'dns']
@@ -59,6 +63,7 @@ def build_inventory():
inventory['nodes'] = {'hosts': nodes}
inventory['infra_hosts'] = {'hosts': infra_hosts}
inventory['app'] = {'hosts': app}
+ inventory['glusterfs'] = {'hosts': cns}
inventory['dns'] = {'hosts': dns}
inventory['lb'] = {'hosts': load_balancers}
@@ -84,16 +89,25 @@ def build_inventory():
# TODO(shadower): what about multiple networks?
if server.private_v4:
hostvars['private_v4'] = server.private_v4
+ hostvars['openshift_ip'] = server.private_v4
+
# NOTE(shadower): Yes, we set both hostname and IP to the private
# IP address for each node. OpenStack doesn't resolve nodes by
# name at all, so using a hostname here would require an internal
# DNS which would complicate the setup and potentially introduce
# performance issues.
- hostvars['openshift_ip'] = server.private_v4
- hostvars['openshift_hostname'] = server.private_v4
+ hostvars['openshift_hostname'] = server.metadata.get(
+ 'openshift_hostname', server.private_v4)
hostvars['openshift_public_hostname'] = server.name
+ if server.metadata['host-type'] == 'cns':
+ hostvars['glusterfs_devices'] = ['/dev/nvme0n1']
+
node_labels = server.metadata.get('node_labels')
+    # NOTE(shadower): the node_labels value must be a dict, not a string
+    if node_labels and not isinstance(node_labels, Mapping):
+        node_labels = json.loads(node_labels)
+
if node_labels:
hostvars['openshift_node_labels'] = node_labels
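With these changes, a Nova server whose metadata sets `host-type: cns` joins a new `glusterfs` inventory group and receives a default `glusterfs_devices` list. Assuming the code above, the generated hostvars for such a node would look roughly like:

```yaml
# Approximate hostvars emitted for a cns node (names and addresses illustrative).
cns-0.openshift.example.com:
  private_v4: 10.0.0.12
  openshift_ip: 10.0.0.12
  openshift_hostname: 10.0.0.12          # or the server's openshift_hostname metadata, if set
  openshift_public_hostname: cns-0.openshift.example.com
  glusterfs_devices: ['/dev/nvme0n1']
```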
diff --git a/playbooks/openstack/openshift-cluster/install.yml b/playbooks/openstack/openshift-cluster/install.yml
index 3211f619a..2ab7d14a0 100644
--- a/playbooks/openstack/openshift-cluster/install.yml
+++ b/playbooks/openstack/openshift-cluster/install.yml
@@ -9,4 +9,7 @@
# some logic here?
-- name: run the cluster deploy
+- name: run the prerequisites
+  import_playbook: ../../prerequisites.yml
+
+- name: run the cluster deploy
import_playbook: ../../deploy_cluster.yml
diff --git a/playbooks/openstack/openshift-cluster/prerequisites.yml b/playbooks/openstack/openshift-cluster/prerequisites.yml
index 0356b37dd..8bb700501 100644
--- a/playbooks/openstack/openshift-cluster/prerequisites.yml
+++ b/playbooks/openstack/openshift-cluster/prerequisites.yml
@@ -2,11 +2,11 @@
- hosts: localhost
tasks:
- name: Check dependencies and OpenStack prerequisites
- include_role:
+ import_role:
name: openshift_openstack
tasks_from: check-prerequisites.yml
- name: Check network configuration
- include_role:
+ import_role:
name: openshift_openstack
tasks_from: net_vars_check.yaml
diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml
index 583e72b51..a38d7bff7 100644
--- a/playbooks/openstack/openshift-cluster/provision.yml
+++ b/playbooks/openstack/openshift-cluster/provision.yml
@@ -3,7 +3,7 @@
hosts: localhost
tasks:
- name: provision cluster
- include_role:
+ import_role:
name: openshift_openstack
tasks_from: provision.yml
@@ -36,7 +36,7 @@
hosts: localhost
tasks:
- name: Populate DNS entries
- include_role:
+ import_role:
name: openshift_openstack
tasks_from: populate-dns.yml
when:
@@ -47,20 +47,28 @@
hosts: oo_all_hosts
become: yes
gather_facts: yes
- roles:
- - role: rhel_subscribe
+ tasks:
+ - name: Subscribe RHEL instances
+ import_role:
+ name: rhel_subscribe
when:
- ansible_distribution == "RedHat"
- - rhsub_user | default(False)
- - rhsub_pass | default(False)
+ - rhsub_user is defined
+ - rhsub_pass is defined
+
+ - name: Enable required YUM repositories
+ import_role:
+ name: openshift_repos
+ when:
+ - ansible_distribution == "RedHat"
+ - rh_subscribed is defined
- tasks:
- name: Install dependencies
- include_role:
+ import_role:
name: openshift_openstack
tasks_from: node-packages.yml
- name: Configure Node
- include_role:
+ import_role:
name: openshift_openstack
tasks_from: node-configuration.yml
diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
index 933117127..a8663f946 100644
--- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
@@ -14,12 +14,13 @@ openshift_hosted_router_wait: True
openshift_hosted_registry_wait: True
## Openstack credentials
-#openshift_cloudprovider_kind=openstack
+#openshift_cloudprovider_kind: openstack
#openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
#openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}"
#openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
#openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}"
-#openshift_cloudprovider_openstack_region="{{ lookup('env', 'OS_REGION_NAME') }}"
+#openshift_cloudprovider_openstack_region: "{{ lookup('env', 'OS_REGION_NAME') }}"
+#openshift_cloudprovider_openstack_blockstorage_version: v2
## Use Cinder volume for Openshift registry:
diff --git a/playbooks/openstack/sample-inventory/group_vars/all.yml b/playbooks/openstack/sample-inventory/group_vars/all.yml
index c7afe9a24..d63229120 100644
--- a/playbooks/openstack/sample-inventory/group_vars/all.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/all.yml
@@ -7,6 +7,7 @@ openshift_openstack_dns_nameservers: []
# # - set custom hostnames for roles by uncommenting corresponding lines
#openshift_openstack_master_hostname: "master"
#openshift_openstack_infra_hostname: "infra-node"
+#openshift_openstack_cns_hostname: "cns"
#openshift_openstack_node_hostname: "app-node"
#openshift_openstack_lb_hostname: "lb"
#openshift_openstack_etcd_hostname: "etcd"
@@ -30,6 +31,7 @@ openshift_openstack_external_network_name: "public"
# # - note: do not remove openshift_openstack_default_image_name definition
#openshift_openstack_master_image_name: "centos7"
#openshift_openstack_infra_image_name: "centos7"
+#openshift_openstack_cns_image_name: "centos7"
#openshift_openstack_node_image_name: "centos7"
#openshift_openstack_lb_image_name: "centos7"
#openshift_openstack_etcd_image_name: "centos7"
@@ -37,6 +39,7 @@ openshift_openstack_default_image_name: "centos7"
openshift_openstack_num_masters: 1
openshift_openstack_num_infra: 1
+openshift_openstack_num_cns: 0
openshift_openstack_num_nodes: 2
# # Used Flavors
@@ -44,6 +47,7 @@ openshift_openstack_num_nodes: 2
# # - note: do not remove openshift_openstack_default_flavor definition
#openshift_openstack_master_flavor: "m1.medium"
#openshift_openstack_infra_flavor: "m1.medium"
+#openshift_openstack_cns_flavor: "m1.medium"
#openshift_openstack_node_flavor: "m1.medium"
#openshift_openstack_lb_flavor: "m1.medium"
#openshift_openstack_etcd_flavor: "m1.medium"
@@ -57,6 +61,7 @@ openshift_openstack_default_flavor: "m1.medium"
# # - note: do not remove docker_default_volume_size definition
#openshift_openstack_docker_master_volume_size: "15"
#openshift_openstack_docker_infra_volume_size: "15"
+#openshift_openstack_docker_cns_volume_size: "15"
#openshift_openstack_docker_node_volume_size: "15"
#openshift_openstack_docker_etcd_volume_size: "2"
#openshift_openstack_docker_lb_volume_size: "5"
diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml
index 5ba62a6d6..7802f83d9 100644
--- a/playbooks/prerequisites.yml
+++ b/playbooks/prerequisites.yml
@@ -3,12 +3,19 @@
vars:
skip_verison: True
+- import_playbook: init/validate_hostnames.yml
+ when: not (skip_validate_hostnames | default(False))
+
- import_playbook: init/repos.yml
+- import_playbook: init/base_packages.yml
+
# This is required for container runtime for crio, only needs to run once.
- name: Configure os_firewall
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config
roles:
- role: os_firewall
+- import_playbook: container-runtime/private/setup_storage.yml
+
- import_playbook: container-runtime/private/config.yml
diff --git a/playbooks/redeploy-certificates.yml b/playbooks/redeploy-certificates.yml
index b5fcb951d..4e6defd6e 100644
--- a/playbooks/redeploy-certificates.yml
+++ b/playbooks/redeploy-certificates.yml
@@ -9,7 +9,7 @@
- import_playbook: openshift-etcd/private/restart.yml
vars:
- g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}"
+ g_etcd_certificates_expired: "{{ ('expired' in (hostvars | lib_utils_oo_select_keys(groups['etcd']) | lib_utils_oo_collect('check_results.check_results.etcd') | lib_utils_oo_collect('health'))) | bool }}"
- import_playbook: openshift-master/private/restart.yml
diff --git a/roles/ansible_service_broker/meta/main.yml b/roles/ansible_service_broker/meta/main.yml
index ec4aafb79..65b736500 100644
--- a/roles/ansible_service_broker/meta/main.yml
+++ b/roles/ansible_service_broker/meta/main.yml
@@ -12,4 +12,5 @@ galaxy_info:
categories:
- cloud
dependencies:
+- role: lib_utils
- role: lib_openshift
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
index 4ca47d074..f869b5fae 100644
--- a/roles/ansible_service_broker/tasks/install.yml
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -4,7 +4,7 @@
- name: Set default image variables based on deployment type
include_vars: "{{ item }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "{{ openshift_deployment_type }}.yml"
- "default_images.yml"
- name: set ansible_service_broker facts
@@ -72,6 +72,15 @@
- apiGroups: ["image.openshift.io", ""]
resources: ["images"]
verbs: ["get", "list"]
+ - apiGroups: ["network.openshift.io"]
+ resources: ["clusternetworks", "netnamespaces"]
+ verbs: ["get"]
+ - apiGroups: ["network.openshift.io"]
+ resources: ["netnamespaces"]
+ verbs: ["update"]
+ - apiGroups: ["networking.k8s.io"]
+ resources: ["networkpolicies"]
+ verbs: ["create", "delete"]
- name: Create asb-access cluster role
oc_clusterrole:
@@ -366,6 +375,11 @@
secret:
secretName: etcd-auth-secret
+- name: set auth name and type facts if needed
+ set_fact:
+ ansible_service_broker_registry_auth_type: "secret"
+ ansible_service_broker_registry_auth_name: "asb-registry-auth"
+ when: ansible_service_broker_registry_user != "" and ansible_service_broker_registry_password != ""
# TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following:
- name: Create config map for ansible-service-broker
@@ -393,6 +407,8 @@
org: {{ ansible_service_broker_registry_organization }}
tag: {{ ansible_service_broker_registry_tag }}
white_list: {{ ansible_service_broker_registry_whitelist | to_yaml }}
+ auth_type: "{{ ansible_service_broker_registry_auth_type | default("") }}"
+ auth_name: "{{ ansible_service_broker_registry_auth_name | default("") }}"
- type: local_openshift
name: localregistry
namespaces: ['openshift']
@@ -438,6 +454,7 @@
data: "{{ ansible_service_broker_registry_user }}"
- path: password
data: "{{ ansible_service_broker_registry_password }}"
+ when: ansible_service_broker_registry_user != "" and ansible_service_broker_registry_password != ""
- name: Create the Broker resource in the catalog
oc_obj:
diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml
index 248e0363d..0ed1d9674 100644
--- a/roles/ansible_service_broker/vars/default_images.yml
+++ b/roles/ansible_service_broker/vars/default_images.yml
@@ -1,6 +1,6 @@
---
-__ansible_service_broker_image_prefix: ansibleplaybookbundle/
+__ansible_service_broker_image_prefix: ansibleplaybookbundle/origin-
__ansible_service_broker_image_tag: latest
__ansible_service_broker_etcd_image_prefix: quay.io/coreos/
diff --git a/roles/calico/meta/main.yml b/roles/calico/meta/main.yml
index 816c81369..e3997911b 100644
--- a/roles/calico/meta/main.yml
+++ b/roles/calico/meta/main.yml
@@ -13,5 +13,6 @@ galaxy_info:
- cloud
- system
dependencies:
+- role: lib_utils
- role: openshift_facts
- role: openshift_master_facts
diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml
index bbc6edd48..556953a71 100644
--- a/roles/calico/tasks/main.yml
+++ b/roles/calico/tasks/main.yml
@@ -7,7 +7,7 @@
- not (calico_etcd_cert_dir is defined and calico_etcd_ca_cert_file is defined and calico_etcd_cert_file is defined and calico_etcd_key_file is defined and calico_etcd_endpoints is defined)
- name: Calico Node | Generate OpenShift-etcd certs
- include_role:
+ import_role:
name: etcd
tasks_from: client_certificates
when: calico_etcd_ca_cert_file is not defined or calico_etcd_cert_file is not defined or calico_etcd_key_file is not defined or calico_etcd_endpoints is not defined or calico_etcd_cert_dir is not defined
diff --git a/roles/calico_master/meta/main.yml b/roles/calico_master/meta/main.yml
index 4d70c79cf..73c94db4e 100644
--- a/roles/calico_master/meta/main.yml
+++ b/roles/calico_master/meta/main.yml
@@ -13,5 +13,6 @@ galaxy_info:
- cloud
- system
dependencies:
+- role: lib_utils
- role: calico
- role: openshift_facts
diff --git a/roles/calico_master/tasks/main.yml b/roles/calico_master/tasks/main.yml
index 16d960d8b..834ebba64 100644
--- a/roles/calico_master/tasks/main.yml
+++ b/roles/calico_master/tasks/main.yml
@@ -19,11 +19,11 @@
- name: Calico Master | Launch Calico Policy Controller
command: >
- {{ openshift.common.client_binary }} create
+ {{ openshift_client_binary }} create
-f {{ mktemp.stdout }}/calico-policy-controller.yml
--config={{ openshift.common.config_base }}/master/admin.kubeconfig
register: calico_create_output
- failed_when: ('already exists' not in calico_create_output.stderr) and ('created' not in calico_create_output.stdout)
+ failed_when: "('already exists' not in calico_create_output.stderr) and ('created' not in calico_create_output.stdout) and calico_create_output.rc != 0"
changed_when: ('created' in calico_create_output.stdout)
- name: Calico Master | Delete temp directory
diff --git a/roles/cockpit-ui/meta/main.yml b/roles/cockpit-ui/meta/main.yml
index 4d619fff6..372c29c28 100644
--- a/roles/cockpit-ui/meta/main.yml
+++ b/roles/cockpit-ui/meta/main.yml
@@ -12,4 +12,6 @@ galaxy_info:
categories:
- cloud
dependencies:
+- role: lib_utils
- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml
index f60912033..d4174d879 100644
--- a/roles/cockpit-ui/tasks/main.yml
+++ b/roles/cockpit-ui/tasks/main.yml
@@ -39,7 +39,7 @@
- name: Deploy registry-console
command: >
- {{ openshift.common.client_binary }} new-app --template=registry-console
+ {{ openshift_client_binary }} new-app --template=registry-console
{% if openshift_cockpit_deployer_prefix is defined %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %}
{% if openshift_cockpit_deployer_basename is defined %}-p IMAGE_BASENAME="{{ openshift_cockpit_deployer_basename }}"{% endif %}
{% if openshift_cockpit_deployer_version is defined %}-p IMAGE_VERSION="{{ openshift_cockpit_deployer_version }}"{% endif %}
diff --git a/roles/cockpit/meta/main.yml b/roles/cockpit/meta/main.yml
index 8c0ed3cb8..07e466f04 100644
--- a/roles/cockpit/meta/main.yml
+++ b/roles/cockpit/meta/main.yml
@@ -12,4 +12,4 @@ galaxy_info:
categories:
- cloud
dependencies:
-- role: lib_os_firewall
+- role: lib_utils
diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml
index fc13afed3..577cd7daf 100644
--- a/roles/cockpit/tasks/main.yml
+++ b/roles/cockpit/tasks/main.yml
@@ -10,7 +10,7 @@
- cockpit-bridge
- cockpit-docker
- "{{ cockpit_plugins }}"
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
register: result
until: result is succeeded
@@ -19,4 +19,4 @@
name: cockpit.socket
enabled: true
state: started
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
diff --git a/roles/container_runtime/README.md b/roles/container_runtime/README.md
index 51f469aaf..665b1b012 100644
--- a/roles/container_runtime/README.md
+++ b/roles/container_runtime/README.md
@@ -5,7 +5,7 @@ Ensures docker package or system container is installed, and optionally raises t
container-daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
-This role is designed to be used with include_role and tasks_from.
+This role is designed to be used with import_role and tasks_from.
Entry points
------------
@@ -30,7 +30,8 @@ Example Playbook
- hosts: servers
  tasks:
-    - include_role: container_runtime
-      tasks_from: package_docker.yml
+    - import_role:
+        name: container_runtime
+        tasks_from: package_docker.yml
License
diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml
index dd185cb38..d0e37e2f4 100644
--- a/roles/container_runtime/defaults/main.yml
+++ b/roles/container_runtime/defaults/main.yml
@@ -2,8 +2,6 @@
docker_cli_auth_config_path: '/root/.docker'
openshift_docker_signature_verification: False
-repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
-
openshift_docker_alternative_creds: False
# oreg_url is defined by user input.
@@ -13,7 +11,7 @@ oreg_auth_credentials_replace: False
openshift_docker_use_system_container: False
openshift_docker_disable_push_dockerhub: False # bool
openshift_docker_selinux_enabled: True
-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
openshift_docker_hosted_registry_insecure: False # bool
@@ -55,11 +53,25 @@ openshift_docker_is_node_or_master: "{{ True if inventory_hostname in (groups['o
docker_alt_storage_path: /var/lib/containers/docker
docker_default_storage_path: /var/lib/docker
+docker_storage_path: "{{ docker_default_storage_path }}"
+docker_storage_size: 40G
+docker_storage_setup_options:
+ vg: docker_vg
+ data_size: 99%VG
+ storage_driver: overlay2
+ root_lv_name: docker-root-lv
+ root_lv_size: 100%FREE
+ root_lv_mount_path: "{{ docker_storage_path }}"
+docker_storage_extra_options:
+- "--storage-opt overlay2.override_kernel_check=true"
+- "--storage-opt overlay2.size={{ docker_storage_size }}"
+- "--graph={{ docker_storage_path}}"
+
# Set local versions of facts that must be in json format for container-daemon.json
# NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson
l_docker_log_options: "{{ l2_docker_log_options | to_json }}"
-l_docker_log_options_dict: "{{ l2_docker_log_options | oo_list_to_dict | to_json }}"
+l_docker_log_options_dict: "{{ l2_docker_log_options | lib_utils_oo_list_to_dict | to_json }}"
l_docker_additional_registries: "{{ l2_docker_additional_registries | to_json }}"
l_docker_blocked_registries: "{{ l2_docker_blocked_registries | to_json }}"
l_docker_insecure_registries: "{{ l2_docker_insecure_registries | to_json }}"
diff --git a/roles/container_runtime/meta/main.yml b/roles/container_runtime/meta/main.yml
index 02fceb745..3bc2607fb 100644
--- a/roles/container_runtime/meta/main.yml
+++ b/roles/container_runtime/meta/main.yml
@@ -11,5 +11,5 @@ galaxy_info:
- 7
dependencies:
- role: lib_openshift
-- role: lib_os_firewall
- role: lib_utils
+- role: openshift_facts
diff --git a/roles/container_runtime/tasks/common/post.yml b/roles/container_runtime/tasks/common/post.yml
index d790eb2c0..23fd8528a 100644
--- a/roles/container_runtime/tasks/common/post.yml
+++ b/roles/container_runtime/tasks/common/post.yml
@@ -11,7 +11,7 @@
- meta: flush_handlers
# This needs to run after docker is restarted to account for proxy settings.
-# registry_auth is called directly with include_role in some places, so we
+# registry_auth is called directly with import_role in some places, so we
# have to put it in the root of the tasks/ directory.
- include_tasks: ../registry_auth.yml
@@ -22,5 +22,5 @@
- include_tasks: setup_docker_symlink.yml
when:
- - openshift_use_crio
+ - openshift_use_crio | bool
- dockerstat.stat.islnk is defined and not (dockerstat.stat.islnk | bool)
diff --git a/roles/container_runtime/tasks/common/syscontainer_packages.yml b/roles/container_runtime/tasks/common/syscontainer_packages.yml
index b41122880..d429047e6 100644
--- a/roles/container_runtime/tasks/common/syscontainer_packages.yml
+++ b/roles/container_runtime/tasks/common/syscontainer_packages.yml
@@ -4,7 +4,7 @@
package:
name: container-selinux
state: present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
@@ -13,7 +13,7 @@
package:
name: atomic
state: present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
@@ -23,6 +23,6 @@
package:
name: runc
state: present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
diff --git a/roles/container_runtime/tasks/docker_storage_setup_overlay.yml b/roles/container_runtime/tasks/docker_storage_setup_overlay.yml
new file mode 100644
index 000000000..782c002e3
--- /dev/null
+++ b/roles/container_runtime/tasks/docker_storage_setup_overlay.yml
@@ -0,0 +1,10 @@
+---
+- name: Setup the docker-storage for overlay
+ template:
+ src: docker_storage_setup.j2
+ dest: /etc/sysconfig/docker-storage-setup
+ owner: root
+ group: root
+ mode: 0664
+ when:
+ - container_runtime_docker_storage_type == 'overlay2'
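This new entry point consumes the storage defaults added to `defaults/main.yml` above. Under those assumptions, an inventory override for hosts that should carve Docker storage out of a dedicated volume group might look like (values illustrative):

```yaml
# group_vars sketch built only from variables defined in container_runtime defaults.
container_runtime_docker_storage_type: overlay2
docker_storage_path: /var/lib/containers/docker
docker_storage_size: 60G
docker_storage_setup_options:
  vg: docker_vg
  data_size: 99%VG
  storage_driver: overlay2
  root_lv_name: docker-root-lv
  root_lv_size: 100%FREE
  root_lv_mount_path: "{{ docker_storage_path }}"
```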
diff --git a/roles/container_runtime/tasks/docker_upgrade_check.yml b/roles/container_runtime/tasks/docker_upgrade_check.yml
index 6731963dd..8dd916e79 100644
--- a/roles/container_runtime/tasks/docker_upgrade_check.yml
+++ b/roles/container_runtime/tasks/docker_upgrade_check.yml
@@ -21,6 +21,7 @@
retries: 4
until: curr_docker_version is succeeded
changed_when: false
+ when: not openshift_is_atomic | bool
- name: Get latest available version of Docker
command: >
@@ -29,7 +30,9 @@
retries: 4
until: avail_docker_version is succeeded
# Don't expect docker rpm to be available on hosts that don't already have it installed:
- when: pkg_check.rc == 0
+ when:
+ - not openshift_is_atomic | bool
+ - pkg_check.rc == 0
failed_when: false
changed_when: false
@@ -37,9 +40,10 @@
msg: This playbook requires access to Docker 1.12 or later
# Disable the 1.12 requirement if the user set a specific Docker version
when:
- - docker_version is not defined
- - docker_upgrade is not defined or docker_upgrade | bool == True
- - (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout is version_compare('1.12','<')))
+ - not openshift_is_atomic | bool
+ - docker_version is not defined
+ - docker_upgrade is not defined or docker_upgrade | bool == True
+ - (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout is version_compare('1.12','<')))
# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
- set_fact:
@@ -48,27 +52,30 @@
# Make sure a docker_version is set if none was requested:
- set_fact:
docker_version: "{{ avail_docker_version.stdout }}"
- when: pkg_check.rc == 0 and docker_version is not defined
+ when:
+ - not openshift_is_atomic | bool
+ - pkg_check.rc == 0 and docker_version is not defined
- name: Flag for Docker upgrade if necessary
set_fact:
l_docker_upgrade: True
when:
- - pkg_check.rc == 0
- - curr_docker_version.stdout is version_compare(docker_version,'<')
+ - not openshift_is_atomic | bool
+ - pkg_check.rc == 0
+ - curr_docker_version.stdout is version_compare(docker_version,'<')
# Additional checks for Atomic hosts:
- name: Determine available Docker
shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
register: g_atomic_docker_version_result
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- set_fact:
l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- fail:
msg: This playbook requires access to Docker 1.12 or later
when:
- - openshift.common.is_atomic | bool
- - l_docker_version.avail_version | default(l_docker_version.curr_version, true) is version_compare('1.12','<')
+ - openshift_is_atomic | bool
+ - l_docker_version.avail_version | default(l_docker_version.curr_version, true) is version_compare('1.12','<')
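On Atomic hosts the role cannot use repoquery, so the task above formats `rpm -q` output as a tiny YAML document and parses it with `from_yaml`. Given that queryformat, the registered stdout parses to roughly:

```yaml
# Approximate result of g_atomic_docker_version_result.stdout | from_yaml (version illustrative).
curr_version: "1.13.1"
avail_version:    # empty, hence the default(l_docker_version.curr_version, true) fallback
```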
diff --git a/roles/container_runtime/tasks/main.yml b/roles/container_runtime/tasks/main.yml
index 96d8606c6..07da831c4 100644
--- a/roles/container_runtime/tasks/main.yml
+++ b/roles/container_runtime/tasks/main.yml
@@ -1,2 +1,2 @@
---
-# This role is meant to be used with include_role and tasks_from.
+# This role is meant to be used with import_role and tasks_from.
diff --git a/roles/container_runtime/tasks/package_docker.yml b/roles/container_runtime/tasks/package_docker.yml
index d9d4037dd..d6e7e7fed 100644
--- a/roles/container_runtime/tasks/package_docker.yml
+++ b/roles/container_runtime/tasks/package_docker.yml
@@ -3,7 +3,7 @@
- name: Get current installed Docker version
command: "{{ repoquery_installed }} --qf '%{version}' docker"
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: curr_docker_version
retries: 4
until: curr_docker_version is succeeded
@@ -20,7 +20,7 @@
name: "docker{{ '-' + docker_version if docker_version is defined else '' }}"
state: present
when:
- - not (openshift.common.is_atomic | bool)
+ - not (openshift_is_atomic | bool)
- not (curr_docker_version is skipped)
- not (curr_docker_version.stdout != '')
register: result
@@ -48,7 +48,7 @@
lineinfile:
dest: /etc/sysconfig/docker
regexp: '^{{ item.reg_conf_var }}=.*$'
- line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
+ line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | lib_utils_oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
when:
- item.reg_fact_val != []
- docker_check.stat.isreg is defined
@@ -101,7 +101,7 @@
line: "OPTIONS='\
{% if ansible_selinux.status | default(None) == 'enabled' and openshift_docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %} \
{% if openshift_docker_log_driver | bool %} --log-driver {{ openshift_docker_log_driver }}{% endif %} \
- {% if l2_docker_log_options != [] %} {{ l2_docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \
+ {% if l2_docker_log_options != [] %} {{ l2_docker_log_options | lib_utils_oo_split() | lib_utils_oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \
{% if openshift_docker_hosted_registry_insecure and (openshift_docker_hosted_registry_network | bool) %} --insecure-registry={{ openshift_docker_hosted_registry_network }} {% endif %} \
{% if docker_options is defined %} {{ docker_options }}{% endif %} \
{% if openshift_docker_options %} {{ openshift_docker_options }}{% endif %} \
diff --git a/roles/container_runtime/tasks/registry_auth.yml b/roles/container_runtime/tasks/registry_auth.yml
index 2c7bc5711..4f1abd59a 100644
--- a/roles/container_runtime/tasks/registry_auth.yml
+++ b/roles/container_runtime/tasks/registry_auth.yml
@@ -15,6 +15,7 @@
- not openshift_docker_alternative_creds | bool
- oreg_auth_user is defined
- (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ no_log: True
# docker_creds is a custom module from lib_utils
# 'docker login' requires a docker.service running on the local host, this is an
@@ -30,3 +31,4 @@
- openshift_docker_alternative_creds | bool
- oreg_auth_user is defined
- (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ no_log: True
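
Both credential paths are now masked. Without no_log, a failing login task
would echo the registry user and password into the play output and log files;
with it, Ansible suppresses the task's arguments and results. The effect in
miniature (registry name is a placeholder):

    - name: Credentials never reach the log when no_log is set
      command: "docker login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} registry.example.com"
      no_log: true
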
diff --git a/roles/container_runtime/tasks/systemcontainer_crio.yml b/roles/container_runtime/tasks/systemcontainer_crio.yml
index 61f122f3c..d588f2618 100644
--- a/roles/container_runtime/tasks/systemcontainer_crio.yml
+++ b/roles/container_runtime/tasks/systemcontainer_crio.yml
@@ -3,7 +3,7 @@
- name: Check we are not using node as a Docker container with CRI-O
fail: msg='Cannot use CRI-O with node configured as a Docker container'
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- not l_is_node_system_container | bool
- include_tasks: common/pre.yml
@@ -81,6 +81,17 @@
dest: /etc/cni/net.d/openshift-sdn.conf
src: 80-openshift-sdn.conf.j2
+- name: Create /etc/sysconfig/crio-storage
+ copy:
+ content: ""
+ dest: /etc/sysconfig/crio-storage
+ force: no
+
+- name: Create /etc/sysconfig/crio-network
+ template:
+ dest: /etc/sysconfig/crio-network
+ src: crio-network.j2
+
- name: Start the CRI-O service
systemd:
name: "cri-o"
@@ -93,4 +104,4 @@
# 'docker login'
- include_tasks: common/post.yml
vars:
- openshift_docker_alternative_creds: "{{ openshift_use_crio_only }}"
+ openshift_docker_alternative_creds: "{{ openshift_use_crio_only | bool }}"
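
The explicit cast matters because inventory values often arrive as strings,
and in Jinja any non-empty string, including "False", is truthy. A minimal
demonstration:

    - debug:
        msg: "{{ 'False' | ternary('on', 'off') }}"         # -> on (non-empty string)
    - debug:
        msg: "{{ 'False' | bool | ternary('on', 'off') }}"  # -> off
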
diff --git a/roles/container_runtime/tasks/systemcontainer_docker.yml b/roles/container_runtime/tasks/systemcontainer_docker.yml
index 639585367..5f715cd21 100644
--- a/roles/container_runtime/tasks/systemcontainer_docker.yml
+++ b/roles/container_runtime/tasks/systemcontainer_docker.yml
@@ -18,7 +18,7 @@
# Make sure Docker is installed so we are able to use the client
- name: Install Docker so we can use the client
package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
@@ -42,6 +42,12 @@
- debug:
var: l_docker_image
+# Do the authentication before pulling the container engine system container
+# as the pull might be from an authenticated registry.
+- include_tasks: registry_auth.yml
+ vars:
+ openshift_docker_alternative_creds: True
+
# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
- name: Pre-pull Container Engine System Container image
command: "atomic pull --storage ostree {{ l_docker_image }}"
diff --git a/roles/container_runtime/templates/crio-network.j2 b/roles/container_runtime/templates/crio-network.j2
new file mode 100644
index 000000000..763be97d7
--- /dev/null
+++ b/roles/container_runtime/templates/crio-network.j2
@@ -0,0 +1,9 @@
+{% if 'http_proxy' in openshift.common %}
+HTTP_PROXY={{ openshift.common.http_proxy }}
+{% endif %}
+{% if 'https_proxy' in openshift.common %}
+HTTPS_PROXY={{ openshift.common.https_proxy }}
+{% endif %}
+{% if 'no_proxy' in openshift.common %}
+NO_PROXY={{ openshift.common.no_proxy }}
+{% endif %}
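
For illustration, with the proxy facts set this template renders a plain
sysconfig-style environment file for the CRI-O units; the values below are
assumed, not defaults:

    HTTP_PROXY=http://proxy.example.com:3128
    HTTPS_PROXY=http://proxy.example.com:3128
    NO_PROXY=localhost,127.0.0.1,.svc,.cluster.local
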
diff --git a/roles/container_runtime/templates/docker_storage_setup.j2 b/roles/container_runtime/templates/docker_storage_setup.j2
new file mode 100644
index 000000000..b056087e0
--- /dev/null
+++ b/roles/container_runtime/templates/docker_storage_setup.j2
@@ -0,0 +1,12 @@
+# Edit this file to override any configuration options specified in
+# /usr/lib/docker-storage-setup/docker-storage-setup.
+#
+# For more details refer to "man docker-storage-setup"
+DEVS={{ container_runtime_docker_storage_setup_device }}
+VG={{ docker_storage_setup_options.vg }}
+DATA_SIZE={{ docker_storage_setup_options.data_size }}
+STORAGE_DRIVER="{{ docker_storage_setup_options.storage_driver }}"
+CONTAINER_ROOT_LV_NAME="{{ docker_storage_setup_options.root_lv_name }}"
+CONTAINER_ROOT_LV_SIZE="{{ docker_storage_setup_options.root_lv_size }}"
+CONTAINER_ROOT_LV_MOUNT_PATH="{{ docker_storage_setup_options.root_lv_mount_path }}"
+EXTRA_STORAGE_OPTIONS="{{ docker_storage_extra_options | join(' ') }}"
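
A sketch of the rendered /etc/sysconfig/docker-storage-setup, assuming a
dedicated second disk and typical LVM thin-pool settings (every value here is
illustrative):

    DEVS=/dev/sdb
    VG=docker_vg
    DATA_SIZE=99%FREE
    STORAGE_DRIVER="overlay2"
    CONTAINER_ROOT_LV_NAME="dockerlv"
    CONTAINER_ROOT_LV_SIZE="100%FREE"
    CONTAINER_ROOT_LV_MOUNT_PATH="/var/lib/docker"
    EXTRA_STORAGE_OPTIONS=""
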
diff --git a/roles/contiv/README.md b/roles/contiv/README.md
index fa36039d9..ce414f9fb 100644
--- a/roles/contiv/README.md
+++ b/roles/contiv/README.md
@@ -19,8 +19,8 @@ Install Contiv components (netmaster, netplugin, contiv_etcd) on Master and Mini
* ``openshift_use_contiv=True``
* ``openshift_use_openshift_sdn=False``
* ``os_sdn_network_plugin_name='cni'``
-* ``netmaster_interface=eth0``
-* ``netplugin_interface=eth1``
+* ``contiv_netmaster_interface=eth0``
+* ``contiv_netplugin_interface=eth1``
* ref. Openshift docs Contiv section for more details
## Example bare metal deployment of Openshift + Contiv
diff --git a/roles/contiv/defaults/main.yml b/roles/contiv/defaults/main.yml
index aa976d921..4869abc61 100644
--- a/roles/contiv/defaults/main.yml
+++ b/roles/contiv/defaults/main.yml
@@ -1,51 +1,63 @@
---
# The version of Contiv binaries to use
-contiv_version: 1.1.1
+contiv_version: 1.2.0
# The version of cni binaries
-cni_version: v0.4.0
+contiv_cni_version: v0.4.0
+
+# If the node we are deploying to is to be a contiv master.
+contiv_master: false
contiv_default_subnet: "10.128.0.0/16"
contiv_default_gw: "10.128.254.254"
-# TCP port that Netmaster listens for network connections
-netmaster_port: 9999
-# Default for contiv_role
-contiv_role: netmaster
+# Ports netmaster listens on
+contiv_netmaster_port: 9999
+contiv_netmaster_port_proto: tcp
+contiv_ofnet_master_port: 9001
+contiv_ofnet_master_port_proto: tcp
+# Ports netplugin listens on
+contiv_netplugin_port: 6640
+contiv_netplugin_port_proto: tcp
+contiv_ofnet_vxlan_port: 9002
+contiv_ofnet_vxlan_port_proto: tcp
+contiv_ovs_port: 9003
+contiv_ovs_port_proto: tcp
-# TCP port that Netplugin listens for network connections
-netplugin_port: 6640
-contiv_rpc_port1: 9001
-contiv_rpc_port2: 9002
-contiv_rpc_port3: 9003
+contiv_vxlan_port: 4789
+contiv_vxlan_port_proto: udp
# Interface used by Netplugin for inter-host traffic when encap_mode is vlan.
# The interface must support 802.1Q trunking.
-netplugin_interface: "eno16780032"
+contiv_netplugin_interface: "eno16780032"
# IP address of the interface used for control communication within the cluster
# It needs to be reachable from all nodes in the cluster.
-netplugin_ctrl_ip: "{{ hostvars[inventory_hostname]['ansible_' + netplugin_interface].ipv4.address }}"
+contiv_netplugin_ctrl_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netplugin_interface].ipv4.address }}"
# IP used to terminate vxlan tunnels
-netplugin_vtep_ip: "{{ hostvars[inventory_hostname]['ansible_' + netplugin_interface].ipv4.address }}"
+contiv_netplugin_vtep_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netplugin_interface].ipv4.address }}"
# Interface used to bind Netmaster service
-netmaster_interface: "{{ netplugin_interface }}"
+contiv_netmaster_interface: "{{ contiv_netplugin_interface }}"
+
+# IP address of the interface used for control communication within the cluster
+# It needs to be reachable from all nodes in the cluster.
+contiv_netmaster_ctrl_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address }}"
# Path to the contiv binaries
-bin_dir: /usr/bin
+contiv_bin_dir: /usr/bin
# Path to the contivk8s cni binary
-cni_bin_dir: /opt/cni/bin
+contiv_cni_bin_dir: /opt/cni/bin
# Path to cni archive download directory
-cni_download_dir: /tmp
+contiv_cni_download_dir: /tmp
# URL for cni binaries
-cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/"
-cni_bin_url: "{{ cni_bin_url_base }}/{{ cni_version }}/cni-{{ cni_version }}.tbz2"
+contiv_cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/"
+contiv_cni_bin_url: "{{ contiv_cni_bin_url_base }}/{{ contiv_cni_version }}/cni-{{ contiv_cni_version }}.tbz2"
# Contiv config directory
@@ -60,11 +72,11 @@ contiv_download_url_base: "https://github.com/contiv/netplugin/releases/download
contiv_download_url: "{{ contiv_download_url_base }}/{{ contiv_version }}/netplugin-{{ contiv_version }}.tar.bz2"
# This is where kubelet looks for plugin files
-kube_plugin_dir: /usr/libexec/kubernetes/kubelet-plugins/net/exec
+contiv_kube_plugin_dir: /usr/libexec/kubernetes/kubelet-plugins/net/exec
# Specifies routed mode vs bridged mode for networking (bridge | routing)
# if you are using an external router for all routing, you should select bridge here
-netplugin_fwd_mode: bridge
+contiv_netplugin_fwd_mode: routing
# Contiv fabric mode aci|default
contiv_fabric_mode: default
@@ -73,10 +85,10 @@ contiv_fabric_mode: default
contiv_vlan_range: "2900-3000"
# Encapsulation type vlan|vxlan to use for instantiating container networks
-contiv_encap_mode: vlan
+contiv_encap_mode: vxlan
# Backend used by Netplugin for instantiating container networks
-netplugin_driver: ovs
+contiv_netplugin_driver: ovs
# Create a default Contiv network for use by pods
contiv_default_network: true
@@ -85,39 +97,80 @@ contiv_default_network: true
contiv_default_network_tag: ""
#SRFIXME (use the openshift variables)
-https_proxy: ""
-http_proxy: ""
-no_proxy: ""
+contiv_https_proxy: ""
+contiv_http_proxy: ""
+contiv_no_proxy: ""
# The following are aci specific parameters when contiv_fabric_mode: aci is set.
# Otherwise, you can ignore these.
-apic_url: ""
-apic_username: ""
-apic_password: ""
-apic_leaf_nodes: ""
-apic_phys_dom: ""
-apic_contracts_unrestricted_mode: no
-apic_epg_bridge_domain: not_specified
+contiv_apic_url: ""
+contiv_apic_username: ""
+contiv_apic_password: ""
+contiv_apic_leaf_nodes: ""
+contiv_apic_phys_dom: ""
+contiv_apic_contracts_unrestricted_mode: no
+contiv_apic_epg_bridge_domain: not_specified
apic_configure_default_policy: false
-apic_default_external_contract: "uni/tn-common/brc-default"
-apic_default_app_profile: "contiv-infra-app-profile"
-is_atomic: False
-kube_cert_dir: "/data/src/github.com/openshift/origin/openshift.local.config/master"
-master_name: "{{ groups['masters'][0] }}"
-contiv_etcd_port: 22379
-etcd_url: "{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:{{ contiv_etcd_port }}"
-kube_ca_cert: "{{ kube_cert_dir }}/ca.crt"
-kube_key: "{{ kube_cert_dir }}/admin.key"
-kube_cert: "{{ kube_cert_dir }}/admin.crt"
-kube_master_api_port: 8443
+contiv_apic_default_external_contract: "uni/tn-common/brc-default"
+contiv_apic_default_app_profile: "contiv-infra-app-profile"
+contiv_kube_cert_dir: "/data/src/github.com/openshift/origin/openshift.local.config/master"
+contiv_kube_ca_cert: "{{ contiv_kube_cert_dir }}/ca.crt"
+contiv_kube_key: "{{ contiv_kube_cert_dir }}/admin.key"
+contiv_kube_cert: "{{ contiv_kube_cert_dir }}/admin.crt"
+contiv_kube_master_api_port: 8443
+contiv_kube_master_api_port_proto: tcp
# contivh1 default subnet and gateway
-#contiv_h1_subnet_default: "132.1.1.0/24"
-#contiv_h1_gw_default: "132.1.1.1"
contiv_h1_subnet_default: "10.129.0.0/16"
contiv_h1_gw_default: "10.129.0.1"
# contiv default private subnet for ext access
contiv_private_ext_subnet: "10.130.0.0/16"
-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+contiv_openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
+
+contiv_api_proxy_port: 10000
+contiv_api_proxy_port_proto: tcp
+contiv_api_proxy_image_repo: contiv/auth_proxy
+contiv_api_proxy_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address }}"
+
+contiv_etcd_system_user: contivetcd
+contiv_etcd_system_uid: 823
+contiv_etcd_system_group: contivetcd
+contiv_etcd_system_gid: 823
+contiv_etcd_port: 22379
+contiv_etcd_port_proto: tcp
+contiv_etcd_peer_port: 22380
+contiv_etcd_peer_port_proto: tcp
+contiv_etcd_url: "http://127.0.0.1:{{ contiv_etcd_port }}"
+contiv_etcd_init_image_repo: ferest/etcd-initer
+contiv_etcd_init_image_tag: latest
+contiv_etcd_image_repo: quay.io/coreos/etcd
+contiv_etcd_image_tag: v3.2.4
+contiv_etcd_conf_dir: /etc/contiv-etcd
+contiv_etcd_data_dir: /var/lib/contiv-etcd
+contiv_etcd_peers: |-
+ {% for host in groups.oo_masters_to_config -%}
+ {{ host }}=http://{{ hostvars[host]['ip'] | default(hostvars[host].ansible_default_ipv4['address']) }}:{{ contiv_etcd_peer_port }}{% if not loop.last %},{% endif %}
+ {%- endfor %}
+
+# List of port/protocol pairs to allow inbound access to on every host
+# netplugin runs on, from all host IPs in the cluster.
+contiv_netplugin_internal: [ "{{ contiv_ofnet_vxlan_port }}/{{ contiv_ofnet_vxlan_port_proto }}",
+ "{{ contiv_ovs_port }}/{{ contiv_ovs_port_proto }}",
+ "{{ contiv_vxlan_port }}/{{ contiv_vxlan_port_proto }}" ]
+# Allow all forwarded traffic in and out of these interfaces.
+contiv_netplugin_forward_interfaces: [ contivh0, contivh1 ]
+
+# List of port/protocol pairs to allow inbound access to on every host
+# netmaster runs on, from all host IPs in the cluster. Note that every host
+# that runs netmaster also runs netplugin, so the above netplugin rules will
+# apply as well.
+contiv_netmaster_internal: [ "{{ contiv_ofnet_master_port }}/{{ contiv_ofnet_master_port_proto }}",
+ "{{ contiv_netmaster_port }}/{{ contiv_netmaster_port_proto }}",
+ "{{ contiv_etcd_port }}/{{ contiv_etcd_port_proto }}",
+ "{{ contiv_etcd_peer_port }}/{{ contiv_etcd_peer_port_proto }}",
+ "{{ contiv_kube_master_api_port }}/{{ contiv_kube_master_api_port_proto }}" ]
+# List of port/protocol pairs to allow inbound access to on every host
+# netmaster runs on, from any host anywhere.
+contiv_netmaster_external: [ "{{ contiv_api_proxy_port }}/{{ contiv_api_proxy_port_proto }}" ]
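
Two conventions introduced above are worth spelling out. First, each firewall
entry is a "port/proto" string that later tasks take apart with split('/'):

    - debug:
        msg: "port={{ item.split('/')[0] }} proto={{ item.split('/')[1] }}"
      with_items: "{{ contiv_netmaster_external }}"
      # -> "port=10000 proto=tcp" with the defaults above

Second, contiv_etcd_peers renders the usual etcd initial-cluster string; with
two masters it would come out roughly as (hosts and IPs assumed):

    master1=http://10.0.0.10:22380,master2=http://10.0.0.11:22380
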
diff --git a/roles/contiv/meta/main.yml b/roles/contiv/meta/main.yml
index 52b9d09dd..e8607cc90 100644
--- a/roles/contiv/meta/main.yml
+++ b/roles/contiv/meta/main.yml
@@ -13,18 +13,5 @@ galaxy_info:
- cloud
- system
dependencies:
+- role: lib_utils
- role: contiv_facts
-- role: etcd
- etcd_service: contiv-etcd
- etcd_is_thirdparty: True
- etcd_peer_port: 22380
- etcd_client_port: 22379
- etcd_conf_dir: /etc/contiv-etcd/
- etcd_data_dir: /var/lib/contiv-etcd/
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_cert_config_dir: /etc/contiv-etcd/
- etcd_url_scheme: http
- etcd_peer_url_scheme: http
- when: contiv_role == "netmaster"
-- role: contiv_auth_proxy
- when: contiv_role == "netmaster"
diff --git a/roles/contiv/tasks/aci.yml b/roles/contiv/tasks/aci.yml
index 30d2eb339..8a56b3590 100644
--- a/roles/contiv/tasks/aci.yml
+++ b/roles/contiv/tasks/aci.yml
@@ -11,7 +11,7 @@
- name: ACI | Copy shell script used by aci-gw service
template:
src: aci_gw.j2
- dest: "{{ bin_dir }}/aci_gw.sh"
+ dest: "{{ contiv_bin_dir }}/aci_gw.sh"
mode: u=rwx,g=rx,o=rx
- name: ACI | Copy systemd units for aci-gw
diff --git a/roles/contiv/tasks/api_proxy.yml b/roles/contiv/tasks/api_proxy.yml
new file mode 100644
index 000000000..8b524dd6e
--- /dev/null
+++ b/roles/contiv/tasks/api_proxy.yml
@@ -0,0 +1,120 @@
+---
+- name: API proxy | Create contiv-api-proxy openshift user
+ oc_serviceaccount:
+ state: present
+ name: contiv-api-proxy
+ namespace: kube-system
+ run_once: true
+
+- name: API proxy | Set contiv-api-proxy openshift user permissions
+ oc_adm_policy_user:
+ user: system:serviceaccount:kube-system:contiv-api-proxy
+ resource_kind: scc
+ resource_name: hostnetwork
+ state: present
+ run_once: true
+
+- name: API proxy | Create temp directory for doing work
+ command: mktemp -d /tmp/openshift-contiv-XXXXXX
+ register: mktemp
+ changed_when: False
+ # For things that pass temp files between steps, we want to make sure they
+ # run on the same node.
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: API proxy | Check for existing api proxy secret volume
+ oc_obj:
+ namespace: kube-system
+ kind: secret
+ state: list
+ selector: "name=contiv-api-proxy-secret"
+ register: existing_secret_volume
+ run_once: true
+
+- name: API proxy | Generate a self signed certificate for api proxy
+ command: openssl req -new -nodes -x509 -subj "/C=US/ST=/L=/O=/CN=localhost" -days 3650 -keyout "{{ mktemp.stdout }}/key.pem" -out "{{ mktemp.stdout }}/cert.pem" -extensions v3_ca
+ when: (contiv_api_proxy_cert is not defined or contiv_api_proxy_key is not defined)
+ and not existing_secret_volume.results.results[0]['items']
+ register: created_self_signed_cert
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: API proxy | Read self signed certificate file
+ command: cat "{{ mktemp.stdout }}/cert.pem"
+ register: generated_cert
+ when: created_self_signed_cert.changed
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: API proxy | Read self signed key file
+ command: cat "{{ mktemp.stdout }}/key.pem"
+ register: generated_key
+ when: created_self_signed_cert.changed
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: API proxy | Create api-proxy-secrets.yml from template using generated cert
+ template:
+ src: api-proxy-secrets.yml.j2
+ dest: "{{ mktemp.stdout }}/api-proxy-secrets.yml"
+ vars:
+ key: "{{ generated_key.stdout }}"
+ cert: "{{ generated_cert.stdout }}"
+ when: created_self_signed_cert.changed
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: API proxy | Create api-proxy-secrets.yml from template using user defined cert
+ template:
+ src: api-proxy-secrets.yml.j2
+ dest: "{{ mktemp.stdout }}/api-proxy-secrets.yml"
+ vars:
+ key: "{{ lookup('file', contiv_api_proxy_key) }}"
+ cert: "{{ lookup('file', contiv_api_proxy_cert) }}"
+ when: contiv_api_proxy_cert is defined and contiv_api_proxy_key is defined
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: API proxy | Create secret certificate volume
+ oc_obj:
+ state: present
+ namespace: "kube-system"
+ kind: secret
+ name: contiv-api-proxy-secret
+ files:
+ - "{{ mktemp.stdout }}/api-proxy-secrets.yml"
+ when: (contiv_api_proxy_cert is defined and contiv_api_proxy_key is defined)
+ or created_self_signed_cert.changed
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: API proxy | Create api-proxy-daemonset.yml from template
+ template:
+ src: api-proxy-daemonset.yml.j2
+ dest: "{{ mktemp.stdout }}/api-proxy-daemonset.yml"
+ vars:
+ etcd_host: "etcd://{{ groups.oo_etcd_to_config.0 }}:{{ contiv_etcd_port }}"
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+# Always "import" this file, k8s won't do anything if it matches exactly what
+# is already in the cluster.
+- name: API proxy | Add API proxy daemonset
+ oc_obj:
+ state: present
+ namespace: "kube-system"
+ kind: daemonset
+ name: contiv-api-proxy
+ files:
+ - "{{ mktemp.stdout }}/api-proxy-daemonset.yml"
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: API proxy | Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
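
Note the delegate_to/run_once pair repeated on every step that touches the
mktemp directory: pinning them all to the first master guarantees that files
written by one task exist for the next, and each step runs once per play
rather than once per host. The same pattern in miniature:

    - command: mktemp -d /tmp/openshift-contiv-XXXXXX
      register: mktemp
      delegate_to: "{{ groups.oo_masters_to_config.0 }}"
      run_once: true

    - file:
        path: "{{ mktemp.stdout }}/example"  # same host, so the directory exists
        state: touch
      delegate_to: "{{ groups.oo_masters_to_config.0 }}"
      run_once: true
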
diff --git a/roles/contiv/tasks/default_network.yml b/roles/contiv/tasks/default_network.yml
index 8a928ea54..e9763d34a 100644
--- a/roles/contiv/tasks/default_network.yml
+++ b/roles/contiv/tasks/default_network.yml
@@ -1,71 +1,71 @@
---
-- name: Contiv | Wait for netmaster
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" tenant ls'
+- name: Default network | Wait for netmaster
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" tenant ls'
register: tenant_result
until: tenant_result.stdout.find("default") != -1
retries: 9
delay: 10
-- name: Contiv | Set globals
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --fabric-mode {{ contiv_fabric_mode }} --vlan-range {{ contiv_vlan_range }} --fwd-mode {{ netplugin_fwd_mode }} --private-subnet {{ contiv_private_ext_subnet }}'
+- name: Default network | Set globals
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" global set --fabric-mode {{ contiv_fabric_mode }} --vlan-range {{ contiv_vlan_range }} --fwd-mode {{ contiv_netplugin_fwd_mode }} --private-subnet {{ contiv_private_ext_subnet }}'
run_once: true
-- name: Contiv | Set arp mode to flood if ACI
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --arp-mode flood'
+- name: Default network | Set arp mode to flood if ACI
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" global set --arp-mode flood'
when: contiv_fabric_mode == "aci"
run_once: true
-- name: Contiv | Check if default-net exists
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net ls'
+- name: Default network | Check if default-net exists
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" net ls'
register: net_result
run_once: true
-- name: Contiv | Create default-net
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway {{ contiv_default_gw }} default-net'
+- name: Default network | Create default-net
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway {{ contiv_default_gw }} default-net'
when: net_result.stdout.find("default-net") == -1
run_once: true
-- name: Contiv | Create host access infra network for VxLan routing case
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_h1_subnet_default }} --gateway={{ contiv_h1_gw_default }} --nw-type="infra" contivh1'
- when: (contiv_encap_mode == "vxlan") and (netplugin_fwd_mode == "routing")
+- name: Default network | Create host access infra network for VxLan routing case
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" net create --subnet={{ contiv_h1_subnet_default }} --gateway={{ contiv_h1_gw_default }} --nw-type="infra" contivh1'
+ when: (contiv_encap_mode == "vxlan") and (contiv_netplugin_fwd_mode == "routing")
run_once: true
-#- name: Contiv | Create an allow-all policy for the default-group
-# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy create ose-allow-all-policy'
+#- name: Default network | Create an allow-all policy for the default-group
+# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" policy create ose-allow-all-policy'
# when: contiv_fabric_mode == "aci"
# run_once: true
-- name: Contiv | Set up aci external contract to consume default external contract
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -c -a {{ apic_default_external_contract }} oseExtToConsume'
+- name: Default network | Set up aci external contract to consume default external contract
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" external-contracts create -c -a {{ contiv_apic_default_external_contract }} oseExtToConsume'
when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
run_once: true
-- name: Contiv | Set up aci external contract to provide default external contract
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -p -a {{ apic_default_external_contract }} oseExtToProvide'
+- name: Default network | Set up aci external contract to provide default external contract
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" external-contracts create -p -a {{ contiv_apic_default_external_contract }} oseExtToProvide'
when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
run_once: true
-- name: Contiv | Create aci default-group
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create default-net default-group'
+- name: Default network | Create aci default-group
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" group create default-net default-group'
when: contiv_fabric_mode == "aci"
run_once: true
-- name: Contiv | Add external contracts to the default-group
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create -e oseExtToConsume -e oseExtToProvide default-net default-group'
+- name: Default network | Add external contracts to the default-group
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" group create -e oseExtToConsume -e oseExtToProvide default-net default-group'
when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
run_once: true
-#- name: Contiv | Add policy rule 1 for allow-all policy
-# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d in --action allow ose-allow-all-policy 1'
+#- name: Default network | Add policy rule 1 for allow-all policy
+# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" policy rule-add -d in --action allow ose-allow-all-policy 1'
# when: contiv_fabric_mode == "aci"
# run_once: true
-#- name: Contiv | Add policy rule 2 for allow-all policy
-# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d out --action allow ose-allow-all-policy 2'
+#- name: Default network | Add policy rule 2 for allow-all policy
+# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" policy rule-add -d out --action allow ose-allow-all-policy 2'
# when: contiv_fabric_mode == "aci"
# run_once: true
-- name: Contiv | Create default aci app profile
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" app-profile create -g default-group {{ apic_default_app_profile }}'
+- name: Default network | Create default aci app profile
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" app-profile create -g default-group {{ contiv_apic_default_app_profile }}'
when: contiv_fabric_mode == "aci"
run_once: true
diff --git a/roles/contiv/tasks/download_bins.yml b/roles/contiv/tasks/download_bins.yml
index 831fd360a..47d74da9c 100644
--- a/roles/contiv/tasks/download_bins.yml
+++ b/roles/contiv/tasks/download_bins.yml
@@ -4,7 +4,7 @@
path: "{{ contiv_current_release_directory }}"
state: directory
-- name: Install bzip2
+- name: Download Bins | Install bzip2
yum:
name: bzip2
state: installed
@@ -18,9 +18,9 @@
mode: 0755
validate_certs: False
environment:
- http_proxy: "{{ http_proxy|default('') }}"
- https_proxy: "{{ https_proxy|default('') }}"
- no_proxy: "{{ no_proxy|default('') }}"
+ http_proxy: "{{ contiv_http_proxy|default('') }}"
+ https_proxy: "{{ contiv_https_proxy|default('') }}"
+ no_proxy: "{{ contiv_no_proxy|default('') }}"
- name: Download Bins | Extract Contiv tar file
unarchive:
@@ -30,19 +30,19 @@
- name: Download Bins | Download cni tar file
get_url:
- url: "{{ cni_bin_url }}"
- dest: "{{ cni_download_dir }}"
+ url: "{{ contiv_cni_bin_url }}"
+ dest: "{{ contiv_cni_download_dir }}"
mode: 0755
validate_certs: False
environment:
- http_proxy: "{{ http_proxy|default('') }}"
- https_proxy: "{{ https_proxy|default('') }}"
- no_proxy: "{{ no_proxy|default('') }}"
+ http_proxy: "{{ contiv_http_proxy|default('') }}"
+ https_proxy: "{{ contiv_https_proxy|default('') }}"
+ no_proxy: "{{ contiv_no_proxy|default('') }}"
register: download_file
- name: Download Bins | Extract cni tar file
unarchive:
src: "{{ download_file.dest }}"
- dest: "{{ cni_download_dir }}"
+ dest: "{{ contiv_cni_download_dir }}"
copy: no
when: download_file.changed
diff --git a/roles/contiv/tasks/etcd.yml b/roles/contiv/tasks/etcd.yml
new file mode 100644
index 000000000..b08ead982
--- /dev/null
+++ b/roles/contiv/tasks/etcd.yml
@@ -0,0 +1,114 @@
+---
+# To run contiv-etcd in a container as non-root, we need to match the uid/gid
+# with the filesystem permissions on the host.
+- name: Contiv etcd | Create local unix group
+ group:
+ name: "{{ contiv_etcd_system_group }}"
+ gid: "{{ contiv_etcd_system_gid }}"
+ system: yes
+
+- name: Contiv etcd | Create local unix user
+ user:
+ name: "{{ contiv_etcd_system_user }}"
+ createhome: no
+ uid: "{{ contiv_etcd_system_uid }}"
+ group: "{{ contiv_etcd_system_group }}"
+ home: "{{ contiv_etcd_data_dir }}"
+ shell: /bin/false
+ system: yes
+
+- name: Contiv etcd | Create directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ mode: g-rwx,o-rwx
+ owner: "{{ contiv_etcd_system_user }}"
+ group: "{{ contiv_etcd_system_group }}"
+ setype: svirt_sandbox_file_t
+ seuser: system_u
+ serole: object_r
+ selevel: s0
+ recurse: yes
+ with_items:
+ - "{{ contiv_etcd_data_dir }}"
+ - "{{ contiv_etcd_conf_dir }}"
+
+- name: Contiv etcd | Create contiv-etcd openshift user
+ oc_serviceaccount:
+ state: present
+ name: contiv-etcd
+ namespace: kube-system
+ run_once: true
+
+- name: Contiv etcd | Create temp directory for doing work
+ command: mktemp -d /tmp/openshift-contiv-XXXXXX
+ register: mktemp
+ changed_when: False
+ # For things that pass temp files between steps, we want to make sure they
+ # run on the same node.
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: Contiv etcd | Create etcd-scc.yml from template
+ template:
+ src: etcd-scc.yml.j2
+ dest: "{{ mktemp.stdout }}/etcd-scc.yml"
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: Contiv etcd | Create etcd.yml from template
+ template:
+ src: etcd-daemonset.yml.j2
+ dest: "{{ mktemp.stdout }}/etcd-daemonset.yml"
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: Contiv etcd | Create etcd-proxy.yml from template
+ template:
+ src: etcd-proxy-daemonset.yml.j2
+ dest: "{{ mktemp.stdout }}/etcd-proxy-daemonset.yml"
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: Contiv etcd | Add etcd scc
+ oc_obj:
+ state: present
+ namespace: "kube-system"
+ kind: SecurityContextConstraints
+ name: contiv-etcd
+ files:
+ - "{{ mktemp.stdout }}/etcd-scc.yml"
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+# Always "import" this file, k8s won't do anything if it matches exactly what
+# is already in the cluster.
+- name: Contiv etcd | Add etcd daemonset
+ oc_obj:
+ state: present
+ namespace: "kube-system"
+ kind: daemonset
+ name: contiv-etcd
+ files:
+ - "{{ mktemp.stdout }}/etcd-daemonset.yml"
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: Contiv etcd | Add etcd-proxy daemonset
+ oc_obj:
+ state: present
+ namespace: "kube-system"
+ kind: daemonset
+ name: contiv-etcd-proxy
+ files:
+ - "{{ mktemp.stdout }}/etcd-proxy-daemonset.yml"
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: Contiv etcd | Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
diff --git a/roles/contiv/tasks/main.yml b/roles/contiv/tasks/main.yml
index cb9196a71..4d530ae90 100644
--- a/roles/contiv/tasks/main.yml
+++ b/roles/contiv/tasks/main.yml
@@ -1,14 +1,15 @@
---
-- name: Ensure bin_dir exists
+- include_tasks: old_version_cleanup.yml
+
+- name: Ensure contiv_bin_dir exists
file:
- path: "{{ bin_dir }}"
+ path: "{{ contiv_bin_dir }}"
recurse: yes
state: directory
- include_tasks: download_bins.yml
- include_tasks: netmaster.yml
- when: contiv_role == "netmaster"
+ when: contiv_master
- include_tasks: netplugin.yml
- when: contiv_role == "netplugin"
diff --git a/roles/contiv/tasks/netmaster.yml b/roles/contiv/tasks/netmaster.yml
index 6f15af8c2..bb22fb801 100644
--- a/roles/contiv/tasks/netmaster.yml
+++ b/roles/contiv/tasks/netmaster.yml
@@ -1,34 +1,16 @@
---
- include_tasks: netmaster_firewalld.yml
- when: has_firewalld
+ when: contiv_has_firewalld
- include_tasks: netmaster_iptables.yml
- when: not has_firewalld and has_iptables
+ when: not contiv_has_firewalld and contiv_has_iptables
-- name: Netmaster | Check is /etc/hosts file exists
- stat:
- path: /etc/hosts
- register: hosts
-
-- name: Netmaster | Create hosts file if it is not present
- file:
- path: /etc/hosts
- state: touch
- when: not hosts.stat.exists
-
-- name: Netmaster | Build hosts file
- lineinfile:
- dest: /etc/hosts
- regexp: .*netmaster$
- line: "{{ hostvars[item]['ansible_' + netmaster_interface].ipv4.address }} netmaster"
- state: present
- when: hostvars[item]['ansible_' + netmaster_interface].ipv4.address is defined
- with_items: "{{ groups['masters'] }}"
+- include_tasks: etcd.yml
- name: Netmaster | Create netmaster symlinks
file:
src: "{{ contiv_current_release_directory }}/{{ item }}"
- dest: "{{ bin_dir }}/{{ item }}"
+ dest: "{{ contiv_bin_dir }}/{{ item }}"
state: link
with_items:
- netmaster
@@ -36,7 +18,7 @@
- name: Netmaster | Copy environment file for netmaster
template:
- src: netmaster.env.j2
+ src: netmaster.j2
dest: /etc/default/netmaster
mode: 0644
notify: restart netmaster
@@ -75,3 +57,5 @@
- include_tasks: default_network.yml
when: contiv_default_network == true
+
+- include_tasks: api_proxy.yml
diff --git a/roles/contiv/tasks/netmaster_firewalld.yml b/roles/contiv/tasks/netmaster_firewalld.yml
index 2975351ac..0d52f821d 100644
--- a/roles/contiv/tasks/netmaster_firewalld.yml
+++ b/roles/contiv/tasks/netmaster_firewalld.yml
@@ -1,16 +1,17 @@
---
-- name: Netmaster Firewalld | Open Netmaster port
+- name: Netmaster Firewalld | Add internal rules
firewalld:
- port: "{{ netmaster_port }}/tcp"
- permanent: false
- state: enabled
- # in case this is also a node where firewalld turned off
- ignore_errors: yes
+ immediate: true
+ permanent: true
+ port: "{{ item[0] }}"
+ source: "{{ item[1] }}"
+ with_nested:
+ - "{{ contiv_netmaster_internal }}"
+ - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}"
-- name: Netmaster Firewalld | Save Netmaster port
+- name: Netmaster Firewalld | Add external rules
firewalld:
- port: "{{ netmaster_port }}/tcp"
+ immediate: true
permanent: true
- state: enabled
- # in case this is also a node where firewalld turned off
- ignore_errors: yes
+ port: "{{ item }}"
+ with_items: "{{ contiv_netmaster_external }}"
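
with_nested iterates the cross product of its lists, so every internal
port/proto pair is opened for every peer host. A minimal sketch of the
expansion (hosts are placeholders):

    - debug:
        msg: "allow {{ item[0] }} from {{ item[1] }}"
      with_nested:
        - ["9999/tcp", "9001/tcp"]
        - ["node1.example.com", "node2.example.com"]
      # -> four iterations: each port paired with each source host
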
diff --git a/roles/contiv/tasks/netmaster_iptables.yml b/roles/contiv/tasks/netmaster_iptables.yml
index c98e7b6a5..3b68ea0c3 100644
--- a/roles/contiv/tasks/netmaster_iptables.yml
+++ b/roles/contiv/tasks/netmaster_iptables.yml
@@ -1,27 +1,32 @@
---
-- name: Netmaster IPtables | Get iptables rules
- command: iptables -L --wait
- register: iptablesrules
- check_mode: no
-
-- name: Netmaster IPtables | Enable iptables at boot
- service:
- name: iptables
- enabled: yes
- state: started
-
-- name: Netmaster IPtables | Open Netmaster with iptables
- command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "contiv"
- with_items:
- - "{{ contiv_rpc_port1 }}"
- - "{{ contiv_rpc_port2 }}"
- - "{{ contiv_rpc_port3 }}"
- when: iptablesrules.stdout.find("contiv") == -1
+- name: Netmaster IPtables | Add internal rules
+ iptables:
+ action: insert
+ chain: INPUT
+ # Parsed from the contiv_netmaster_internal list, this will be tcp or udp.
+ protocol: "{{ item[0].split('/')[1] }}"
+ match: "{{ item[0].split('/')[1] }}"
+ # Parsed from the contiv_netmaster_internal list, this will be a port number.
+ destination_port: "{{ item[0].split('/')[0] }}"
+ # This is an IP address from a node in the cluster.
+ source: "{{ item[1] }}"
+ jump: ACCEPT
+ comment: contiv
+ with_nested:
+ - "{{ contiv_netmaster_internal }}"
+ - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}"
notify: Save iptables rules
-- name: Netmaster IPtables | Open netmaster main port
- command: /sbin/iptables -I INPUT 1 -p tcp -s {{ item }} --dport {{ netmaster_port }} -j ACCEPT -m comment --comment "contiv"
- with_items:
- - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + netmaster_interface].ipv4.address)|list }}"
- when: iptablesrules.stdout.find("contiv") == -1
+- name: Netmaster IPtables | Add external rules
+ iptables:
+ action: insert
+ chain: INPUT
+ # Parsed from the contiv_netmaster_external list, this will be tcp or udp.
+ protocol: "{{ item.split('/')[1] }}"
+ match: "{{ item.split('/')[1] }}"
+ # Parsed from the contiv_netmaster_external list, this will be a port number.
+ destination_port: "{{ item.split('/')[0] }}"
+ jump: ACCEPT
+ comment: contiv
+ with_items: "{{ contiv_netmaster_external }}"
notify: Save iptables rules
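
Functionally this swaps the raw shell commands for the iptables module, which
checks rule state itself instead of grepping iptables -L output. For the loop
item pair ["9999/tcp", "10.0.0.11"] the internal-rule task inserts the
equivalent of (IP illustrative):

    /sbin/iptables -I INPUT 1 -p tcp -m tcp -s 10.0.0.11 --dport 9999 -j ACCEPT -m comment --comment "contiv"
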
diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml
index 540f6e4bc..60f432202 100644
--- a/roles/contiv/tasks/netplugin.yml
+++ b/roles/contiv/tasks/netplugin.yml
@@ -1,9 +1,9 @@
---
- include_tasks: netplugin_firewalld.yml
- when: has_firewalld
+ when: contiv_has_firewalld
- include_tasks: netplugin_iptables.yml
- when: has_iptables
+ when: not contiv_has_firewalld and contiv_has_iptables
- name: Netplugin | Ensure localhost entry correct in /etc/hosts
lineinfile:
@@ -20,41 +20,40 @@
state: absent
- include_tasks: ovs.yml
- when: netplugin_driver == "ovs"
+ when: contiv_netplugin_driver == "ovs"
- name: Netplugin | Create Netplugin bin symlink
file:
src: "{{ contiv_current_release_directory }}/netplugin"
- dest: "{{ bin_dir }}/netplugin"
+ dest: "{{ contiv_bin_dir }}/netplugin"
state: link
-
-- name: Netplugin | Ensure cni_bin_dir exists
+- name: Netplugin | Ensure contiv_cni_bin_dir exists
file:
- path: "{{ cni_bin_dir }}"
+ path: "{{ contiv_cni_bin_dir }}"
recurse: yes
state: directory
- name: Netplugin | Create CNI bin symlink
file:
src: "{{ contiv_current_release_directory }}/contivk8s"
- dest: "{{ cni_bin_dir }}/contivk8s"
+ dest: "{{ contiv_cni_bin_dir }}/contivk8s"
state: link
- name: Netplugin | Copy CNI loopback bin
copy:
- src: "{{ cni_download_dir }}/loopback"
- dest: "{{ cni_bin_dir }}/loopback"
+ src: "{{ contiv_cni_download_dir }}/loopback"
+ dest: "{{ contiv_cni_bin_dir }}/loopback"
remote_src: True
mode: 0755
-- name: Netplugin | Ensure kube_plugin_dir and cni/net.d directories exist
+- name: Netplugin | Ensure contiv_kube_plugin_dir and cni/net.d directories exist
file:
path: "{{ item }}"
recurse: yes
state: directory
with_items:
- - "{{ kube_plugin_dir }}"
+ - "{{ contiv_kube_plugin_dir }}"
- "/etc/cni/net.d"
- name: Netplugin | Ensure contiv_config_dir exists
@@ -68,7 +67,7 @@
src: contiv_cni.conf
dest: "{{ item }}"
with_items:
- - "{{ kube_plugin_dir }}/contiv_cni.conf"
+ - "{{ contiv_kube_plugin_dir }}/contiv_cni.conf"
- "/etc/cni/net.d"
# notify: restart kubelet
@@ -85,11 +84,11 @@
mode: 0644
notify: restart netplugin
-- name: Docker | Make sure proxy setting exists
+- name: Netplugin | Make sure docker proxy setting exists
lineinfile:
dest: /etc/sysconfig/docker-network
regexp: '^https_proxy.*'
- line: 'https_proxy={{ https_proxy }}'
+ line: 'https_proxy={{ contiv_https_proxy }}'
state: present
register: docker_updated
@@ -103,9 +102,9 @@
command: systemctl daemon-reload
when: docker_updated is changed
-- name: Docker | Restart docker
+- name: Netplugin | Restart docker
service:
- name: "{{ openshift_docker_service_name }}"
+ name: "{{ contiv_openshift_docker_service_name }}"
state: restarted
when: docker_updated is changed
register: l_docker_restart_docker_in_contiv_result
diff --git a/roles/contiv/tasks/netplugin_firewalld.yml b/roles/contiv/tasks/netplugin_firewalld.yml
index 3aeffae56..5ac531ec6 100644
--- a/roles/contiv/tasks/netplugin_firewalld.yml
+++ b/roles/contiv/tasks/netplugin_firewalld.yml
@@ -1,34 +1,17 @@
---
-- name: Netplugin Firewalld | Open Netplugin port
+- name: Netplugin Firewalld | Add internal rules
firewalld:
- port: "{{ netplugin_port }}/tcp"
- permanent: false
- state: enabled
- # in case this is also a node where firewalld turned off
- ignore_errors: yes
-
-- name: Netplugin Firewalld | Save Netplugin port
- firewalld:
- port: "{{ netplugin_port }}/tcp"
+ immediate: true
permanent: true
- state: enabled
- # in case this is also a node where firewalld turned off
- ignore_errors: yes
-
-- name: Netplugin Firewalld | Open vxlan port
- firewalld:
- port: "8472/udp"
- permanent: false
- state: enabled
- # in case this is also a node where firewalld turned off
- ignore_errors: yes
- when: contiv_encap_mode == "vxlan"
+ port: "{{ item[0] }}"
+ source: "{{ item[1] }}"
+ with_nested:
+ - "{{ contiv_netplugin_internal }}"
+ - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}"
-- name: Netplugin Firewalld | Save firewalld vxlan port for flanneld
+- name: Netplugin Firewalld | Add dns rule
firewalld:
- port: "8472/udp"
+ immediate: true
permanent: true
- state: enabled
- # in case this is also a node where firewalld turned off
- ignore_errors: yes
- when: contiv_encap_mode == "vxlan"
+ port: "53/udp"
+ interface: contivh0
diff --git a/roles/contiv/tasks/netplugin_iptables.yml b/roles/contiv/tasks/netplugin_iptables.yml
index 3ea34645d..9d376f4e5 100644
--- a/roles/contiv/tasks/netplugin_iptables.yml
+++ b/roles/contiv/tasks/netplugin_iptables.yml
@@ -1,58 +1,52 @@
---
-- name: Netplugin IPtables | Get iptables rules
- command: iptables -L --wait
- register: iptablesrules
- check_mode: no
+- name: Netplugin IPtables | Add internal rules
+ iptables:
+ action: insert
+ chain: INPUT
+ protocol: "{{ item[0].split('/')[1] }}"
+ match: "{{ item[0].split('/')[1] }}"
+ destination_port: "{{ item[0].split('/')[0] }}"
+ source: "{{ item[1] }}"
+ jump: ACCEPT
+ comment: contiv
+ with_nested:
+ - "{{ contiv_netplugin_internal }}"
+ - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}"
+ notify: Save iptables rules
+
+- name: Netplugin IPtables | Add [in] forward rules
+ iptables:
+ action: insert
+ chain: FORWARD
+ in_interface: "{{ item }}"
+ jump: ACCEPT
+ comment: contiv
+ with_items: "{{ contiv_netplugin_forward_interfaces }}"
+ notify: Save iptables rules
+
+- name: Netplugin IPtables | Add [out] forward rules
+ iptables:
+ action: insert
+ chain: FORWARD
+ out_interface: "{{ item }}"
+ jump: ACCEPT
+ comment: contiv
+ with_items: "{{ contiv_netplugin_forward_interfaces }}"
+ notify: Save iptables rules
+
+- name: Netplugin IPtables | Add dns rule
+ iptables:
+ action: insert
+ chain: INPUT
+ protocol: udp
+ match: udp
+ destination_port: 53
+ in_interface: contivh0
+ jump: ACCEPT
+ comment: contiv
+ notify: Save iptables rules
- name: Netplugin IPtables | Enable iptables at boot
service:
name: iptables
enabled: yes
- state: started
-
-- name: Netplugin IPtables | Open Netmaster with iptables
- command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "contiv"
- with_items:
- - "{{ netmaster_port }}"
- - "{{ contiv_rpc_port1 }}"
- - "{{ contiv_rpc_port2 }}"
- - "{{ contiv_rpc_port3 }}"
- - "{{ contiv_etcd_port }}"
- - "{{ kube_master_api_port }}"
- when: iptablesrules.stdout.find("contiv") == -1
- notify: Save iptables rules
-
-- name: Netplugin IPtables | Open vxlan port with iptables
- command: /sbin/iptables -I INPUT 1 -p udp --dport 8472 -j ACCEPT -m comment --comment "netplugin vxlan 8472"
- when: iptablesrules.stdout.find("netplugin vxlan 8472") == -1
- notify: Save iptables rules
-
-- name: Netplugin IPtables | Open vxlan port with iptables
- command: /sbin/iptables -I INPUT 1 -p udp --dport 4789 -j ACCEPT -m comment --comment "netplugin vxlan 4789"
- when: iptablesrules.stdout.find("netplugin vxlan 4789") == -1
- notify: Save iptables rules
-
-- name: Netplugin IPtables | Allow from contivh0
- command: /sbin/iptables -I FORWARD 1 -i contivh0 -j ACCEPT -m comment --comment "contivh0 FORWARD input"
- when: iptablesrules.stdout.find("contivh0 FORWARD input") == -1
- notify: Save iptables rules
-
-- name: Netplugin IPtables | Allow to contivh0
- command: /sbin/iptables -I FORWARD 1 -o contivh0 -j ACCEPT -m comment --comment "contivh0 FORWARD output"
- when: iptablesrules.stdout.find("contivh0 FORWARD output") == -1
- notify: Save iptables rules
-
-- name: Netplugin IPtables | Allow from contivh1
- command: /sbin/iptables -I FORWARD 1 -i contivh1 -j ACCEPT -m comment --comment "contivh1 FORWARD input"
- when: iptablesrules.stdout.find("contivh1 FORWARD input") == -1
- notify: Save iptables rules
-
-- name: Netplugin IPtables | Allow to contivh1
- command: /sbin/iptables -I FORWARD 1 -o contivh1 -j ACCEPT -m comment --comment "contivh1 FORWARD output"
- when: iptablesrules.stdout.find("contivh1 FORWARD output") == -1
- notify: Save iptables rules
-
-- name: Netplugin IPtables | Allow dns
- command: /sbin/iptables -I INPUT 1 -p udp --dport 53 -j ACCEPT -m comment --comment "contiv dns"
- when: iptablesrules.stdout.find("contiv dns") == -1
- notify: Save iptables rules
diff --git a/roles/contiv/tasks/old_version_cleanup.yml b/roles/contiv/tasks/old_version_cleanup.yml
new file mode 100644
index 000000000..8b3d88096
--- /dev/null
+++ b/roles/contiv/tasks/old_version_cleanup.yml
@@ -0,0 +1,43 @@
+---
+- name: Old version cleanup | Check if old auth proxy service exists
+ stat:
+ path: /etc/systemd/system/auth-proxy.service
+ register: auth_proxy_stat
+
+- name: Old version cleanup | Stop old auth proxy
+ service:
+ name: auth-proxy
+ enabled: no
+ state: stopped
+ when: auth_proxy_stat.stat.exists
+
+# Note(NB): The new containerized contiv-etcd service uses the same data
+# directory on the host, so etcd data is not lost.
+- name: Old version cleanup | Check if old contiv-etcd service exists
+ stat:
+ path: /etc/systemd/system/contiv-etcd.service
+ register: contiv_etcd_stat
+
+- name: Old version cleanup | Stop old contiv-etcd
+ service:
+ name: contiv-etcd
+ enabled: no
+ state: stopped
+ when: contiv_etcd_stat.stat.exists
+
+- name: Old version cleanup | Delete old files
+ file:
+ state: absent
+ path: "{{ item }}"
+ with_items:
+ - /etc/systemd/system/auth-proxy.service
+ - /var/contiv/certs
+ - /usr/bin/auth_proxy.sh
+ - /etc/systemd/system/contiv-etcd.service
+ - /etc/systemd/system/contiv-etcd.service.d
+
+- include_tasks: old_version_cleanup_iptables.yml
+ when: not contiv_has_firewalld and contiv_has_iptables
+
+- include_tasks: old_version_cleanup_firewalld.yml
+ when: contiv_has_firewalld
diff --git a/roles/contiv/tasks/old_version_cleanup_firewalld.yml b/roles/contiv/tasks/old_version_cleanup_firewalld.yml
new file mode 100644
index 000000000..675a6358a
--- /dev/null
+++ b/roles/contiv/tasks/old_version_cleanup_firewalld.yml
@@ -0,0 +1,11 @@
+---
+- name: Old version cleanup | Delete old firewalld rules
+ firewalld:
+ state: absent
+ immediate: true
+ permanent: true
+ port: "{{ item }}"
+ with_items:
+ - "9999/tcp"
+ - "6640/tcp"
+ - "8472/udp"
diff --git a/roles/contiv/tasks/old_version_cleanup_iptables.yml b/roles/contiv/tasks/old_version_cleanup_iptables.yml
new file mode 100644
index 000000000..513357606
--- /dev/null
+++ b/roles/contiv/tasks/old_version_cleanup_iptables.yml
@@ -0,0 +1,44 @@
+---
+- name: Old version cleanup | Delete old forward [in] iptables rules
+ iptables:
+ state: absent
+ chain: FORWARD
+ in_interface: "{{ item }}"
+ jump: ACCEPT
+ comment: "{{ item }} FORWARD input"
+ with_items:
+ - contivh0
+ - contivh1
+ notify: Save iptables rules
+
+- name: Old version cleanup | Delete old forward [out] iptables rules
+ iptables:
+ state: absent
+ chain: FORWARD
+ out_interface: "{{ item }}"
+ jump: ACCEPT
+ comment: "{{ item }} FORWARD output"
+ with_items:
+ - contivh0
+ - contivh1
+ notify: Save iptables rules
+
+- name: Old version cleanup | Delete old input iptables rules
+ iptables:
+ state: absent
+ chain: INPUT
+ protocol: "{{ item.split('/')[1] }}"
+ match: "{{ item.split('/')[1] }}"
+ destination_port: "{{ item.split('/')[0] }}"
+ comment: "{{ item.split('/')[2] }}"
+ jump: ACCEPT
+ with_items:
+ - "53/udp/contiv dns"
+ - "4789/udp/netplugin vxlan 4789"
+ - "8472/udp/netplugin vxlan 8472"
+ - "9003/tcp/contiv"
+ - "9002/tcp/contiv"
+ - "9001/tcp/contiv"
+ - "9999/tcp/contiv"
+ - "10000/tcp/Contiv auth proxy service (10000)"
+ notify: Save iptables rules
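
Each cleanup entry packs port, protocol, and the old rule's comment into one
string; because none of the comments contain a slash, split('/') recovers all
three fields:

    - debug:
        msg: "{{ '10000/tcp/Contiv auth proxy service (10000)'.split('/') }}"
      # -> ['10000', 'tcp', 'Contiv auth proxy service (10000)']
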
diff --git a/roles/contiv/tasks/ovs.yml b/roles/contiv/tasks/ovs.yml
index 5c92e90e9..21ba6ead4 100644
--- a/roles/contiv/tasks/ovs.yml
+++ b/roles/contiv/tasks/ovs.yml
@@ -1,6 +1,6 @@
---
- include_tasks: packageManagerInstall.yml
- when: source_type == "packageManager"
+ when: contiv_source_type == "packageManager"
tags:
- binary-update
diff --git a/roles/contiv/tasks/packageManagerInstall.yml b/roles/contiv/tasks/packageManagerInstall.yml
index d5726476c..8c8e7a7bd 100644
--- a/roles/contiv/tasks/packageManagerInstall.yml
+++ b/roles/contiv/tasks/packageManagerInstall.yml
@@ -4,10 +4,9 @@
did_install: false
- include_tasks: pkgMgrInstallers/centos-install.yml
- when: (ansible_os_family == "RedHat") and
- not is_atomic
+ when: ansible_os_family == "RedHat" and not openshift_is_atomic | bool
- name: Package Manager | Set fact saying we did CentOS package install
set_fact:
did_install: true
- when: (ansible_os_family == "RedHat")
+ when: ansible_os_family == "RedHat"
diff --git a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
index 53c5b4099..2c82973d6 100644
--- a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
+++ b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
@@ -12,9 +12,9 @@
dest: /tmp/rdo-release-ocata-2.noarch.rpm
validate_certs: False
environment:
- http_proxy: "{{ http_proxy|default('') }}"
- https_proxy: "{{ https_proxy|default('') }}"
- no_proxy: "{{ no_proxy|default('') }}"
+ http_proxy: "{{ contiv_http_proxy|default('') }}"
+ https_proxy: "{{ contiv_https_proxy|default('') }}"
+ no_proxy: "{{ contiv_no_proxy|default('') }}"
tags:
- ovs_install
@@ -30,9 +30,9 @@
pkg=openvswitch
state=present
environment:
- http_proxy: "{{ http_proxy|default('') }}"
- https_proxy: "{{ https_proxy|default('') }}"
- no_proxy: "{{ no_proxy|default('') }}"
+ http_proxy: "{{ contiv_http_proxy|default('') }}"
+ https_proxy: "{{ contiv_https_proxy|default('') }}"
+ no_proxy: "{{ contiv_no_proxy|default('') }}"
tags:
- ovs_install
register: result
diff --git a/roles/contiv/templates/aci-gw.service b/roles/contiv/templates/aci-gw.service
index 9b3f12567..e2813c99d 100644
--- a/roles/contiv/templates/aci-gw.service
+++ b/roles/contiv/templates/aci-gw.service
@@ -1,10 +1,10 @@
[Unit]
Description=Contiv ACI gw
-After=auditd.service systemd-user-sessions.service time-sync.target {{ openshift_docker_service_name }}.service
+After=auditd.service systemd-user-sessions.service time-sync.target {{ contiv_openshift_docker_service_name }}.service
[Service]
-ExecStart={{ bin_dir }}/aci_gw.sh start
-ExecStop={{ bin_dir }}/aci_gw.sh stop
+ExecStart={{ contiv_bin_dir }}/aci_gw.sh start
+ExecStop={{ contiv_bin_dir }}/aci_gw.sh stop
KillMode=control-group
Restart=always
RestartSec=10
diff --git a/roles/contiv/templates/aci_gw.j2 b/roles/contiv/templates/aci_gw.j2
index ab4ad46a6..5ff349945 100644
--- a/roles/contiv/templates/aci_gw.j2
+++ b/roles/contiv/templates/aci_gw.j2
@@ -11,13 +11,13 @@ start)
set -e
docker run --net=host \
- -e "APIC_URL={{ apic_url }}" \
- -e "APIC_USERNAME={{ apic_username }}" \
- -e "APIC_PASSWORD={{ apic_password }}" \
- -e "APIC_LEAF_NODE={{ apic_leaf_nodes }}" \
- -e "APIC_PHYS_DOMAIN={{ apic_phys_dom }}" \
- -e "APIC_EPG_BRIDGE_DOMAIN={{ apic_epg_bridge_domain }}" \
- -e "APIC_CONTRACTS_UNRESTRICTED_MODE={{ apic_contracts_unrestricted_mode }}" \
+ -e "APIC_URL={{ contiv_apic_url }}" \
+ -e "APIC_USERNAME={{ contiv_apic_username }}" \
+ -e "APIC_PASSWORD={{ contiv_apic_password }}" \
+ -e "APIC_LEAF_NODE={{ contiv_apic_leaf_nodes }}" \
+ -e "APIC_PHYS_DOMAIN={{ contiv_apic_phys_dom }}" \
+ -e "APIC_EPG_BRIDGE_DOMAIN={{ contiv_apic_epg_bridge_domain }}" \
+ -e "APIC_CONTRACTS_UNRESTRICTED_MODE={{ contiv_apic_contracts_unrestricted_mode }}" \
--name=contiv-aci-gw \
contiv/aci-gw
;;
diff --git a/roles/contiv/templates/api-proxy-daemonset.yml.j2 b/roles/contiv/templates/api-proxy-daemonset.yml.j2
new file mode 100644
index 000000000..a15073580
--- /dev/null
+++ b/roles/contiv/templates/api-proxy-daemonset.yml.j2
@@ -0,0 +1,57 @@
+---
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: contiv-api-proxy
+ namespace: kube-system
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ name: contiv-api-proxy
+ template:
+ metadata:
+ namespace: kube-system
+ labels:
+ name: contiv-api-proxy
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ""
+ spec:
+ serviceAccountName: contiv-api-proxy
+ hostNetwork: true
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+{% for node in groups.oo_masters_to_config %}
+ - "{{ node }}"
+{% endfor %}
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ containers:
+ - name: contiv-api-proxy
+ image: "{{ contiv_api_proxy_image_repo }}:{{ contiv_version }}"
+ args:
+ - "--listen-address=0.0.0.0:{{ contiv_api_proxy_port }}"
+ - --tls-key-file=/var/contiv/api_proxy_key.pem
+ - --tls-certificate=/var/contiv/api_proxy_cert.pem
+ - "--data-store-address={{ etcd_host }}"
+ - --data-store-driver=etcd
+ - "--netmaster-address=127.0.0.1:{{ contiv_netmaster_port }}"
+ ports:
+ - containerPort: "{{ contiv_api_proxy_port }}"
+ hostPort: "{{ contiv_api_proxy_port }}"
+ volumeMounts:
+ - name: secret-volume
+ mountPath: /var/contiv
+ readOnly: true
+ volumes:
+ - name: secret-volume
+ secret:
+ secretName: contiv-api-proxy-secret
diff --git a/roles/contiv/templates/api-proxy-secrets.yml.j2 b/roles/contiv/templates/api-proxy-secrets.yml.j2
new file mode 100644
index 000000000..cd800c97d
--- /dev/null
+++ b/roles/contiv/templates/api-proxy-secrets.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: contiv-api-proxy-secret
+ namespace: kube-system
+ labels:
+ name: contiv-api-proxy-secret
+# Use data+b64encode, because stringData doesn't preserve newlines.
+data:
+ api_proxy_key.pem: "{{ key | b64encode }}"
+ api_proxy_cert.pem: "{{ cert | b64encode }}"
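
Secret data must be base64-encoded in any case, and encoding the raw PEM
keeps its newlines intact end to end, which is the point of the comment
above. A quick demonstration (certificate body elided):

    - debug:
        msg: '{{ "-----BEGIN CERTIFICATE-----\nMIIB...\n-----END CERTIFICATE-----\n" | b64encode }}'
      # -> a single base64 line; b64decode restores the PEM, newlines included
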
diff --git a/roles/contiv/templates/contiv.cfg.j2 b/roles/contiv/templates/contiv.cfg.j2
index f0e99c556..1dce9fcc2 100644
--- a/roles/contiv/templates/contiv.cfg.j2
+++ b/roles/contiv/templates/contiv.cfg.j2
@@ -1,5 +1,5 @@
{
- "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:{{ kube_master_api_port }}",
+ "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + contiv_netmaster_interface].ipv4.address }}:{{ contiv_kube_master_api_port }}",
"K8S_CA": "{{ openshift.common.config_base }}/node/ca.crt",
"K8S_KEY": "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.key",
"K8S_CERT": "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.crt",
diff --git a/roles/contiv/templates/contiv.cfg.master.j2 b/roles/contiv/templates/contiv.cfg.master.j2
index fac8e3c4c..ca29b8001 100644
--- a/roles/contiv/templates/contiv.cfg.master.j2
+++ b/roles/contiv/templates/contiv.cfg.master.j2
@@ -1,5 +1,5 @@
{
- "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:{{ kube_master_api_port }}",
+ "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + contiv_netmaster_interface].ipv4.address }}:{{ contiv_kube_master_api_port }}",
"K8S_CA": "{{ openshift.common.config_base }}/master/ca.crt",
"K8S_KEY": "{{ openshift.common.config_base }}/master/system:node:{{ openshift.common.hostname }}.key",
"K8S_CERT": "{{ openshift.common.config_base }}/master/system:node:{{ openshift.common.hostname }}.crt",
diff --git a/roles/contiv/templates/etcd-daemonset.yml.j2 b/roles/contiv/templates/etcd-daemonset.yml.j2
new file mode 100644
index 000000000..76937e670
--- /dev/null
+++ b/roles/contiv/templates/etcd-daemonset.yml.j2
@@ -0,0 +1,83 @@
+---
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: contiv-etcd
+ namespace: kube-system
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ name: contiv-etcd
+ template:
+ metadata:
+ namespace: kube-system
+ labels:
+ name: contiv-etcd
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ""
+ spec:
+ serviceAccountName: contiv-etcd
+ hostNetwork: true
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+{% for node in groups.oo_masters_to_config %}
+ - "{{ node }}"
+{% endfor %}
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ initContainers:
+ - name: contiv-etcd-init
+ image: "{{ contiv_etcd_init_image_repo }}:{{ contiv_etcd_init_image_tag }}"
+ env:
+ - name: ETCD_INIT_ARGSFILE
+ value: "{{ contiv_etcd_conf_dir }}/contiv-etcd-args"
+ - name: ETCD_INIT_LISTEN_PORT
+ value: "{{ contiv_etcd_port }}"
+ - name: ETCD_INIT_PEER_PORT
+ value: "{{ contiv_etcd_peer_port }}"
+ - name: ETCD_INIT_CLUSTER
+ value: "{{ contiv_etcd_peers }}"
+ - name: ETCD_INIT_DATA_DIR
+ value: "{{ contiv_etcd_data_dir }}"
+ volumeMounts:
+ - name: contiv-etcd-conf-dir
+ mountPath: "{{ contiv_etcd_conf_dir }}"
+ securityContext:
+ runAsUser: "{{ contiv_etcd_system_uid }}"
+ fsGroup: "{{ contiv_etcd_system_gid }}"
+ containers:
+ - name: contiv-etcd
+ image: "{{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}"
+ command:
+ - sh
+ - -c
+ - 'exec etcd $(cat "$ETCD_INIT_ARGSFILE")'
+ env:
+ - name: ETCD_INIT_ARGSFILE
+ value: "{{ contiv_etcd_conf_dir }}/contiv-etcd-args"
+ volumeMounts:
+ - name: contiv-etcd-conf-dir
+ mountPath: "{{ contiv_etcd_conf_dir }}"
+ - name: contiv-etcd-data-dir
+ mountPath: "{{ contiv_etcd_data_dir }}"
+ securityContext:
+ runAsUser: "{{ contiv_etcd_system_uid }}"
+ fsGroup: "{{ contiv_etcd_system_gid }}"
+ volumes:
+ - name: contiv-etcd-data-dir
+ hostPath:
+ type: DirectoryOrCreate
+ path: "{{ contiv_etcd_data_dir }}"
+ - name: contiv-etcd-conf-dir
+ hostPath:
+ type: DirectoryOrCreate
+ path: "{{ contiv_etcd_conf_dir }}"
diff --git a/roles/contiv/templates/etcd-proxy-daemonset.yml.j2 b/roles/contiv/templates/etcd-proxy-daemonset.yml.j2
new file mode 100644
index 000000000..4ec6cfd76
--- /dev/null
+++ b/roles/contiv/templates/etcd-proxy-daemonset.yml.j2
@@ -0,0 +1,55 @@
+---
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: contiv-etcd-proxy
+ namespace: kube-system
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ name: contiv-etcd-proxy
+ template:
+ metadata:
+ namespace: kube-system
+ labels:
+ name: contiv-etcd-proxy
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ""
+ spec:
+ serviceAccountName: contiv-etcd
+ hostNetwork: true
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: NotIn
+ values:
+{% for node in groups.oo_masters_to_config %}
+ - "{{ node }}"
+{% endfor %}
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ containers:
+ - name: contiv-etcd-proxy
+ image: "{{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}"
+ command:
+ - etcd
+ - "--proxy=on"
+ - "--listen-client-urls=http://127.0.0.1:{{ contiv_etcd_port }}"
+ - "--advertise-client-urls=http://127.0.0.1:{{ contiv_etcd_port }}"
+ - "--initial-cluster={{ contiv_etcd_peers }}"
+ - "--data-dir={{ contiv_etcd_data_dir }}"
+ volumeMounts:
+ - name: contiv-etcd-data-dir
+ mountPath: "{{ contiv_etcd_data_dir }}"
+ securityContext:
+ runAsUser: "{{ contiv_etcd_system_uid }}"
+ fsGroup: "{{ contiv_etcd_system_gid }}"
+ volumes:
+ - name: contiv-etcd-data-dir
+ emptyDir: {}
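
This proxy DaemonSet is the complement of contiv-etcd above: it reuses the same oo_masters_to_config list but negates it, so masters run the real etcd members while every other node runs a local etcd proxy pointed at contiv_etcd_peers. The two selectors differ only in the operator:

    # etcd-daemonset.yml.j2 (masters):       operator: In
    # etcd-proxy-daemonset.yml.j2 (others):  operator: NotIn
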
diff --git a/roles/contiv/templates/etcd-scc.yml.j2 b/roles/contiv/templates/etcd-scc.yml.j2
new file mode 100644
index 000000000..6c4bb1d1e
--- /dev/null
+++ b/roles/contiv/templates/etcd-scc.yml.j2
@@ -0,0 +1,42 @@
+allowHostDirVolumePlugin: true
+allowHostIPC: false
+allowHostNetwork: true
+allowHostPID: false
+allowHostPorts: false
+allowPrivilegedContainer: false
+allowedCapabilities: []
+allowedFlexVolumes: []
+apiVersion: v1
+defaultAddCapabilities: []
+fsGroup:
+ ranges:
+ - max: "{{ contiv_etcd_system_gid }}"
+ min: "{{ contiv_etcd_system_gid }}"
+ type: MustRunAs
+groups: []
+kind: SecurityContextConstraints
+metadata:
+ annotations:
+ kubernetes.io/description: 'For contiv-etcd only.'
+ creationTimestamp: null
+ name: contiv-etcd
+priority: null
+readOnlyRootFilesystem: true
+requiredDropCapabilities:
+- KILL
+- MKNOD
+- SETUID
+- SETGID
+runAsUser:
+ type: MustRunAs
+ uid: "{{ contiv_etcd_system_uid }}"
+seLinuxContext:
+ type: MustRunAs
+supplementalGroups:
+ type: MustRunAs
+users:
+- system:serviceaccount:kube-system:contiv-etcd
+volumes:
+- emptyDir
+- hostPath
+- secret
diff --git a/roles/contiv/templates/netmaster.env.j2 b/roles/contiv/templates/netmaster.env.j2
deleted file mode 100644
index 5b5c84a2e..000000000
--- a/roles/contiv/templates/netmaster.env.j2
+++ /dev/null
@@ -1,2 +0,0 @@
-NETMASTER_ARGS='--cluster-store etcd://{{ etcd_url }} --cluster-mode=kubernetes'
-
diff --git a/roles/contiv/templates/netmaster.j2 b/roles/contiv/templates/netmaster.j2
new file mode 100644
index 000000000..c9db122b5
--- /dev/null
+++ b/roles/contiv/templates/netmaster.j2
@@ -0,0 +1 @@
+NETMASTER_ARGS='--etcd={{ contiv_etcd_url }} --listen-url=127.0.0.1:{{ contiv_netmaster_port }} --fwdmode={{ contiv_netplugin_fwd_mode }} --infra={{ contiv_fabric_mode }} --control-url={{ contiv_netmaster_ctrl_ip }}:{{ contiv_netmaster_port }} --cluster-mode=kubernetes --netmode={{ contiv_encap_mode }}'
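
Relative to the deleted netmaster.env.j2, the netmaster arguments move from the old-style flags to the current CLI: --cluster-store etcd://URL becomes --etcd=URL, and the forwarding, fabric and encapsulation modes are now passed explicitly (--fwdmode, --infra, --netmode). A rendered example with hypothetical values:

    NETMASTER_ARGS='--etcd=http://127.0.0.1:6666 --listen-url=127.0.0.1:9999 --fwdmode=routing --infra=default --control-url=10.0.0.5:9999 --cluster-mode=kubernetes --netmode=vlan'
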
diff --git a/roles/contiv/templates/netmaster.service b/roles/contiv/templates/netmaster.service
index ce7d0c75e..b7289bc38 100644
--- a/roles/contiv/templates/netmaster.service
+++ b/roles/contiv/templates/netmaster.service
@@ -4,7 +4,7 @@ After=auditd.service systemd-user-sessions.service contiv-etcd.service
[Service]
EnvironmentFile=/etc/default/netmaster
-ExecStart={{ bin_dir }}/netmaster $NETMASTER_ARGS
+ExecStart={{ contiv_bin_dir }}/netmaster $NETMASTER_ARGS
KillMode=control-group
Restart=always
RestartSec=10
diff --git a/roles/contiv/templates/netplugin.j2 b/roles/contiv/templates/netplugin.j2
index a4928cc3d..0fd727401 100644
--- a/roles/contiv/templates/netplugin.j2
+++ b/roles/contiv/templates/netplugin.j2
@@ -1,7 +1,6 @@
{% if contiv_encap_mode == "vlan" %}
-NETPLUGIN_ARGS='-vlan-if {{ netplugin_interface }} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}'
+NETPLUGIN_ARGS='--vlan-if={{ contiv_netplugin_interface }} --ctrl-ip={{ contiv_netplugin_ctrl_ip }} --etcd={{ contiv_etcd_url }} --fwdmode={{ contiv_netplugin_fwd_mode }} --cluster-mode=kubernetes --netmode={{ contiv_encap_mode }}'
{% endif %}
{% if contiv_encap_mode == "vxlan" %}
-NETPLUGIN_ARGS='-vtep-ip {{ netplugin_ctrl_ip }} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}'
+NETPLUGIN_ARGS='--vtep-ip={{ contiv_netplugin_ctrl_ip }} --vxlan-port={{ contiv_vxlan_port }} --ctrl-ip={{ contiv_netplugin_ctrl_ip }} --etcd={{ contiv_etcd_url }} --fwdmode={{ contiv_netplugin_fwd_mode }} --cluster-mode=kubernetes --netmode={{ contiv_encap_mode }}'
{% endif %}
-
diff --git a/roles/contiv/templates/netplugin.service b/roles/contiv/templates/netplugin.service
index 6358d89ec..2e1ca1bdf 100644
--- a/roles/contiv/templates/netplugin.service
+++ b/roles/contiv/templates/netplugin.service
@@ -4,7 +4,7 @@ After=auditd.service systemd-user-sessions.service contiv-etcd.service
[Service]
EnvironmentFile=/etc/default/netplugin
-ExecStart={{ bin_dir }}/netplugin $NETPLUGIN_ARGS
+ExecStart={{ contiv_bin_dir }}/netplugin $NETPLUGIN_ARGS
KillMode=control-group
Restart=always
RestartSec=10
diff --git a/roles/contiv_auth_proxy/README.md b/roles/contiv_auth_proxy/README.md
deleted file mode 100644
index 287b6c148..000000000
--- a/roles/contiv_auth_proxy/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-Role Name
-=========
-
-Role to install Contiv API Proxy and UI
-
-Requirements
-------------
-
-Docker needs to be installed to run the auth proxy container.
-
-Role Variables
---------------
-
-auth_proxy_image specifies the image with version tag to be used to spin up the auth proxy container.
-auth_proxy_cert, auth_proxy_key specify files to use for the proxy server certificates.
-auth_proxy_port is the host port and auth_proxy_datastore the cluster data store address.
-
-Dependencies
-------------
-
-docker
-
-Example Playbook
-----------------
-
-- hosts: netplugin-node
- become: true
- roles:
- - { role: auth_proxy, auth_proxy_port: 10000, auth_proxy_datastore: etcd://netmaster:22379 }
diff --git a/roles/contiv_auth_proxy/defaults/main.yml b/roles/contiv_auth_proxy/defaults/main.yml
deleted file mode 100644
index e1d904c6a..000000000
--- a/roles/contiv_auth_proxy/defaults/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-auth_proxy_image: "contiv/auth_proxy:1.1.1"
-auth_proxy_port: 10000
-contiv_certs: "/var/contiv/certs"
-cluster_store: "etcd://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:22379"
-auth_proxy_cert: "{{ contiv_certs }}/auth_proxy_cert.pem"
-auth_proxy_key: "{{ contiv_certs }}/auth_proxy_key.pem"
-auth_proxy_datastore: "{{ cluster_store }}"
-auth_proxy_binaries: "/var/contiv_cache"
-auth_proxy_local_install: False
-auth_proxy_rule_comment: "Contiv auth proxy service"
-service_vip: "{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}"
diff --git a/roles/contiv_auth_proxy/files/auth-proxy.service b/roles/contiv_auth_proxy/files/auth-proxy.service
deleted file mode 100644
index 7cd2edff1..000000000
--- a/roles/contiv_auth_proxy/files/auth-proxy.service
+++ /dev/null
@@ -1,13 +0,0 @@
-[Unit]
-Description=Contiv Proxy and UI
-After=auditd.service systemd-user-sessions.service time-sync.target docker.service
-
-[Service]
-ExecStart=/usr/bin/auth_proxy.sh start
-ExecStop=/usr/bin/auth_proxy.sh stop
-KillMode=control-group
-Restart=on-failure
-RestartSec=10
-
-[Install]
-WantedBy=multi-user.target
diff --git a/roles/contiv_auth_proxy/files/cert.pem b/roles/contiv_auth_proxy/files/cert.pem
deleted file mode 100644
index 63df4603f..000000000
--- a/roles/contiv_auth_proxy/files/cert.pem
+++ /dev/null
@@ -1,33 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIFuTCCA6GgAwIBAgIJAOFyylO2zW2EMA0GCSqGSIb3DQEBCwUAMHMxCzAJBgNV
-BAYTAlVTMQswCQYDVQQIDAJDQTERMA8GA1UEBwwIU2FuIEpvc2UxDTALBgNVBAoM
-BENQU0cxFjAUBgNVBAsMDUlUIERlcGFydG1lbnQxHTAbBgNVBAMMFGF1dGgtbG9j
-YWwuY2lzY28uY29tMB4XDTE3MDcxMzE5NDYwMVoXDTI3MDcxMTE5NDYwMVowczEL
-MAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMREwDwYDVQQHDAhTYW4gSm9zZTENMAsG
-A1UECgwEQ1BTRzEWMBQGA1UECwwNSVQgRGVwYXJ0bWVudDEdMBsGA1UEAwwUYXV0
-aC1sb2NhbC5jaXNjby5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
-AQDKCg26dvsD1u3f1lCaLlVptyTyGyanaJ73mlHiUnAMcu0A/p3kzluTeQLZJxtl
-MToM7rT/lun6fbhQC+7TQep9mufBzLhssyzRnT9rnGSeGwN66mO/rlYPZc5C1D7p
-7QZh1uLznzgOA2zMkgnI+n6LB2TZWg+XLhZZIr5SVYE18lj0tnwq3R1uznVv9t06
-grUYK2K7x0Y3Pt2e6yV0e1w2FOGH+7v3mm0c8r1+7U+4EZ2SM3fdG7nyTL/187gl
-yE8X4HOnAyYGbAnULJC02LR/DTQpv/RpLN/YJEpHZWApHZCKh+fbFdIhRRwEnT4L
-DLy3GJVFDEsmFaC91wf24+HAeUl9/hRIbxo9x/7kXmrhMlK38x2oo3cPh0XZxHje
-XmJUGG1OByAuIZaGFwS9lUuGTNvpN8P/v3HN/nORc0RE3fvoXIv4nuhaEfuo32q4
-dvO4aNjmxjz1JcUEx6DiMQe4ECaReYdvI+j9ZkUJj/e89iLsQ8gz5t3FTM+tmBi1
-hrRBAgWyRY5DKECVv2SNFiX55JQGA5vQDGw51qTTuhntfBhkHvhKL7V1FRZazx6N
-wqFyynig/jplb1ZNdKZ9ZxngZr6qHIx4RcGaJ9HdVhik7NyUCiHjWeGagzun2Omq
-FFXAD9Hmfctac5bGxx0FBi95kO8bd8b0GSIh2CWanETjawIDAQABo1AwTjAdBgNV
-HQ4EFgQU5P1g5gFZot//iwEV98MwW2YXzEMwHwYDVR0jBBgwFoAU5P1g5gFZot//
-iwEV98MwW2YXzEMwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAbWgN
-BkFzzG5sbG7vUb23Ggv/0TCCuMtuKBGOBR0EW5Ssw6Aml7j3AGiy/1+2sdrQMsx2
-nVpexyQW5XS/X+8JjH7H7ifvwl3bVJ8xiR/9ioIJovrQojxQO0cUB2Lljj3bPd/R
-/tddAhPj0uN9N7UAejA12kXGa0Rrzb2U1rIpO9jnTbQYJiTOSzFiiGRMZWx3hfsW
-SDTpPmsV2Mh+jcmuxvPITl0s+vtqsm7SYoUZHwJ80LvrPbmk/5hTZGRsI3W5jipB
-PpOxvBnAWnQH3miMhty2TDaQ9JjYUwnxjFFZvNIYtp8+eH4nlbSldbgZoUeAe8It
-X6SsP8gT/uQh3TPvzNIfYROA7qTwoOQ8ZW8ssai/EttHAztFxketgNEfjwUTz8EJ
-yKeyAJ7qk3zD5k7p33ZNLWjmN0Awx3fCE9OQmNUyNX7PpYb4i+tHWu3h6Clw0RUf
-0gb1I+iyB3PXmpiYtxdMxGSi9CQIyWHzC4bsTQZkrzzIHWFSwewhUWOQ2Wko0hrv
-DnkS5k0cMPn5aNxw56H6OI+6hb+y/GGkTxNY9Gbxypx6lgZson0EY80EPZOJAORM
-XggJtTjiMpzvKh18DZY/Phmdh0C2tt8KYFdG83qLEhya9WZujbLAm38vIziFHbdX
-jOitXBSPyVrV3JvsCVksp+YC8Lnv3FsM494R4kA=
------END CERTIFICATE-----
diff --git a/roles/contiv_auth_proxy/files/key.pem b/roles/contiv_auth_proxy/files/key.pem
deleted file mode 100644
index 7224e569c..000000000
--- a/roles/contiv_auth_proxy/files/key.pem
+++ /dev/null
@@ -1,51 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIJKQIBAAKCAgEAygoNunb7A9bt39ZQmi5Vabck8hsmp2ie95pR4lJwDHLtAP6d
-5M5bk3kC2ScbZTE6DO60/5bp+n24UAvu00HqfZrnwcy4bLMs0Z0/a5xknhsDeupj
-v65WD2XOQtQ+6e0GYdbi8584DgNszJIJyPp+iwdk2VoPly4WWSK+UlWBNfJY9LZ8
-Kt0dbs51b/bdOoK1GCtiu8dGNz7dnusldHtcNhThh/u795ptHPK9fu1PuBGdkjN3
-3Ru58ky/9fO4JchPF+BzpwMmBmwJ1CyQtNi0fw00Kb/0aSzf2CRKR2VgKR2Qiofn
-2xXSIUUcBJ0+Cwy8txiVRQxLJhWgvdcH9uPhwHlJff4USG8aPcf+5F5q4TJSt/Md
-qKN3D4dF2cR43l5iVBhtTgcgLiGWhhcEvZVLhkzb6TfD/79xzf5zkXNERN376FyL
-+J7oWhH7qN9quHbzuGjY5sY89SXFBMeg4jEHuBAmkXmHbyPo/WZFCY/3vPYi7EPI
-M+bdxUzPrZgYtYa0QQIFskWOQyhAlb9kjRYl+eSUBgOb0AxsOdak07oZ7XwYZB74
-Si+1dRUWWs8ejcKhcsp4oP46ZW9WTXSmfWcZ4Ga+qhyMeEXBmifR3VYYpOzclAoh
-41nhmoM7p9jpqhRVwA/R5n3LWnOWxscdBQYveZDvG3fG9BkiIdglmpxE42sCAwEA
-AQKCAgANVU6EoLd+EGAQZo9ZLXebi2eXxqztXV0oT/nZasFUQP1dFHCNGgU3HURP
-2mHXcsE2+0XcnDQCwOs59R+kt3PnKCLlSkJdghGSH8OAsYh+WqAHK5K7oqCxUXGk
-PWeNfoPuTwUZOMe1PQqgEX8t0UIqoKlKIsRmoLb+2Okge94UFlNCiwx0s7TujBd5
-9Ruycc/LsYlJhSQgHzj29OO65S03sHcVx0onU/yhbW+OAdFB/3+bl2PwppTF5cTB
-UX00mRyHIdvgCLgoslaPtwUxuh9nRxLLMozJqBl5pSN1xL3s2LOiQMfPUIhWg74O
-m+XtSsDlgGzRardG4ySBgsBWzcEnGWi5/xyc/6dtERzR382+CLUfOEoucGJHk6kj
-RdbVx5FCawpAzjs9Wo49Vr+WQceSiBfb2+ndNUTiD0wu7xLEVPcYC6CMk71qZv5H
-0qGlLhtkHF0nSQytbwqwfMz2SGDfkwIHgQ0gTKMpEMWK79E24ewE1BnMiaKC1bgk
-evB6WM1YZFMKS5L7fshJcbeMe9dhSF3s+Y0MYVv5MCL1VMZyIzAcj8mkPYZyBRUk
-MC87GnaebeTvHNtimvqCuWDGVI1SOoc1xtopkxinTqtIYGuQacrSmfyf9D3Rg4+l
-kB0ibtJV+HLP94q266aef/PdpXszs7zo0h6skpLItW/jAuSNuQKCAQEA/VdXpMi8
-nfOtXwOZlGA2+jShYyHyCl2TKgbpfDGl1yKNkbBrIu2/PEl1DpmzSeG1tdNCzN68
-4vEjpF/jBsdSJj4BDiRY6HEcURXpw4yTZ7oCnUCbzadLIo3wX/gFDEVZz+0nQQ29
-5x0XGuQnJXC2fe/CyrkfltKhFSYoTSjtMbma4Pm3Q3HP3wGOvoUKtKNDO5rF26Qh
-YtqJgJSKBAms0wKiy9VVTa6DaXrtSnXTR+Ltud3xnWBrX1Z+idwxYt/Be5W2woHf
-M5zPIqMUgry5ujtRxhLmleFXDAYbaIQR9AZXlSS3w+9Gcl5EDRkFXqlaoCfppwTR
-wakj2lNjbAidPwKCAQEAzCjgko4/Yss/0dCs8ySKd2IaRF93OwC/E2SHVqe5bATh
-rVmDn/KIH4J2fI4FiaIHELT1CU5vmganYbK2k7CoJztjJltM1B7rkpHiVSL+qMqn
-yBZFg3LFq9eiBPZHyQEc+HMJUhFRexjdeqLH78HCoPz1QnKo2xRoGHhSQ/Rh6lXo
-20tldL9HrSxPRmwxnyLgWGcWopv/92JNxu6FgnZcnsVjkpO2mriLD7+Ty5qfvkwc
-RFDBYnq2JjBcvqngrzDIGDzC7hTA5BRuuQdNMZggJwO6nKdZDUrq5NIo9B07FLj1
-IRMVm7D1vJYzYI6HW7Wj4vNRXMY8jG1fwvNG0+xy1QKCAQEA7m14R9bAZWuDnGt3
-7APNWheUWAcHk6fTq/cLYV4cdWfIkvfVLO9STrvXliEjcoIhkPk94jAy1ucZo0a3
-FJccgm9ScOvWXRSvEMUt12ODC1ktwq+esqMi/GdXdgqnPZA7YYwRqJD1TAC90Qou
-qXb12Xp/+mjWCQ08mvnpbgz5hxXmZJvAVZJUj84YeMgfdjg9O2iDlB5ZaX7BcCjb
-58bvRzww2ONzQAPhG7Gch7pyWTKCh64RCgtHold2CesY87QglV4mvdKarSmEbFXN
-JOnXZiUT5fW93AtS8DcDLo81klMxtGT1KksUIukC5MzKl/eNGjPWG+FWRAwaeQyI
-ApHs4wKCAQAI10RSVGKeTprm5Rh4Nv7gCJmGmHO7VF7x4gqSUBURfmyfax7uEDyg
-0K982VGYEjIoIQ3zZzgh/WPGMU0CvEWr3UB/6rg6/1PINxUMBsXsXUpCueQsuw2g
-UWgsutWE+M1eXOzsZt+Waw88PkxWL5fUDOA6DmkNg6a2WI+Hbc/HrAy3Yl50Xcwm
-zaJpNEo5z/LTITOzuvmsps8jbDTP33xHS9jyAf+IV7F97xfhW0LLpNQciTq2nwXA
-RZvejdCzBXPEyOzQDooD1natAInxOds6lUjBe+W5U6M0YX1whMuILDJBSmhHI7Sg
-hAiZh9KIwCbmrw6468S3eA0LjillB/o5AoIBAQCg93syT50nYF2UWWP/rEa7qf6h
-+YpBPpJskIl3NDMJtie9OcdsoFpjblpFbsMqsSag9KhGl7wn4f8qXO0HERSb8oYd
-1Zu6BgUCuRXuAKNI4f508IooNpXx9y7xxl4giFBnDPa6W3KWqZ2LMDt92htMd/Zm
-qvoyYZhFhMSyKFzPDAFdsZijJgahqJRKhHeW9BsPqho5i7Ys+PhE8e/vUZs2zUeS
-QEHWhVisDTNKOoJIdz7JXFgEXCPTLAxXIIhYSkIfQxHxsWjt0vs79tzUkV8NlpKt
-d7s0iyHnD6kDvoxYOSI9YmSEnnFBFdgeiD+/VD+7enOdqb5MHsjuw+by09ft
------END RSA PRIVATE KEY-----
diff --git a/roles/contiv_auth_proxy/handlers/main.yml b/roles/contiv_auth_proxy/handlers/main.yml
deleted file mode 100644
index 9cb9bea49..000000000
--- a/roles/contiv_auth_proxy/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for auth_proxy
diff --git a/roles/contiv_auth_proxy/tasks/cleanup.yml b/roles/contiv_auth_proxy/tasks/cleanup.yml
deleted file mode 100644
index a29659cc9..000000000
--- a/roles/contiv_auth_proxy/tasks/cleanup.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-
-- name: stop auth-proxy container
- service: name=auth-proxy state=stopped
-
-- name: cleanup iptables for auth proxy
- shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ auth_proxy_rule_comment }} ({{ item }})"
- become: true
- with_items:
- - "{{ auth_proxy_port }}"
diff --git a/roles/contiv_auth_proxy/tasks/main.yml b/roles/contiv_auth_proxy/tasks/main.yml
deleted file mode 100644
index 74e7bf794..000000000
--- a/roles/contiv_auth_proxy/tasks/main.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-# tasks file for auth_proxy
-- name: setup iptables for auth proxy
- shell: >
- ( iptables -L INPUT | grep "{{ auth_proxy_rule_comment }} ({{ item }})" ) || \
- iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ auth_proxy_rule_comment }} ({{ item }})"
- become: true
- with_items:
- - "{{ auth_proxy_port }}"
-
-# Load the auth-proxy-image from local tar. Ignore any errors to handle the
-# case where the image is not built in
-- name: copy auth-proxy image
- copy: src={{ auth_proxy_binaries }}/auth-proxy-image.tar dest=/tmp/auth-proxy-image.tar
- when: auth_proxy_local_install == True
-
-- name: load auth-proxy image
- shell: docker load -i /tmp/auth-proxy-image.tar
- when: auth_proxy_local_install == True
-
-- name: create cert folder for proxy
- file: path=/var/contiv/certs state=directory
-
-- name: copy shell script for starting auth-proxy
- template: src=auth_proxy.j2 dest=/usr/bin/auth_proxy.sh mode=u=rwx,g=rx,o=rx
-
-- name: copy cert for starting auth-proxy
- copy: src=cert.pem dest=/var/contiv/certs/auth_proxy_cert.pem mode=u=rw,g=r,o=r
-
-- name: copy key for starting auth-proxy
- copy: src=key.pem dest=/var/contiv/certs/auth_proxy_key.pem mode=u=rw,g=r,o=r
-
-- name: copy systemd units for auth-proxy
- copy: src=auth-proxy.service dest=/etc/systemd/system/auth-proxy.service
-
-- name: start auth-proxy container
- systemd: name=auth-proxy daemon_reload=yes state=started enabled=yes
diff --git a/roles/contiv_auth_proxy/templates/auth_proxy.j2 b/roles/contiv_auth_proxy/templates/auth_proxy.j2
deleted file mode 100644
index 0ab8c831b..000000000
--- a/roles/contiv_auth_proxy/templates/auth_proxy.j2
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-usage="$0 start/stop"
-if [ $# -ne 1 ]; then
- echo USAGE: $usage
- exit 1
-fi
-
-case $1 in
-start)
- set -e
-
- /usr/bin/docker run --rm \
- -p 10000:{{ auth_proxy_port }} \
- --net=host --name=auth-proxy \
- -e NO_NETMASTER_STARTUP_CHECK=1 \
- -v /var/contiv:/var/contiv:z \
- {{ auth_proxy_image }} \
- --tls-key-file={{ auth_proxy_key }} \
- --tls-certificate={{ auth_proxy_cert }} \
- --data-store-address={{ auth_proxy_datastore }} \
- --netmaster-address={{ service_vip }}:9999 \
- --listen-address=:10000
- ;;
-
-stop)
- # don't stop on error
- /usr/bin/docker stop auth-proxy
- /usr/bin/docker rm -f -v auth-proxy
- ;;
-
-*)
- echo USAGE: $usage
- exit 1
- ;;
-esac
diff --git a/roles/contiv_auth_proxy/tests/inventory b/roles/contiv_auth_proxy/tests/inventory
deleted file mode 100644
index d18580b3c..000000000
--- a/roles/contiv_auth_proxy/tests/inventory
+++ /dev/null
@@ -1 +0,0 @@
-localhost
\ No newline at end of file
diff --git a/roles/contiv_auth_proxy/tests/test.yml b/roles/contiv_auth_proxy/tests/test.yml
deleted file mode 100644
index 2af3250cd..000000000
--- a/roles/contiv_auth_proxy/tests/test.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: localhost
- remote_user: root
- roles:
- - auth_proxy
diff --git a/roles/contiv_auth_proxy/vars/main.yml b/roles/contiv_auth_proxy/vars/main.yml
deleted file mode 100644
index 9032766c4..000000000
--- a/roles/contiv_auth_proxy/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for auth_proxy
diff --git a/roles/contiv_facts/defaults/main.yaml b/roles/contiv_facts/defaults/main.yaml
index 7b8150954..c1622c56a 100644
--- a/roles/contiv_facts/defaults/main.yaml
+++ b/roles/contiv_facts/defaults/main.yaml
@@ -1,13 +1,10 @@
---
# The directory where binaries are stored on Ansible
# managed systems.
-bin_dir: /usr/bin
+contiv_bin_dir: /usr/bin
# The directory used by Ansible to temporarily store
# files on Ansible managed systems.
-ansible_temp_dir: /tmp/.ansible/files
+contiv_ansible_temp_dir: /tmp/.ansible/files
-source_type: packageManager
-
-# Whether or not to also install and enable the Contiv auth_proxy
-contiv_enable_auth_proxy: false
+contiv_source_type: packageManager
diff --git a/roles/contiv_facts/tasks/fedora-install.yml b/roles/contiv_facts/tasks/fedora-install.yml
index 932ff091a..b8239a636 100644
--- a/roles/contiv_facts/tasks/fedora-install.yml
+++ b/roles/contiv_facts/tasks/fedora-install.yml
@@ -11,9 +11,9 @@
retries: 5
delay: 10
environment:
- https_proxy: "{{ https_proxy }}"
- http_proxy: "{{ http_proxy }}"
- no_proxy: "{{ no_proxy }}"
+ https_proxy: "{{ contiv_https_proxy }}"
+ http_proxy: "{{ contiv_http_proxy }}"
+ no_proxy: "{{ contiv_no_proxy }}"
- name: Install libselinux-python
command: dnf install {{ item }} -y
@@ -21,6 +21,6 @@
- python-dnf
- libselinux-python
environment:
- https_proxy: "{{ https_proxy }}"
- http_proxy: "{{ http_proxy }}"
- no_proxy: "{{ no_proxy }}"
+ https_proxy: "{{ contiv_https_proxy }}"
+ http_proxy: "{{ contiv_http_proxy }}"
+ no_proxy: "{{ contiv_no_proxy }}"
diff --git a/roles/contiv_facts/tasks/main.yml b/roles/contiv_facts/tasks/main.yml
index 3267a4ab0..11f1e1369 100644
--- a/roles/contiv_facts/tasks/main.yml
+++ b/roles/contiv_facts/tasks/main.yml
@@ -1,60 +1,31 @@
---
-- name: Determine if Atomic
- stat: path=/run/ostree-booted
- register: s
- changed_when: false
- check_mode: no
-
-- name: Init the is_atomic fact
- set_fact:
- is_atomic: false
-
-- name: Set the is_atomic fact
- set_fact:
- is_atomic: true
- when: s.stat.exists
-
- name: Determine if CoreOS
raw: "grep '^NAME=' /etc/os-release | sed s'/NAME=//'"
register: distro
check_mode: no
-- name: Init the is_coreos fact
+- name: Init the contiv_is_coreos fact
set_fact:
- is_coreos: false
+ contiv_is_coreos: false
-- name: Set the is_coreos fact
+- name: Set the contiv_is_coreos fact
set_fact:
- is_coreos: true
+ contiv_is_coreos: true
when: "'CoreOS' in distro.stdout"
-- name: Set docker config file directory
- set_fact:
- docker_config_dir: "/etc/sysconfig"
-
-- name: Override docker config file directory for Debian
- set_fact:
- docker_config_dir: "/etc/default"
- when: ansible_distribution == "Debian" or ansible_distribution == "Ubuntu"
-
-- name: Create config file directory
- file:
- path: "{{ docker_config_dir }}"
- state: directory
-
- name: Set the bin directory path for CoreOS
set_fact:
- bin_dir: "/opt/bin"
- when: is_coreos
+ contiv_bin_dir: "/opt/bin"
+ when: contiv_is_coreos
- name: Create the directory used to store binaries
file:
- path: "{{ bin_dir }}"
+ path: "{{ contiv_bin_dir }}"
state: directory
- name: Create Ansible temp directory
file:
- path: "{{ ansible_temp_dir }}"
+ path: "{{ contiv_ansible_temp_dir }}"
state: directory
- name: Determine if has rpm
@@ -63,26 +34,26 @@
changed_when: false
check_mode: no
-- name: Init the has_rpm fact
+- name: Init the contiv_has_rpm fact
set_fact:
- has_rpm: false
+ contiv_has_rpm: false
-- name: Set the has_rpm fact
+- name: Set the contiv_has_rpm fact
set_fact:
- has_rpm: true
+ contiv_has_rpm: true
when: s.stat.exists
-- name: Init the has_firewalld fact
+- name: Init the contiv_has_firewalld fact
set_fact:
- has_firewalld: false
+ contiv_has_firewalld: false
-- name: Init the has_iptables fact
+- name: Init the contiv_has_iptables fact
set_fact:
- has_iptables: false
+ contiv_has_iptables: false
# collect information about what packages are installed
- include_tasks: rpm.yml
- when: has_rpm
+ when: contiv_has_rpm
- include_tasks: fedora-install.yml
- when: not is_atomic and ansible_distribution == "Fedora"
+ when: not openshift_is_atomic and ansible_distribution == "Fedora"
diff --git a/roles/contiv_facts/tasks/rpm.yml b/roles/contiv_facts/tasks/rpm.yml
index d12436f96..dc6c5d3b7 100644
--- a/roles/contiv_facts/tasks/rpm.yml
+++ b/roles/contiv_facts/tasks/rpm.yml
@@ -13,9 +13,9 @@
failed_when: false
check_mode: no
-- name: Set the has_firewalld fact
+- name: Set the contiv_has_firewalld fact
set_fact:
- has_firewalld: true
+ contiv_has_firewalld: true
when: s.rc == 0 and ss.rc == 0
- name: Determine if iptables-services installed
@@ -25,7 +25,7 @@
failed_when: false
check_mode: no
-- name: Set the has_iptables fact
+- name: Set the contiv_has_iptables fact
set_fact:
- has_iptables: true
+ contiv_has_iptables: true
when: s.rc == 0
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 86cea5c46..87e249642 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -5,7 +5,7 @@ r_etcd_common_backup_sufix_name: ''
l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
# runc, docker, host
-r_etcd_common_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"
+r_etcd_common_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if openshift_is_containerized else 'host' }}"
r_etcd_common_embedded_etcd: false
osm_etcd_image: 'registry.access.redhat.com/rhel7/etcd'
@@ -98,4 +98,4 @@ r_etcd_os_firewall_allow:
# set the backend quota to 4GB by default
etcd_quota_backend_bytes: 4294967296
-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index f2e1fc310..af58eff62 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -17,6 +17,5 @@ galaxy_info:
- system
dependencies:
- role: lib_openshift
-- role: lib_os_firewall
- role: lib_utils
- role: openshift_facts
diff --git a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
index ccfd9da14..881a8c270 100644
--- a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
+++ b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
@@ -1,7 +1,7 @@
---
- name: Install etcd for etcdctl
package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
diff --git a/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml
index 119071a72..ce295d2f5 100644
--- a/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml
+++ b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml
@@ -28,7 +28,7 @@
etcd_client_certs_missing: "{{ true if etcd_certificates_redeploy | default(false) | bool
else (False in (g_external_etcd_cert_stat_result.results
| default({})
- | oo_collect(attribute='stat.exists')
+ | lib_utils_oo_collect(attribute='stat.exists')
| list)) }}"
- name: Ensure generated_certs directory present
@@ -57,6 +57,7 @@
# Certificates must be signed serially in order to avoid competing
# for the serial file.
+# delegated_serial_command is a custom module in lib_utils
- name: Sign and create the client crt
delegated_serial_command:
command: >
@@ -79,13 +80,6 @@
when: etcd_client_certs_missing | bool
delegate_to: "{{ etcd_ca_host }}"
-- name: Create local temp directory for syncing certs
- local_action: command mktemp -d /tmp/etcd_certificates-XXXXXXX
- register: g_etcd_client_mktemp
- changed_when: False
- when: etcd_client_certs_missing | bool
- become: no
-
- name: Create a tarball of the etcd certs
command: >
tar -czvf {{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz
@@ -101,8 +95,7 @@
- name: Retrieve the etcd cert tarballs
fetch:
src: "{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz"
- dest: "{{ g_etcd_client_mktemp.stdout }}/"
- flat: yes
+ dest: "/tmp"
fail_on_missing: yes
validate_checksum: yes
when: etcd_client_certs_missing | bool
@@ -116,10 +109,15 @@
- name: Unarchive etcd cert tarballs
unarchive:
- src: "{{ g_etcd_client_mktemp.stdout }}/{{ etcd_cert_subdir }}.tgz"
+ src: "/tmp/{{ inventory_hostname }}/{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz"
dest: "{{ etcd_cert_config_dir }}"
when: etcd_client_certs_missing | bool
+- name: Delete temporary directory
+ local_action: file path="/tmp/{{ inventory_hostname }}" state=absent
+ changed_when: False
+ when: etcd_client_certs_missing | bool
+
- file:
path: "{{ etcd_cert_config_dir }}/{{ item }}"
owner: root
@@ -130,9 +128,3 @@
- "{{ etcd_cert_prefix }}client.key"
- "{{ etcd_cert_prefix }}ca.crt"
when: etcd_client_certs_missing | bool
-
-- name: Delete temporary directory
- local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent
- changed_when: False
- when: etcd_client_certs_missing | bool
- become: no
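
The rewrite above drops the per-run mktemp directory and leans on fetch's default non-flat layout: with dest "/tmp" and no flat: yes, the tarball lands on the control host under /tmp/<inventory_hostname>/<full remote path>, which is exactly the src the unarchive task reconstructs. A minimal sketch with hypothetical names:

    - fetch:
        src: /etc/etcd/generated_certs/etcd-node1/etcd-node1.tgz
        dest: "/tmp"
    # the control host now holds:
    #   /tmp/node1.example.com/etc/etcd/generated_certs/etcd-node1/etcd-node1.tgz
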
diff --git a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
index deb2301d7..7c8b87d99 100644
--- a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
+++ b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
@@ -21,7 +21,7 @@
etcd_server_certs_missing: "{{ true if etcd_certificates_redeploy | default(false) | bool
else (False in (g_etcd_server_cert_stat_result.results
| default({})
- | oo_collect(attribute='stat.exists')
+ | lib_utils_oo_collect(attribute='stat.exists')
| list)) }}"
- name: Ensure generated_certs directory present
@@ -50,6 +50,7 @@
# Certificates must be signed serially in order to avoid competing
# for the serial file.
+# delegated_serial_command is a custom module in lib_utils
- name: Sign and create the server crt
delegated_serial_command:
command: >
@@ -83,6 +84,7 @@
# Certificates must be signed serially in order to avoid competing
# for the serial file.
+# delegated_serial_command is a custom module in lib_utils
- name: Sign and create the peer crt
delegated_serial_command:
command: >
@@ -105,13 +107,6 @@
when: etcd_server_certs_missing | bool
delegate_to: "{{ etcd_ca_host }}"
-- name: Create local temp directory for syncing certs
- local_action: command mktemp -d /tmp/etcd_certificates-XXXXXXX
- become: no
- register: g_etcd_server_mktemp
- changed_when: False
- when: etcd_server_certs_missing | bool
-
- name: Create a tarball of the etcd certs
command: >
tar -czvf {{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz
@@ -127,8 +122,7 @@
- name: Retrieve etcd cert tarball
fetch:
src: "{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz"
- dest: "{{ g_etcd_server_mktemp.stdout }}/"
- flat: yes
+ dest: "/tmp"
fail_on_missing: yes
validate_checksum: yes
when: etcd_server_certs_missing | bool
@@ -144,7 +138,7 @@
- name: Unarchive cert tarball
unarchive:
- src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_cert_subdir }}.tgz"
+ src: "/tmp/{{ inventory_hostname }}/{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz"
dest: "{{ etcd_cert_config_dir }}"
when: etcd_server_certs_missing | bool
@@ -161,8 +155,7 @@
- name: Retrieve etcd ca cert tarball
fetch:
src: "{{ etcd_generated_certs_dir }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ g_etcd_server_mktemp.stdout }}/"
- flat: yes
+ dest: "/tmp"
fail_on_missing: yes
validate_checksum: yes
when: etcd_server_certs_missing | bool
@@ -177,8 +170,7 @@
when: etcd_server_certs_missing | bool
- name: Delete temporary directory
- local_action: file path="{{ g_etcd_server_mktemp.stdout }}" state=absent
- become: no
+ local_action: file path="/tmp/{{ inventory_hostname }}" state=absent
changed_when: False
when: etcd_server_certs_missing | bool
diff --git a/roles/etcd/tasks/migration/add_ttls.yml b/roles/etcd/tasks/migration/add_ttls.yml
index a4b0ff31d..3d945344c 100644
--- a/roles/etcd/tasks/migration/add_ttls.yml
+++ b/roles/etcd/tasks/migration/add_ttls.yml
@@ -11,7 +11,7 @@
- name: Re-introduce leases (as a replacement for key TTLs)
command: >
- {{ openshift.common.client_binary }} adm migrate etcd-ttl \
+ {{ openshift_client_binary }} adm migrate etcd-ttl \
--cert {{ r_etcd_common_master_peer_cert_file }} \
--key {{ r_etcd_common_master_peer_key_file }} \
--cacert {{ r_etcd_common_master_peer_ca_file }} \
diff --git a/roles/etcd/tasks/migration/migrate.yml b/roles/etcd/tasks/migration/migrate.yml
index 54a9c74ff..630640ab1 100644
--- a/roles/etcd/tasks/migration/migrate.yml
+++ b/roles/etcd/tasks/migration/migrate.yml
@@ -1,7 +1,7 @@
---
# Should this be run in a serial manner?
- set_fact:
- l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}"
+ l_etcd_service: "{{ 'etcd_container' if (openshift_is_containerized | bool) else 'etcd' }}"
- name: Migrate etcd data
command: >
diff --git a/roles/etcd/tasks/version_detect.yml b/roles/etcd/tasks/version_detect.yml
index fe1e418d8..ab3626cec 100644
--- a/roles/etcd/tasks/version_detect.yml
+++ b/roles/etcd/tasks/version_detect.yml
@@ -12,7 +12,7 @@
- debug:
msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected"
when:
- - not openshift.common.is_containerized | bool
+ - not openshift_is_containerized | bool
- block:
- name: Record containerized etcd version (docker)
@@ -52,4 +52,4 @@
- debug:
msg: "Etcd containerized version {{ etcd_container_version }} detected"
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
diff --git a/roles/flannel/defaults/main.yaml b/roles/flannel/defaults/main.yaml
index 2e4a0dc39..d9e4d2354 100644
--- a/roles/flannel/defaults/main.yaml
+++ b/roles/flannel/defaults/main.yaml
@@ -6,4 +6,4 @@ etcd_peer_ca_file: "{{ openshift.common.config_base }}/node/flannel.etcd-ca.crt"
etcd_peer_cert_file: "{{ openshift.common.config_base }}/node/flannel.etcd-client.crt"
etcd_peer_key_file: "{{ openshift.common.config_base }}/node/flannel.etcd-client.key"
-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
index 7d79bd3d4..f94399fab 100644
--- a/roles/flannel/handlers/main.yml
+++ b/roles/flannel/handlers/main.yml
@@ -21,3 +21,7 @@
until: not (l_restart_node_result is failed)
retries: 3
delay: 30
+
+- name: save iptables rules
+ become: yes
+ command: 'iptables-save'
diff --git a/roles/flannel/meta/main.yml b/roles/flannel/meta/main.yml
index 51128dba6..7634b8192 100644
--- a/roles/flannel/meta/main.yml
+++ b/roles/flannel/meta/main.yml
@@ -12,4 +12,5 @@ galaxy_info:
categories:
- cloud
- system
-dependencies: []
+dependencies:
+- role: lib_utils
diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml
index 9b9250f31..11981fb80 100644
--- a/roles/flannel/tasks/main.yml
+++ b/roles/flannel/tasks/main.yml
@@ -2,7 +2,7 @@
- name: Install flannel
become: yes
package: name=flannel state=present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
@@ -41,3 +41,13 @@
notify:
- restart docker
- restart node
+
+- name: Enable Pod to Pod communication
+ command: /sbin/iptables --wait -I FORWARD -d {{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }} -i {{ flannel_interface }} -j ACCEPT -m comment --comment "Pod to Pod communication"
+ notify:
+ - save iptables rules
+
+- name: Allow external network access
+ command: /sbin/iptables -t nat -A POSTROUTING -o {{ flannel_interface }} -j MASQUERADE -m comment --comment "Allow external network access"
+ notify:
+ - save iptables rules
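
Both tasks above run iptables unconditionally on every play. A hedged sketch of a check-before-insert variant, in the spirit of the grep-or-insert pattern the removed contiv_auth_proxy tasks used (cluster_cidr stands in for the hostvars expression above):

    - name: Enable Pod to Pod communication
      shell: >
        /sbin/iptables --wait -C FORWARD -d {{ cluster_cidr }} -i {{ flannel_interface }} -j ACCEPT ||
        /sbin/iptables --wait -I FORWARD -d {{ cluster_cidr }} -i {{ flannel_interface }} -j ACCEPT
      notify:
      - save iptables rules
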
diff --git a/roles/flannel_register/meta/main.yml b/roles/flannel_register/meta/main.yml
index 73bddcca4..1e44ff5ba 100644
--- a/roles/flannel_register/meta/main.yml
+++ b/roles/flannel_register/meta/main.yml
@@ -13,4 +13,5 @@ galaxy_info:
- cloud
- system
dependencies:
-- { role: openshift_facts }
+- role: openshift_facts
+- role: lib_utils
diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
index 83ca83350..da7e7b1da 100644
--- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
+++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
@@ -31,6 +31,7 @@ class CallbackModule(CallbackBase):
'installer_phase_node',
'installer_phase_glusterfs',
'installer_phase_hosted',
+ 'installer_phase_web_console',
'installer_phase_metrics',
'installer_phase_logging',
'installer_phase_prometheus',
@@ -80,6 +81,10 @@ class CallbackModule(CallbackBase):
'title': 'Hosted Install',
'playbook': 'playbooks/openshift-hosted/config.yml'
},
+ 'installer_phase_web_console': {
+ 'title': 'Web Console Install',
+ 'playbook': 'playbooks/openshift-web-console/config.yml'
+ },
'installer_phase_metrics': {
'title': 'Metrics Install',
'playbook': 'playbooks/openshift-metrics/config.yml'
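
Each phase key registered here maps to the title/playbook pair the callback prints in its end-of-run summary, so a run that finishes the console install would now include a line like (timing hypothetical):

    INSTALLER STATUS *************************************
    ...
    Web Console Install  : Complete (0:00:42)
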
diff --git a/roles/kuryr/meta/main.yml b/roles/kuryr/meta/main.yml
index 7fd5adf41..7eb8ed781 100644
--- a/roles/kuryr/meta/main.yml
+++ b/roles/kuryr/meta/main.yml
@@ -13,5 +13,6 @@ galaxy_info:
- cloud
- system
dependencies:
-- { role: lib_openshift }
-- { role: openshift_facts }
+- role: lib_utils
+- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/kuryr/tasks/node.yaml b/roles/kuryr/tasks/node.yaml
index 08f2d5adc..41d0ead20 100644
--- a/roles/kuryr/tasks/node.yaml
+++ b/roles/kuryr/tasks/node.yaml
@@ -40,7 +40,7 @@
regexp: '^OPTIONS="?(.*?)"?$'
backrefs: yes
backup: yes
- line: 'OPTIONS="\1 --disable dns,proxy,plugins"'
+ line: 'OPTIONS="\1 --disable proxy"'
- name: force node restart to disable the proxy
service:
diff --git a/roles/kuryr/templates/cni-daemonset.yaml.j2 b/roles/kuryr/templates/cni-daemonset.yaml.j2
index 39348ae90..09f4c7dfe 100644
--- a/roles/kuryr/templates/cni-daemonset.yaml.j2
+++ b/roles/kuryr/templates/cni-daemonset.yaml.j2
@@ -26,6 +26,13 @@ spec:
image: kuryr/cni:latest
imagePullPolicy: IfNotPresent
command: [ "cni_ds_init" ]
+ env:
+ - name: CNI_DAEMON
+ value: "True"
+ - name: KUBERNETES_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
securityContext:
privileged: true
volumeMounts:
@@ -38,6 +45,10 @@ spec:
subPath: kuryr-cni.conf
- name: etc
mountPath: /etc
+ - name: proc
+ mountPath: /host_proc
+ - name: openvswitch
+ mountPath: /var/run/openvswitch
volumes:
- name: bin
hostPath:
@@ -50,4 +61,10 @@ spec:
name: kuryr-config
- name: etc
hostPath:
- path: /etc
\ No newline at end of file
+ path: /etc
+ - name: proc
+ hostPath:
+ path: /proc
+ - name: openvswitch
+ hostPath:
+ path: /var/run/openvswitch
diff --git a/roles/kuryr/templates/configmap.yaml.j2 b/roles/kuryr/templates/configmap.yaml.j2
index 96c215f00..4bf1dbddf 100644
--- a/roles/kuryr/templates/configmap.yaml.j2
+++ b/roles/kuryr/templates/configmap.yaml.j2
@@ -16,17 +16,17 @@ data:
# Directory for Kuryr vif binding executables. (string value)
#bindir = /usr/libexec/kuryr
+ # Neutron subnetpool name will be prefixed by this. (string value)
+ #subnetpool_name_prefix = kuryrPool
+
+ # baremetal or nested-containers are the supported values. (string value)
+ #deployment_type = baremetal
+
# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
# Note: This option can be changed without restarting.
#debug = false
- # DEPRECATED: If set to false, the logging level will be set to WARNING instead
- # of the default INFO level. (boolean value)
- # This option is deprecated for removal.
- # Its value may be silently ignored in the future.
- #verbose = true
-
# The name of a logging configuration file. This file is appended to any
# existing logging configuration files. For details about logging configuration
# files, see the Python logging module documentation. Note that when logging
@@ -46,7 +46,7 @@ data:
# logging will go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
- #log_file = /var/log/kuryr/kuryr-controller.log
+ #log_file = <None>
# (Optional) The base directory used for relative log_file paths. This option
# is ignored if log_config_append is set. (string value)
@@ -65,13 +65,19 @@ data:
# is set. (boolean value)
#use_syslog = false
+ # Enable journald for logging. If running in a systemd environment you may wish
+ # to enable journal support. Doing so will use the journal native protocol
+ # which includes structured metadata in addition to log messages. This option is
+ # ignored if log_config_append is set. (boolean value)
+ #use_journal = false
+
# Syslog facility to receive log lines. This option is ignored if
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
# Log output to standard error. This option is ignored if log_config_append is
# set. (boolean value)
- #use_stderr = true
+ #use_stderr = false
# Format string to use for log messages with context. (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
@@ -93,7 +99,7 @@ data:
# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
- #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+ #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
@@ -106,15 +112,86 @@ data:
# value)
#instance_uuid_format = "[instance: %(uuid)s] "
+ # Interval, in seconds, of log rate limiting. (integer value)
+ #rate_limit_interval = 0
+
+ # Maximum number of logged messages per rate_limit_interval. (integer value)
+ #rate_limit_burst = 0
+
+ # Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG
+ # or empty string. Logs with level greater than or equal to rate_limit_except_level
+ # are not filtered. An empty string means that all levels are filtered. (string
+ # value)
+ #rate_limit_except_level = CRITICAL
+
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
[binding]
+ # Configuration options for container interface binding.
- driver = kuryr.lib.binding.drivers.vlan
+ #
+ # From kuryr_kubernetes
+ #
+
+ # The name prefix of the veth endpoint put inside the container. (string value)
+ #veth_dst_prefix = eth
+
+ # Driver to use for binding and unbinding ports. (string value)
+ # Deprecated group/name - [binding]/driver
+ #default_driver = kuryr.lib.binding.drivers.veth
+
+ # Drivers to use for binding and unbinding ports. (list value)
+ #enabled_drivers = kuryr.lib.binding.drivers.veth
+
+ # Specifies the name of the Nova instance interface to link the virtual devices
+ # to (only applicable to some binding drivers). (string value)
link_iface = eth0
+ driver = kuryr.lib.binding.drivers.vlan
+
+
+ [cni_daemon]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Enable CNI Daemon configuration. (boolean value)
+ daemon_enabled = true
+
+ # Bind address for CNI daemon HTTP server. It is recommended to allow only local
+ # connections. (string value)
+ bind_address = 127.0.0.1:50036
+
+ # Maximum number of processes that will be spawned to process requests from CNI
+ # driver. (integer value)
+ #worker_num = 30
+
+ # Time (in seconds) the CNI daemon will wait for VIF annotation to appear in
+ # pod metadata before failing the CNI request. (integer value)
+ #vif_annotation_timeout = 120
+
+ # Kuryr uses the pyroute2 library to manipulate networking interfaces. When
+ # processing a high number of Kuryr requests in parallel, it may take the
+ # kernel more time to process all networking stack changes. This option allows
+ # tuning the internal pyroute2 timeout. (integer value)
+ #pyroute2_timeout = 30
+
+ # Set to True when you are running kuryr-daemon inside a Docker container on a
+ # Kubernetes host, e.g. as a DaemonSet on the Kubernetes cluster Kuryr is
+ # supposed to provide networking for. This mainly means that kuryr-daemon will
+ # look for network namespaces in $netns_proc_dir instead of /proc. (boolean value)
+ docker_mode = true
+
+ # When docker_mode is set to True, this config option should be set to where
+ # host's /proc directory is mounted. Please note that mounting it is necessary
+ # to allow Kuryr-Kubernetes to move host interfaces between host network
+ # namespaces, which is essential for Kuryr to work. (string value)
+ netns_proc_dir = /host_proc
+
+
[kubernetes]
#
@@ -164,11 +241,6 @@ data:
# The driver that manages VIFs pools for Kubernetes Pods (string value)
vif_pool_driver = {{ kuryr_openstack_enable_pools | default(False) | ternary('nested', 'noop') }}
- [vif_pool]
- ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }}
- ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }}
- ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }}
- ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }}
[neutron]
# Configuration options for OpenStack Neutron
@@ -232,13 +304,55 @@ data:
external_svc_subnet = {{ kuryr_openstack_external_svc_subnet_id }}
[pod_vif_nested]
+
worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
+
+ [pool_manager]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Absolute path to socket file that will be used for communication with the
+ # Pool Manager daemon (string value)
+ #sock_file = /run/kuryr/kuryr_manage.sock
+
+
+ [vif_pool]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Set a maximum number of ports per pool. 0 to disable (integer value)
+ ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }}
+
+ # Set a target minimum size of the pool of ports (integer value)
+ ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }}
+
+ # Number of ports to be created in a bulk request (integer value)
+ ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }}
+
+ # Minimum interval (in seconds) between pool updates (integer value)
+ ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }}
+
kuryr-cni.conf: |+
[DEFAULT]
#
# From kuryr_kubernetes
#
+
+ # Directory for Kuryr vif binding executables. (string value)
+ #bindir = /usr/libexec/kuryr
+
+ # Neutron subnetpool name will be prefixed by this. (string value)
+ #subnetpool_name_prefix = kuryrPool
+
+ # baremetal or nested-containers are the supported values. (string value)
+ #deployment_type = baremetal
+
# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
# Note: This option can be changed without restarting.
@@ -263,7 +377,7 @@ data:
# logging will go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
- #log_file = /var/log/kuryr/cni.log
+ #log_file = <None>
# (Optional) The base directory used for relative log_file paths. This option
# is ignored if log_config_append is set. (string value)
@@ -282,6 +396,12 @@ data:
# is set. (boolean value)
#use_syslog = false
+ # Enable journald for logging. If running in a systemd environment you may wish
+ # to enable journal support. Doing so will use the journal native protocol
+ # which includes structured metadata in addition to log messages. This option is
+ # ignored if log_config_append is set. (boolean value)
+ #use_journal = false
+
# Syslog facility to receive log lines. This option is ignored if
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
@@ -310,7 +430,7 @@ data:
# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
- #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+ #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
@@ -323,14 +443,85 @@ data:
# value)
#instance_uuid_format = "[instance: %(uuid)s] "
+ # Interval, in seconds, of log rate limiting. (integer value)
+ #rate_limit_interval = 0
+
+ # Maximum number of logged messages per rate_limit_interval. (integer value)
+ #rate_limit_burst = 0
+
+ # Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG
+ # or empty string. Logs with level greater than or equal to rate_limit_except_level
+ # are not filtered. An empty string means that all levels are filtered. (string
+ # value)
+ #rate_limit_except_level = CRITICAL
+
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
[binding]
+ # Configuration options for container interface binding.
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # The name prefix of the veth endpoint put inside the container. (string value)
+ #veth_dst_prefix = eth
+
+ # Driver to use for binding and unbinding ports. (string value)
+ # Deprecated group/name - [binding]/driver
+ #default_driver = kuryr.lib.binding.drivers.veth
+
+ # Drivers to use for binding and unbinding ports. (list value)
+ #enabled_drivers = kuryr.lib.binding.drivers.veth
+
+ # Specifies the name of the Nova instance interface to link the virtual devices
+ # to (only applicable to some binding drivers). (string value)
+ link_iface = eth0
driver = kuryr.lib.binding.drivers.vlan
- link_iface = {{ kuryr_cni_link_interface }}
+
+
+ [cni_daemon]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Enable CNI Daemon configuration. (boolean value)
+ daemon_enabled = true
+
+ # Bind address for CNI daemon HTTP server. It is recommended to allow only local
+ # connections. (string value)
+ bind_address = 127.0.0.1:50036
+
+ # Maximum number of processes that will be spawned to process requests from CNI
+ # driver. (integer value)
+ #worker_num = 30
+
+ # Time (in seconds) the CNI daemon will wait for VIF annotation to appear in
+ # pod metadata before failing the CNI request. (integer value)
+ #vif_annotation_timeout = 120
+
+ # Kuryr uses the pyroute2 library to manipulate networking interfaces. When
+ # processing a high number of Kuryr requests in parallel, it may take the
+ # kernel more time to process all networking stack changes. This option allows
+ # tuning the internal pyroute2 timeout. (integer value)
+ #pyroute2_timeout = 30
+
+ # Set to True when you are running kuryr-daemon inside a Docker container on a
+ # Kubernetes host, e.g. as a DaemonSet on the Kubernetes cluster Kuryr is
+ # supposed to provide networking for. This mainly means that kuryr-daemon will
+ # look for network namespaces in $netns_proc_dir instead of /proc. (boolean value)
+ docker_mode = true
+
+ # When docker_mode is set to True, this config option should be set to where
+ # host's /proc directory is mounted. Please note that mounting it is necessary
+ # to allow Kuryr-Kubernetes to move host interfaces between host network
+ # namespaces, which is essential for Kuryr to work. (string value)
+ netns_proc_dir = /host_proc
+
[kubernetes]
@@ -341,12 +532,136 @@ data:
# The root URL of the Kubernetes API (string value)
api_root = {{ openshift.master.api_url }}
- # The token to talk to the k8s API
- token_file = /etc/kuryr/token
+ # Absolute path to client cert to connect to HTTPS K8S_API (string value)
+ # ssl_client_crt_file = /etc/kuryr/controller.crt
+
+ # Absolute path client key file to connect to HTTPS K8S_API (string value)
+ # ssl_client_key_file = /etc/kuryr/controller.key
# Absolute path to ca cert file to connect to HTTPS K8S_API (string value)
- ssl_ca_crt_file = /etc/kuryr/ca.crt
+ ssl_ca_crt_file = /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+
+ # The token to talk to the k8s API
+ token_file = /var/run/secrets/kubernetes.io/serviceaccount/token
# HTTPS K8S_API server identity verification (boolean value)
# TODO (apuimedo): Make configurable
ssl_verify_server_crt = True
+
+ # The driver to determine OpenStack project for pod ports (string value)
+ pod_project_driver = default
+
+ # The driver to determine OpenStack project for services (string value)
+ service_project_driver = default
+
+ # The driver to determine Neutron subnets for pod ports (string value)
+ pod_subnets_driver = default
+
+ # The driver to determine Neutron subnets for services (string value)
+ service_subnets_driver = default
+
+ # The driver to determine Neutron security groups for pods (string value)
+ pod_security_groups_driver = default
+
+ # The driver to determine Neutron security groups for services (string value)
+ service_security_groups_driver = default
+
+ # The driver that provides VIFs for Kubernetes Pods. (string value)
+ pod_vif_driver = nested-vlan
+
+ # The driver that manages VIFs pools for Kubernetes Pods (string value)
+ vif_pool_driver = {{ kuryr_openstack_enable_pools | default(False) | ternary('nested', 'noop') }}
+
+ [neutron]
+ # Configuration options for OpenStack Neutron
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Authentication URL (string value)
+ auth_url = {{ kuryr_openstack_auth_url }}
+
+ # Authentication type to load (string value)
+ # Deprecated group/name - [neutron]/auth_plugin
+ auth_type = password
+
+ # Domain ID to scope to (string value)
+ user_domain_name = {{ kuryr_openstack_user_domain_name }}
+
+ # User's password (string value)
+ password = {{ kuryr_openstack_password }}
+
+ # Domain name containing project (string value)
+ project_domain_name = {{ kuryr_openstack_project_domain_name }}
+
+ # Project ID to scope to (string value)
+ # Deprecated group/name - [neutron]/tenant-id
+ project_id = {{ kuryr_openstack_project_id }}
+
+ # Token (string value)
+ #token = <None>
+
+ # Trust ID (string value)
+ #trust_id = <None>
+
+ # User's domain id (string value)
+ #user_domain_id = <None>
+
+ # User id (string value)
+ #user_id = <None>
+
+ # Username (string value)
+ # Deprecated group/name - [neutron]/user-name
+ username = {{ kuryr_openstack_username }}
+
+ # Whether a plugging operation is failed if the port to plug does not become
+ # active (boolean value)
+ #vif_plugging_is_fatal = false
+
+ # Seconds to wait for port to become active (integer value)
+ #vif_plugging_timeout = 0
+
+ [neutron_defaults]
+
+ pod_security_groups = {{ kuryr_openstack_pod_sg_id }}
+ pod_subnet = {{ kuryr_openstack_pod_subnet_id }}
+ service_subnet = {{ kuryr_openstack_service_subnet_id }}
+ project = {{ kuryr_openstack_pod_project_id }}
+ # TODO (apuimedo): Remove the duplicated line just after this one once the
+ # RDO packaging contains the upstream patch
+ worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
+ [pod_vif_nested]
+
+ worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
+
+ [pool_manager]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Absolute path to socket file that will be used for communication with the
+ # Pool Manager daemon (string value)
+ #sock_file = /run/kuryr/kuryr_manage.sock
+
+
+ [vif_pool]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Set a maximum number of ports per pool. 0 to disable (integer value)
+ ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }}
+
+ # Set a target minimum size of the pool of ports (integer value)
+ ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }}
+
+ # Number of ports to be created in a bulk request (integer value)
+ ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }}
+
+ # Minimum interval (in seconds) between pool updates (integer value)
+ ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }}
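The `[vif_pool]` options above drive the ports-pool behavior: keep at least `ports_pool_min` pre-created Neutron ports per pool, never exceed `ports_pool_max` (0 disables the cap), create ports in bulk requests of `ports_pool_batch`, and recheck at most every `ports_pool_update_frequency` seconds. A minimal sketch of that sizing logic, under simplified assumed names (not the kuryr-kubernetes implementation):

```
# Hedged sketch of the pool-sizing semantics configured above; the real
# kuryr-kubernetes vif_pool driver is more involved. Names are illustrative.
def ports_to_create(available, pool_min, pool_max, batch):
    """Return how many Neutron ports to bulk-create for one pool."""
    if available >= pool_min:
        return 0                      # pool already satisfies the minimum
    need = pool_min - available
    # round the request up to whole batches
    request = ((need + batch - 1) // batch) * batch
    if pool_max > 0:                  # 0 means "no upper cap"
        request = min(request, pool_max - available)
    return max(request, 0)

assert ports_to_create(available=0, pool_min=1, pool_max=0, batch=5) == 5
assert ports_to_create(available=3, pool_min=1, pool_max=0, batch=5) == 0
```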
diff --git a/roles/kuryr/templates/controller-deployment.yaml.j2 b/roles/kuryr/templates/controller-deployment.yaml.j2
index d970270b5..155d1faab 100644
--- a/roles/kuryr/templates/controller-deployment.yaml.j2
+++ b/roles/kuryr/templates/controller-deployment.yaml.j2
@@ -22,6 +22,13 @@ spec:
- image: kuryr/controller:latest
imagePullPolicy: IfNotPresent
name: controller
+{% if kuryr_openstack_enable_pools | default(false) %}
+ readinessProbe:
+ exec:
+ command:
+ - cat
+ - /tmp/pools_loaded
+{% endif %}
terminationMessagePath: "/dev/termination-log"
# FIXME(dulek): This shouldn't be required, but without it selinux is
# complaining about access to kuryr.conf.
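The readiness probe added above just `cat`s a sentinel file, so the pod only reports Ready once `/tmp/pools_loaded` exists. As an illustration of the contract (the file path comes from the probe; `populate_pools()` is an assumed placeholder, not a kuryr-kubernetes API), the controller side only needs to create that file after warming its pools:

```
# Illustrative only: how a controller process could satisfy the
# `cat /tmp/pools_loaded` exec probe defined above.
import pathlib

SENTINEL = pathlib.Path("/tmp/pools_loaded")

def populate_pools():
    """Pre-create the configured minimum of Neutron ports (placeholder)."""

def main():
    populate_pools()
    SENTINEL.touch()   # the exec probe starts succeeding from this point on

if __name__ == "__main__":
    main()
```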
diff --git a/roles/openshift_sanitize_inventory/library/conditional_set_fact.py b/roles/lib_openshift/library/conditional_set_fact.py
index f61801714..363399f33 100644
--- a/roles/openshift_sanitize_inventory/library/conditional_set_fact.py
+++ b/roles/lib_openshift/library/conditional_set_fact.py
@@ -29,6 +29,10 @@ EXAMPLES = '''
fact1: not_defined_variable
fact2: defined_variable
+- name: Conditionally set fact falling back on default
+ conditional_set_fact:
+ fact1: not_defined_var | defined_variable
+
'''
@@ -48,12 +52,14 @@ def run_module():
is_changed = False
for param in module.params['vars']:
- other_var = module.params['vars'][param]
-
- if other_var in module.params['facts']:
- local_facts[param] = module.params['facts'][other_var]
- if not is_changed:
- is_changed = True
+ other_vars = module.params['vars'][param].replace(" ", "")
+
+ for other_var in other_vars.split('|'):
+ if other_var in module.params['facts']:
+ local_facts[param] = module.params['facts'][other_var]
+ if not is_changed:
+ is_changed = True
+ break
return module.exit_json(changed=is_changed, # noqa: F405
ansible_facts=local_facts)
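The change above teaches `conditional_set_fact` to accept a `|`-separated fallback chain: spaces are stripped, the candidates are tried left to right, and the first one present in `facts` wins. A standalone sketch of that resolution loop, mirroring the patched logic outside the AnsibleModule plumbing:

```
# Minimal re-implementation of the fallback resolution added above.
def resolve(vars_map, facts):
    local_facts, changed = {}, False
    for param, candidates in vars_map.items():
        for name in candidates.replace(" ", "").split('|'):
            if name in facts:
                local_facts[param] = facts[name]
                changed = True
                break                 # first defined candidate wins
    return local_facts, changed

facts = {'defined_variable': 42}
print(resolve({'fact1': 'not_defined_var | defined_variable'}, facts))
# -> ({'fact1': 42}, True)
```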
diff --git a/roles/lib_openshift/src/test/unit/test_oc_scale.py b/roles/lib_openshift/src/test/unit/test_oc_scale.py
index d810735f2..9d10c84f3 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_scale.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_scale.py
@@ -27,7 +27,7 @@ class OCScaleTest(unittest.TestCase):
@mock.patch('oc_scale.Utils.create_tmpfile_copy')
@mock.patch('oc_scale.OCScale.openshift_cmd')
def test_state_list(self, mock_openshift_cmd, mock_tmpfile_copy):
- ''' Testing a get '''
+ ''' Testing a list '''
params = {'name': 'router',
'namespace': 'default',
'replicas': 2,
@@ -71,8 +71,296 @@ class OCScaleTest(unittest.TestCase):
@mock.patch('oc_scale.Utils.create_tmpfile_copy')
@mock.patch('oc_scale.OCScale.openshift_cmd')
+ def test_state_present(self, mock_openshift_cmd, mock_tmpfile_copy):
+ ''' Testing a state present '''
+ params = {'name': 'router',
+ 'namespace': 'default',
+ 'replicas': 2,
+ 'state': 'present',
+ 'kind': 'dc',
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ dc = '''{"kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
+ "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
+ "resourceVersion": "6558",
+ "generation": 8,
+ "creationTimestamp": "2017-01-23T20:58:07Z",
+ "labels": {
+ "router": "router"
+ }
+ },
+ "spec": {
+ "replicas": 2,
+ }
+ }'''
+
+ mock_openshift_cmd.side_effect = [
+ {"cmd": '/usr/bin/oc get dc router -n default',
+ 'results': dc,
+ 'returncode': 0}]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCScale.run_ansible(params, False)
+
+ self.assertFalse(results['changed'])
+ self.assertEqual(results['state'], 'present')
+ self.assertEqual(results['result'][0], 2)
+
+ @mock.patch('oc_scale.Utils.create_tmpfile_copy')
+ @mock.patch('oc_scale.OCScale.openshift_cmd')
+ def test_scale_up(self, mock_openshift_cmd, mock_tmpfile_copy):
+ ''' Testing a scale up '''
+ params = {'name': 'router',
+ 'namespace': 'default',
+ 'replicas': 3,
+ 'state': 'present',
+ 'kind': 'dc',
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ dc = '''{"kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
+ "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
+ "resourceVersion": "6558",
+ "generation": 8,
+ "creationTimestamp": "2017-01-23T20:58:07Z",
+ "labels": {
+ "router": "router"
+ }
+ },
+ "spec": {
+ "replicas": 2,
+ }
+ }'''
+ dc_updated = '''{"kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
+ "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
+ "resourceVersion": "6559",
+ "generation": 9,
+ "creationTimestamp": "2017-01-24T20:58:07Z",
+ "labels": {
+ "router": "router"
+ }
+ },
+ "spec": {
+ "replicas": 3,
+ }
+ }'''
+
+ mock_openshift_cmd.side_effect = [
+ {"cmd": '/usr/bin/oc get dc router -n default',
+ 'results': dc,
+ 'returncode': 0},
+ {"cmd": '/usr/bin/oc get dc router -n default',
+ 'results': dc,
+ 'returncode': 0},
+ {"cmd": '/usr/bin/oc replace',
+ 'results': dc,
+ 'returncode': 0},
+ {"cmd": '/usr/bin/oc get dc router -n default',
+ 'results': dc_updated,
+ 'returncode': 0}]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCScale.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+ self.assertEqual(results['state'], 'present')
+ self.assertEqual(results['result'][0], 3)
+
+ @mock.patch('oc_scale.Utils.create_tmpfile_copy')
+ @mock.patch('oc_scale.OCScale.openshift_cmd')
+ def test_scale_down(self, mock_openshift_cmd, mock_tmpfile_copy):
+ ''' Testing a scale down '''
+ params = {'name': 'router',
+ 'namespace': 'default',
+ 'replicas': 1,
+ 'state': 'present',
+ 'kind': 'dc',
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ dc = '''{"kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
+ "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
+ "resourceVersion": "6558",
+ "generation": 8,
+ "creationTimestamp": "2017-01-23T20:58:07Z",
+ "labels": {
+ "router": "router"
+ }
+ },
+ "spec": {
+ "replicas": 2,
+ }
+ }'''
+ dc_updated = '''{"kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
+ "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
+ "resourceVersion": "6560",
+ "generation": 9,
+ "creationTimestamp": "2017-01-24T20:58:07Z",
+ "labels": {
+ "router": "router"
+ }
+ },
+ "spec": {
+ "replicas": 1,
+ }
+ }'''
+
+ mock_openshift_cmd.side_effect = [
+ {"cmd": '/usr/bin/oc get dc router -n default',
+ 'results': dc,
+ 'returncode': 0},
+ {"cmd": '/usr/bin/oc get dc router -n default',
+ 'results': dc,
+ 'returncode': 0},
+ {"cmd": '/usr/bin/oc replace',
+ 'results': dc,
+ 'returncode': 0},
+ {"cmd": '/usr/bin/oc get dc router -n default',
+ 'results': dc_updated,
+ 'returncode': 0}]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCScale.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+ self.assertEqual(results['state'], 'present')
+ self.assertEqual(results['result'][0], 1)
+
+ @mock.patch('oc_scale.Utils.create_tmpfile_copy')
+ @mock.patch('oc_scale.OCScale.openshift_cmd')
+ def test_scale_failed(self, mock_openshift_cmd, mock_tmpfile_copy):
+ ''' Testing a scale failure '''
+ params = {'name': 'router',
+ 'namespace': 'default',
+ 'replicas': 1,
+ 'state': 'present',
+ 'kind': 'dc',
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ dc = '''{"kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
+ "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
+ "resourceVersion": "6558",
+ "generation": 8,
+ "creationTimestamp": "2017-01-23T20:58:07Z",
+ "labels": {
+ "router": "router"
+ }
+ },
+ "spec": {
+ "replicas": 2,
+ }
+ }'''
+ error_message = "foo"
+
+ mock_openshift_cmd.side_effect = [
+ {"cmd": '/usr/bin/oc get dc router -n default',
+ 'results': dc,
+ 'returncode': 0},
+ {"cmd": '/usr/bin/oc get dc router -n default',
+ 'results': dc,
+ 'returncode': 0},
+ {"cmd": '/usr/bin/oc replace',
+ 'results': error_message,
+ 'returncode': 1}]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCScale.run_ansible(params, False)
+
+ self.assertTrue(results['failed'])
+
+ @mock.patch('oc_scale.Utils.create_tmpfile_copy')
+ @mock.patch('oc_scale.OCScale.openshift_cmd')
+ def test_state_unknown(self, mock_openshift_cmd, mock_tmpfile_copy):
+ ''' Testing an unknown state '''
+ params = {'name': 'router',
+ 'namespace': 'default',
+ 'replicas': 2,
+ 'state': 'unknown-state',
+ 'kind': 'dc',
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ dc = '''{"kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
+ "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
+ "resourceVersion": "6558",
+ "generation": 8,
+ "creationTimestamp": "2017-01-23T20:58:07Z",
+ "labels": {
+ "router": "router"
+ }
+ },
+ "spec": {
+ "replicas": 2,
+ }
+ }'''
+
+ mock_openshift_cmd.side_effect = [
+ {"cmd": '/usr/bin/oc get dc router -n default',
+ 'results': dc,
+ 'returncode': 0}]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCScale.run_ansible(params, False)
+
+ self.assertFalse('changed' in results)
+ self.assertEqual(results['failed'], True)
+
+ @mock.patch('oc_scale.Utils.create_tmpfile_copy')
+ @mock.patch('oc_scale.OCScale.openshift_cmd')
def test_scale(self, mock_openshift_cmd, mock_tmpfile_copy):
- ''' Testing a get '''
+ ''' Testing scale '''
params = {'name': 'router',
'namespace': 'default',
'replicas': 3,
@@ -120,8 +408,57 @@ class OCScaleTest(unittest.TestCase):
@mock.patch('oc_scale.Utils.create_tmpfile_copy')
@mock.patch('oc_scale.OCScale.openshift_cmd')
+ def test_scale_rc(self, mock_openshift_cmd, mock_tmpfile_copy):
+ ''' Testing scale for replication controllers '''
+ params = {'name': 'router',
+ 'namespace': 'default',
+ 'replicas': 3,
+ 'state': 'list',
+ 'kind': 'rc',
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ rc = '''{"kind": "ReplicationController",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
+ "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
+ "resourceVersion": "6558",
+ "generation": 8,
+ "creationTimestamp": "2017-01-23T20:58:07Z",
+ "labels": {
+ "router": "router"
+ }
+ },
+ "spec": {
+ "replicas": 3,
+ }
+ }'''
+
+ mock_openshift_cmd.side_effect = [
+ {"cmd": '/usr/bin/oc get rc router -n default',
+ 'results': rc,
+ 'returncode': 0},
+ {"cmd": '/usr/bin/oc create -f /tmp/router -n default',
+ 'results': '',
+ 'returncode': 0}
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCScale.run_ansible(params, False)
+
+ self.assertFalse(results['changed'])
+ self.assertEqual(results['result'][0], 3)
+
+ @mock.patch('oc_scale.Utils.create_tmpfile_copy')
+ @mock.patch('oc_scale.OCScale.openshift_cmd')
def test_no_dc_scale(self, mock_openshift_cmd, mock_tmpfile_copy):
- ''' Testing a get '''
+ ''' Testing scale for a nonexistent dc '''
params = {'name': 'not_there',
'namespace': 'default',
'replicas': 3,
@@ -205,7 +542,7 @@ class OCScaleTest(unittest.TestCase):
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
- ''' Testing binary lookup fallback '''
+ ''' Testing binary lookup fallback in py3 '''
mock_env_get.side_effect = lambda _v, _d: ''
@@ -217,7 +554,7 @@ class OCScaleTest(unittest.TestCase):
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
- ''' Testing binary lookup in path '''
+ ''' Testing binary lookup in path in py3 '''
oc_bin = '/usr/bin/oc'
@@ -231,7 +568,7 @@ class OCScaleTest(unittest.TestCase):
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
- ''' Testing binary lookup in /usr/local/bin '''
+ ''' Testing binary lookup in /usr/local/bin in py3 '''
oc_bin = '/usr/local/bin/oc'
@@ -245,7 +582,7 @@ class OCScaleTest(unittest.TestCase):
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
- ''' Testing binary lookup in ~/bin '''
+ ''' Testing binary lookup in ~/bin in py3 '''
oc_bin = os.path.expanduser('~/bin/oc')
diff --git a/roles/lib_os_firewall/README.md b/roles/lib_os_firewall/README.md
deleted file mode 100644
index ba8c84865..000000000
--- a/roles/lib_os_firewall/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-lib_os_firewall
-===========
-
-lib_os_firewall manages iptables firewall settings for a minimal use
-case (Adding/Removing rules based on protocol and port number).
-
-Note: firewalld is not supported on Atomic Host
-https://bugzilla.redhat.com/show_bug.cgi?id=1403331
-
-Requirements
-------------
-
-Ansible 2.2
-
-Role Variables
---------------
-
-| Name | Default | |
-|---------------------------|---------|----------------------------------------|
-| os_firewall_allow | [] | List of service,port mappings to allow |
-| os_firewall_deny | [] | List of service, port mappings to deny |
-
-Dependencies
-------------
-
-None.
-
-Example Playbook
-----------------
-
-Use iptables and open tcp ports 80 and 443:
-```
----
-- hosts: servers
- vars:
- os_firewall_use_firewalld: false
- os_firewall_allow:
- - service: httpd
- port: 80/tcp
- - service: https
- port: 443/tcp
- tasks:
- - include_role:
- name: lib_os_firewall
-
- - name: set allow rules
- os_firewall_manage_iptables:
- name: "{{ item.service }}"
- action: add
- protocol: "{{ item.port.split('/')[1] }}"
- port: "{{ item.port.split('/')[0] }}"
- with_items: "{{ os_firewall_allow }}"
-```
-
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-Jason DeTiberus - jdetiber@redhat.com
diff --git a/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py b/roles/lib_utils/action_plugins/generate_pv_pvcs_list.py
index eb13a58ba..eb13a58ba 100644
--- a/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py
+++ b/roles/lib_utils/action_plugins/generate_pv_pvcs_list.py
diff --git a/roles/lib_utils/action_plugins/sanity_checks.py b/roles/lib_utils/action_plugins/sanity_checks.py
new file mode 100644
index 000000000..09ce55e8f
--- /dev/null
+++ b/roles/lib_utils/action_plugins/sanity_checks.py
@@ -0,0 +1,181 @@
+"""
+Ansible action plugin to ensure inventory variables are set
+appropriately and no conflicting options have been provided.
+"""
+import re
+
+from ansible.plugins.action import ActionBase
+from ansible import errors
+
+# Valid values for openshift_deployment_type
+VALID_DEPLOYMENT_TYPES = ('origin', 'openshift-enterprise')
+
+# Tuple of variable names and default values if undefined.
+NET_PLUGIN_LIST = (('openshift_use_openshift_sdn', True),
+ ('openshift_use_flannel', False),
+ ('openshift_use_nuage', False),
+ ('openshift_use_contiv', False),
+ ('openshift_use_calico', False))
+
+ENTERPRISE_TAG_REGEX_ERROR = """openshift_image_tag must be in the format
+v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3,
+v3.5.1.3.4, v1.2-1, v1.2.3-4, v1.2.3-4.5, v1.2.3-4.5.6
+You specified openshift_image_tag={}"""
+
+ORIGIN_TAG_REGEX_ERROR = """openshift_image_tag must be in the format
+v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1
+You specified openshift_image_tag={}"""
+
+ORIGIN_TAG_REGEX = {'re': '(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)',
+ 'error_msg': ORIGIN_TAG_REGEX_ERROR}
+ENTERPRISE_TAG_REGEX = {'re': '(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)',
+ 'error_msg': ENTERPRISE_TAG_REGEX_ERROR}
+IMAGE_TAG_REGEX = {'origin': ORIGIN_TAG_REGEX,
+ 'openshift-enterprise': ENTERPRISE_TAG_REGEX}
+
+CONTAINERIZED_NO_TAG_ERROR_MSG = """To install a containerized Origin release,
+you must set openshift_release or openshift_image_tag in your inventory to
+specify which version of the OpenShift component images to use.
+(Suggestion: add openshift_release="x.y" to inventory.)"""
+
+
+def to_bool(var_to_check):
+ """Determine a boolean value given the multiple
+ ways bools can be specified in ansible."""
+ # http://yaml.org/type/bool.html
+ yes_list = (True, 1, "True", "1", "true", "TRUE",
+ "Yes", "yes", "Y", "y", "YES",
+ "on", "ON", "On")
+ return var_to_check in yes_list
+
+
+class ActionModule(ActionBase):
+ """Action plugin to execute sanity checks."""
+ def template_var(self, hostvars, host, varname):
+ """Retrieve a variable from hostvars and template it.
+ If undefined, return None type."""
+ res = hostvars[host].get(varname)
+ if res is None:
+ return None
+ return self._templar.template(res)
+
+ def check_openshift_deployment_type(self, hostvars, host):
+ """Ensure a valid openshift_deployment_type is set"""
+ openshift_deployment_type = self.template_var(hostvars, host,
+ 'openshift_deployment_type')
+ if openshift_deployment_type not in VALID_DEPLOYMENT_TYPES:
+ type_strings = ", ".join(VALID_DEPLOYMENT_TYPES)
+ msg = "openshift_deployment_type must be defined and one of {}".format(type_strings)
+ raise errors.AnsibleModuleError(msg)
+ return openshift_deployment_type
+
+ def check_python_version(self, hostvars, host, distro):
+ """Ensure python version is 3 for Fedora and python 2 for others"""
+ ansible_python = self.template_var(hostvars, host, 'ansible_python')
+ if distro == "Fedora":
+ if ansible_python['version']['major'] != 3:
+ msg = "openshift-ansible requires Python 3 for {};".format(distro)
+ msg += " For information on enabling Python 3 with Ansible,"
+ msg += " see https://docs.ansible.com/ansible/python_3_support.html"
+ raise errors.AnsibleModuleError(msg)
+ else:
+ if ansible_python['version']['major'] != 2:
+ msg = "openshift-ansible requires Python 2 for {};".format(distro)
+
+ def check_image_tag_format(self, hostvars, host, openshift_deployment_type):
+ """Ensure openshift_image_tag is formatted correctly"""
+ openshift_image_tag = self.template_var(hostvars, host, 'openshift_image_tag')
+ if not openshift_image_tag or openshift_image_tag == 'latest':
+ return None
+ regex_to_match = IMAGE_TAG_REGEX[openshift_deployment_type]['re']
+ res = re.match(regex_to_match, str(openshift_image_tag))
+ if res is None:
+ msg = IMAGE_TAG_REGEX[openshift_deployment_type]['error_msg']
+ msg = msg.format(str(openshift_image_tag))
+ raise errors.AnsibleModuleError(msg)
+
+ def no_origin_image_version(self, hostvars, host, openshift_deployment_type):
+ """Ensure we can determine what image version to use with origin
+ fail when:
+ - openshift_is_containerized
+ - openshift_deployment_type == 'origin'
+ - openshift_release is not defined
+ - openshift_image_tag is not defined"""
+ if not openshift_deployment_type == 'origin':
+ return None
+ oic = self.template_var(hostvars, host, 'openshift_is_containerized')
+ if not to_bool(oic):
+ return None
+ orelease = self.template_var(hostvars, host, 'openshift_release')
+ oitag = self.template_var(hostvars, host, 'openshift_image_tag')
+ if not orelease and not oitag:
+ raise errors.AnsibleModuleError(CONTAINERIZED_NO_TAG_ERROR_MSG)
+
+ def network_plugin_check(self, hostvars, host):
+ """Ensure only one type of network plugin is enabled"""
+ res = []
+ # Loop through each possible network plugin boolean, determine the
+ # actual boolean value, and append results into a list.
+ for plugin, default_val in NET_PLUGIN_LIST:
+ res_temp = self.template_var(hostvars, host, plugin)
+ if res_temp is None:
+ res_temp = default_val
+ res.append(to_bool(res_temp))
+
+ if sum(res) != 1:
+ plugin_str = list(zip([x[0] for x in NET_PLUGIN_LIST], res))
+
+ msg = "Host Checked: {} Only one of must be true. Found: {}".format(host, plugin_str)
+ raise errors.AnsibleModuleError(msg)
+
+ def check_hostname_vars(self, hostvars, host):
+ """Checks to ensure openshift_hostname
+ and openshift_public_hostname
+ conform to the proper length of 63 characters or less"""
+ for varname in ('openshift_public_hostname', 'openshift_hostname'):
+ var_value = self.template_var(hostvars, host, varname)
+ if var_value and len(var_value) > 63:
+ msg = '{} must be 63 characters or less'.format(varname)
+ raise errors.AnsibleModuleError(msg)
+
+ def run_checks(self, hostvars, host):
+ """Execute the hostvars validations against host"""
+ distro = self.template_var(hostvars, host, 'ansible_distribution')
+ odt = self.check_openshift_deployment_type(hostvars, host)
+ self.check_python_version(hostvars, host, distro)
+ self.check_image_tag_format(hostvars, host, odt)
+ self.no_origin_image_version(hostvars, host, odt)
+ self.network_plugin_check(hostvars, host)
+ self.check_hostname_vars(hostvars, host)
+
+ def run(self, tmp=None, task_vars=None):
+ result = super(ActionModule, self).run(tmp, task_vars)
+
+ # self.task_vars holds all in-scope variables.
+ # Ignore setting self.task_vars outside of __init__.
+ # pylint: disable=W0201
+ self.task_vars = task_vars or {}
+
+ # self._task.args holds task parameters.
+ # check_hosts is a parameter to this plugin, and should provide
+ # a list of hosts.
+ check_hosts = self._task.args.get('check_hosts')
+ if not check_hosts:
+ msg = "check_hosts is required"
+ raise errors.AnsibleModuleError(msg)
+
+ # We need to access each host's variables
+ hostvars = self.task_vars.get('hostvars')
+ if not hostvars:
+ msg = "hostvars not found in task_vars; cannot run sanity checks"
+ raise errors.AnsibleModuleError(msg)
+
+ # We loop through each host in the provided list check_hosts
+ for host in check_hosts:
+ self.run_checks(hostvars, host)
+
+ result["changed"] = False
+ result["failed"] = False
+ result["msg"] = "Sanity Checks passed"
+
+ return result
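Taken together, `run()` requires a `check_hosts` task argument plus in-scope `hostvars`, then applies every validation to each host. A hedged usage sketch of the per-host checks with hand-built hostvars, bypassing the Ansible runner and templating (`FakeTemplar` is an assumption for illustration, not Ansible API):

```
# Illustrative driver for the checks above, outside a real playbook run.
# The templar stub makes template_var() return values verbatim.
class FakeTemplar(object):
    def template(self, value):
        return value

hostvars = {
    'master0': {
        'openshift_deployment_type': 'origin',
        'ansible_distribution': 'CentOS',
        'ansible_python': {'version': {'major': 2}},
        'openshift_image_tag': 'v3.7.0',
        'openshift_use_openshift_sdn': True,
    },
}

plugin = ActionModule.__new__(ActionModule)   # skip Ansible's __init__
plugin._templar = FakeTemplar()
plugin.run_checks(hostvars, 'master0')        # raises AnsibleModuleError on failure
```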
diff --git a/callback_plugins/aa_version_requirement.py b/roles/lib_utils/callback_plugins/aa_version_requirement.py
index 1093acdae..1093acdae 100644
--- a/callback_plugins/aa_version_requirement.py
+++ b/roles/lib_utils/callback_plugins/aa_version_requirement.py
diff --git a/callback_plugins/openshift_quick_installer.py b/roles/lib_utils/callback_plugins/openshift_quick_installer.py
index c0fdbc650..365e2443d 100644
--- a/callback_plugins/openshift_quick_installer.py
+++ b/roles/lib_utils/callback_plugins/openshift_quick_installer.py
@@ -192,7 +192,7 @@ The only thing we change here is adding `log_only=True` to the
"""
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
- if result._task.action in ('include', 'include_role'):
+ if result._task.action in ('include', 'import_role'):
return
elif result._result.get('changed', False):
if delegated_vars:
@@ -220,7 +220,7 @@ The only thing we change here is adding `log_only=True` to the
def v2_runner_item_on_ok(self, result):
"""Print out task results for items you're iterating over"""
delegated_vars = result._result.get('_ansible_delegated_vars', None)
- if result._task.action in ('include', 'include_role'):
+ if result._task.action in ('include', 'import_role'):
return
elif result._result.get('changed', False):
msg = 'changed'
diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/lib_utils/filter_plugins/oo_cert_expiry.py
index a2bc9ecdb..58b228fee 100644
--- a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
+++ b/roles/lib_utils/filter_plugins/oo_cert_expiry.py
@@ -31,7 +31,6 @@ certificates
Example playbook usage:
- name: Generate expiration results JSON
- become: no
run_once: yes
delegate_to: localhost
when: openshift_certificate_expiry_save_json_results|bool
diff --git a/roles/lib_utils/filter_plugins/oo_filters.py b/roles/lib_utils/filter_plugins/oo_filters.py
new file mode 100644
index 000000000..9f73510c4
--- /dev/null
+++ b/roles/lib_utils/filter_plugins/oo_filters.py
@@ -0,0 +1,627 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# pylint: disable=too-many-lines
+"""
+Custom filters for use in openshift-ansible
+"""
+import os
+import pdb
+import random
+import re
+
+from base64 import b64encode
+from collections import Mapping
+# pylint no-name-in-module and import-error disabled here because pylint
+# fails to properly detect the packages when installed in a virtualenv
+from distutils.util import strtobool # pylint:disable=no-name-in-module,import-error
+from operator import itemgetter
+
+import yaml
+
+from ansible import errors
+from ansible.parsing.yaml.dumper import AnsibleDumper
+
+# pylint: disable=import-error,no-name-in-module
+from ansible.module_utils.six import string_types, u
+# pylint: disable=import-error,no-name-in-module
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+HAS_OPENSSL = False
+try:
+ import OpenSSL.crypto
+ HAS_OPENSSL = True
+except ImportError:
+ pass
+
+
+# pylint: disable=C0103
+
+def lib_utils_oo_pdb(arg):
+ """ This pops you into a pdb instance where arg is the data passed in
+ from the filter.
+ Ex: "{{ hostvars | lib_utils_oo_pdb }}"
+ """
+ pdb.set_trace()
+ return arg
+
+
+def get_attr(data, attribute=None):
+ """ This looks up dictionary attributes of the form a.b.c and returns
+ the value.
+
+ If the key isn't present, None is returned.
+ Ex: data = {'a': {'b': {'c': 5}}}
+ attribute = "a.b.c"
+ returns 5
+ """
+ if not attribute:
+ raise errors.AnsibleFilterError("|failed expects attribute to be set")
+
+ ptr = data
+ for attr in attribute.split('.'):
+ if attr in ptr:
+ ptr = ptr[attr]
+ else:
+ ptr = None
+ break
+
+ return ptr
+
+
+def oo_flatten(data):
+ """ This filter plugin will flatten a list of lists
+ """
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|failed expects to flatten a List")
+
+ return [item for sublist in data for item in sublist]
+
+
+def lib_utils_oo_collect(data_list, attribute=None, filters=None):
+ """ This takes a list of dict and collects all attributes specified into a
+ list. If filter is specified then we will include all items that
+ match _ALL_ of filters. If a dict entry is missing the key in a
+ filter it will be excluded from the match.
+ Ex: data_list = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
+ {'a':2, 'z': 'z'}, # True, return
+ {'a':3, 'z': 'z'}, # True, return
+ {'a':4, 'z': 'b'}, # FAILED, obj['z'] != filters['z']
+ ]
+ attribute = 'a'
+ filters = {'z': 'z'}
+ returns [1, 2, 3]
+
+ This also deals with lists of lists with dict as elements.
+ Ex: data_list = [
+ [ {'a':1, 'b':5, 'z': 'z'}, # True, return
+ {'a':2, 'b':6, 'z': 'z'} # True, return
+ ],
+ [ {'a':3, 'z': 'z'}, # True, return
+ {'a':4, 'z': 'b'} # FAILED, obj['z'] != filters['z']
+ ],
+ {'a':5, 'z': 'z'}, # True, return
+ ]
+ attribute = 'a'
+ filters = {'z': 'z'}
+ returns [1, 2, 3, 5]
+ """
+ if not isinstance(data_list, list):
+ raise errors.AnsibleFilterError("lib_utils_oo_collect expects to filter on a List")
+
+ if not attribute:
+ raise errors.AnsibleFilterError("lib_utils_oo_collect expects attribute to be set")
+
+ data = []
+ retval = []
+
+ for item in data_list:
+ if isinstance(item, list):
+ retval.extend(lib_utils_oo_collect(item, attribute, filters))
+ else:
+ data.append(item)
+
+ if filters is not None:
+ if not isinstance(filters, dict):
+ raise errors.AnsibleFilterError(
+ "lib_utils_oo_collect expects filter to be a dict")
+ retval.extend([get_attr(d, attribute) for d in data if (
+ all([d.get(key, None) == filters[key] for key in filters]))])
+ else:
+ retval.extend([get_attr(d, attribute) for d in data])
+
+ retval = [val for val in retval if val is not None]
+
+ return retval
+
+
+def lib_utils_oo_select_keys_from_list(data, keys):
+ """ This returns a list, which contains the value portions for the keys
+ Ex: data = { 'a':1, 'b':2, 'c':3 }
+ keys = ['a', 'c']
+ returns [1, 3]
+ """
+
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|lib_utils_oo_select_keys_from_list failed expects to filter on a list")
+
+ if not isinstance(keys, list):
+ raise errors.AnsibleFilterError("|lib_utils_oo_select_keys_from_list failed expects first param is a list")
+
+ # Gather up the values for the list of keys passed in
+ retval = [lib_utils_oo_select_keys(item, keys) for item in data]
+
+ return oo_flatten(retval)
+
+
+def lib_utils_oo_select_keys(data, keys):
+ """ This returns a list, which contains the value portions for the keys
+ Ex: data = { 'a':1, 'b':2, 'c':3 }
+ keys = ['a', 'c']
+ returns [1, 3]
+ """
+
+ if not isinstance(data, Mapping):
+ raise errors.AnsibleFilterError("|lib_utils_oo_select_keys failed expects to filter on a dict or object")
+
+ if not isinstance(keys, list):
+ raise errors.AnsibleFilterError("|lib_utils_oo_select_keys failed expects first param is a list")
+
+ # Gather up the values for the list of keys passed in
+ retval = [data[key] for key in keys if key in data]
+
+ return retval
+
+
+def lib_utils_oo_prepend_strings_in_list(data, prepend):
+ """ This takes a list of strings and prepends a string to each item in the
+ list
+ Ex: data = ['cart', 'tree']
+ prepend = 'apple-'
+ returns ['apple-cart', 'apple-tree']
+ """
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+ if not all(isinstance(x, string_types) for x in data):
+ raise errors.AnsibleFilterError("|failed expects first param is a list"
+ " of strings")
+ retval = [prepend + s for s in data]
+ return retval
+
+
+def lib_utils_oo_dict_to_list_of_dict(data, key_title='key', value_title='value'):
+ """Take a dict and arrange them as a list of dicts
+
+ Input data:
+ {'region': 'infra', 'test_k': 'test_v'}
+
+ Return data:
+ [{'key': 'region', 'value': 'infra'}, {'key': 'test_k', 'value': 'test_v'}]
+
+ Written for use of the oc_label module
+ """
+ if not isinstance(data, dict):
+ # pylint: disable=line-too-long
+ raise errors.AnsibleFilterError("|failed expects first param is a dict. Got %s. Type: %s" % (str(data), str(type(data))))
+
+ rval = []
+ for label in data.items():
+ rval.append({key_title: label[0], value_title: label[1]})
+
+ return rval
+
+
+def oo_ami_selector(data, image_name):
+ """ This takes a list of amis and an image name and attempts to return
+ the latest ami.
+ """
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+
+ if not data:
+ return None
+ else:
+ if image_name is None or not image_name.endswith('_*'):
+ ami = sorted(data, key=itemgetter('name'), reverse=True)[0]
+ return ami['ami_id']
+ else:
+ ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data]
+ ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0]
+ return ami['ami_id']
+
+
+def lib_utils_oo_split(string, separator=','):
+ """ This splits the input string into a list. If the input string is
+ already a list we will return it as is.
+ """
+ if isinstance(string, list):
+ return string
+ return string.split(separator)
+
+
+def lib_utils_oo_dict_to_keqv_list(data):
+ """Take a dict and return a list of k=v pairs
+
+ Input data:
+ {'a': 1, 'b': 2}
+
+ Return data:
+ ['a=1', 'b=2']
+ """
+ return ['='.join(str(e) for e in x) for x in data.items()]
+
+
+def lib_utils_oo_list_to_dict(lst, separator='='):
+ """ This converts a list of ["k=v"] to a dictionary {k: v}.
+ """
+ kvs = [i.split(separator) for i in lst]
+ return {k: v for k, v in kvs}
+
+
+def haproxy_backend_masters(hosts, port):
+ """ This takes an array of dicts and returns an array of dicts
+ to be used as a backend for the haproxy role
+ """
+ servers = []
+ for idx, host_info in enumerate(hosts):
+ server = dict(name="master%s" % idx)
+ server_ip = host_info['openshift']['common']['ip']
+ server['address'] = "%s:%s" % (server_ip, port)
+ server['opts'] = 'check'
+ servers.append(server)
+ return servers
+
+
+# pylint: disable=too-many-branches
+def lib_utils_oo_parse_named_certificates(certificates, named_certs_dir, internal_hostnames):
+ """ Parses names from list of certificate hashes.
+
+ Ex: certificates = [{ "certfile": "/root/custom1.crt",
+ "keyfile": "/root/custom1.key",
+ "cafile": "/root/custom-ca1.crt" },
+ { "certfile": "custom2.crt",
+ "keyfile": "custom2.key",
+ "cafile": "custom-ca2.crt" }]
+
+ returns [{ "certfile": "/etc/origin/master/named_certificates/custom1.crt",
+ "keyfile": "/etc/origin/master/named_certificates/custom1.key",
+ "cafile": "/etc/origin/master/named_certificates/custom-ca1.crt",
+ "names": [ "public-master-host.com",
+ "other-master-host.com" ] },
+ { "certfile": "/etc/origin/master/named_certificates/custom2.crt",
+ "keyfile": "/etc/origin/master/named_certificates/custom2.key",
+ "cafile": "/etc/origin/master/named_certificates/custom-ca-2.crt",
+ "names": [ "some-hostname.com" ] }]
+ """
+ if not isinstance(named_certs_dir, string_types):
+ raise errors.AnsibleFilterError("|failed expects named_certs_dir is str or unicode")
+
+ if not isinstance(internal_hostnames, list):
+ raise errors.AnsibleFilterError("|failed expects internal_hostnames is list")
+
+ if not HAS_OPENSSL:
+ raise errors.AnsibleFilterError("|missing OpenSSL python bindings")
+
+ for certificate in certificates:
+ if 'names' in certificate.keys():
+ continue
+ else:
+ certificate['names'] = []
+
+ if not os.path.isfile(certificate['certfile']) or not os.path.isfile(certificate['keyfile']):
+ raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" %
+ (certificate['certfile'], certificate['keyfile']))
+
+ try:
+ st_cert = open(certificate['certfile'], 'rt').read()
+ cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert)
+ certificate['names'].append(str(cert.get_subject().commonName.decode()))
+ for i in range(cert.get_extension_count()):
+ if cert.get_extension(i).get_short_name() == 'subjectAltName':
+ for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '):
+ certificate['names'].append(name)
+ except Exception:
+ raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] +
+ "please specify certificate names in host inventory"))
+
+ certificate['names'] = list(set(certificate['names']))
+ if 'cafile' not in certificate:
+ certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames]
+ if not certificate['names']:
+ raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] +
+ "detected a collision with internal hostname, please specify " +
+ "certificate names in host inventory"))
+
+ for certificate in certificates:
+ # Update paths for configuration
+ certificate['certfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['certfile']))
+ certificate['keyfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['keyfile']))
+ if 'cafile' in certificate:
+ certificate['cafile'] = os.path.join(named_certs_dir, os.path.basename(certificate['cafile']))
+ return certificates
+
+
+def lib_utils_oo_generate_secret(num_bytes):
+ """ generate a session secret """
+
+ if not isinstance(num_bytes, int):
+ raise errors.AnsibleFilterError("|failed expects num_bytes is int")
+
+ return b64encode(os.urandom(num_bytes)).decode('utf-8')
+
+
+def lib_utils_to_padded_yaml(data, level=0, indent=2, **kw):
+ """ returns a yaml snippet padded to match the indent level you specify """
+ if data in [None, ""]:
+ return ""
+
+ try:
+ transformed = u(yaml.dump(data, indent=indent, allow_unicode=True,
+ default_flow_style=False,
+ Dumper=AnsibleDumper, **kw))
+ padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()])
+ return "\n{0}".format(padded)
+ except Exception as my_e:
+ raise errors.AnsibleFilterError('Failed to convert: %s' % my_e)
+
+
+def lib_utils_oo_pods_match_component(pods, deployment_type, component):
+ """ Filters a list of Pods and returns the ones matching the deployment_type and component
+ """
+ if not isinstance(pods, list):
+ raise errors.AnsibleFilterError("failed expects to filter on a list")
+ if not isinstance(deployment_type, string_types):
+ raise errors.AnsibleFilterError("failed expects deployment_type to be a string")
+ if not isinstance(component, string_types):
+ raise errors.AnsibleFilterError("failed expects component to be a string")
+
+ image_prefix = 'openshift/origin-'
+ if deployment_type == 'openshift-enterprise':
+ image_prefix = 'openshift3/ose-'
+
+ matching_pods = []
+ image_regex = image_prefix + component + r'.*'
+ for pod in pods:
+ for container in pod['spec']['containers']:
+ if re.search(image_regex, container['image']):
+ matching_pods.append(pod)
+ break # stop here, don't add a pod more than once
+
+ return matching_pods
+
+
+def lib_utils_oo_image_tag_to_rpm_version(version, include_dash=False):
+ """ Convert an image tag string to an RPM version if necessary
+ Empty strings and strings that are already in rpm version format
+ are ignored. Also remove non semantic version components.
+
+ Ex. v3.2.0.10 -> -3.2.0.10
+ v1.2.0-rc1 -> -1.2.0
+ """
+ if not isinstance(version, string_types):
+ raise errors.AnsibleFilterError("|failed expects a string or unicode")
+ if version.startswith("v"):
+ version = version[1:]
+ # Strip release from requested version, we no longer support this.
+ version = version.split('-')[0]
+
+ if include_dash and version and not version.startswith("-"):
+ version = "-" + version
+
+ return version
+
+
+def lib_utils_oo_hostname_from_url(url):
+ """ Returns the hostname contained in a URL
+
+ Ex: https://ose3-master.example.com/v1/api -> ose3-master.example.com
+ """
+ if not isinstance(url, string_types):
+ raise errors.AnsibleFilterError("|failed expects a string or unicode")
+ parse_result = urlparse(url)
+ if parse_result.netloc != '':
+ return parse_result.netloc
+ else:
+ # netloc wasn't parsed, assume url was missing scheme and path
+ return parse_result.path
+
+
+# pylint: disable=invalid-name, unused-argument
+def lib_utils_oo_loadbalancer_frontends(
+ api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
+ """TODO: Document me."""
+ loadbalancer_frontends = [{'name': 'atomic-openshift-api',
+ 'mode': 'tcp',
+ 'options': ['tcplog'],
+ 'binds': ["*:{0}".format(api_port)],
+ 'default_backend': 'atomic-openshift-api'}]
+ if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
+ loadbalancer_frontends.append({'name': 'nuage-monitor',
+ 'mode': 'tcp',
+ 'options': ['tcplog'],
+ 'binds': ["*:{0}".format(nuage_rest_port)],
+ 'default_backend': 'nuage-monitor'})
+ return loadbalancer_frontends
+
+
+# pylint: disable=invalid-name
+def lib_utils_oo_loadbalancer_backends(
+ api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
+ """TODO: Document me."""
+ loadbalancer_backends = [{'name': 'atomic-openshift-api',
+ 'mode': 'tcp',
+ 'option': 'tcplog',
+ 'balance': 'source',
+ 'servers': haproxy_backend_masters(servers_hostvars, api_port)}]
+ if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
+ # pylint: disable=line-too-long
+ loadbalancer_backends.append({'name': 'nuage-monitor',
+ 'mode': 'tcp',
+ 'option': 'tcplog',
+ 'balance': 'source',
+ 'servers': haproxy_backend_masters(servers_hostvars, nuage_rest_port)})
+ return loadbalancer_backends
+
+
+def lib_utils_oo_chomp_commit_offset(version):
+ """Chomp any "+git.foo" commit offset string from the given `version`
+ and return the modified version string.
+
+Ex:
+- chomp_commit_offset(None) => None
+- chomp_commit_offset(1337) => "1337"
+- chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
+- chomp_commit_offset("v3.4.0.15") => "v3.4.0.15"
+- chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0"
+ """
+ if version is None:
+ return version
+ else:
+ # Stringify, just in case it's a Number type. Split by '+' and
+ # return the first split. No concerns about strings without a
+ # '+', .split() returns an array of the original string.
+ return str(version).split('+')[0]
+
+
+def lib_utils_oo_random_word(length, source='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
+ """Generates a random string of given length from a set of alphanumeric characters.
+ The default source uses [a-z][A-Z][0-9]
+ Ex:
+ - lib_utils_oo_random_word(3) => aB9
+ - lib_utils_oo_random_word(4, source='012') => 0120
+ """
+ return ''.join(random.choice(source) for i in range(length))
+
+
+def lib_utils_oo_contains_rule(source, apiGroups, resources, verbs):
+ '''Return true if the specified rule is contained within the provided source'''
+
+ rules = source['rules']
+
+ if rules:
+ for rule in rules:
+ if set(rule['apiGroups']) == set(apiGroups):
+ if set(rule['resources']) == set(resources):
+ if set(rule['verbs']) == set(verbs):
+ return True
+
+ return False
+
+
+def lib_utils_oo_selector_to_string_list(user_dict):
+ """Convert a dict of selectors to a key=value list of strings
+
+Given input of {'region': 'infra', 'zone': 'primary'} returns a list
+of items as ['region=infra', 'zone=primary']
+ """
+ selectors = []
+ for key in user_dict:
+ selectors.append("{}={}".format(key, user_dict[key]))
+ return selectors
+
+
+def lib_utils_oo_filter_sa_secrets(sa_secrets, secret_hint='-token-'):
+ """Parse the Service Account Secrets list, `sa_secrets`, (as from
+oc_serviceaccount_secret:state=list) and return the name of the secret
+containing the `secret_hint` string. For example, by default this will
+return the name of the secret holding the SA bearer token.
+
+Only provide the 'results' object to this filter. This filter expects
+to receive a list like this:
+
+ [
+ {
+ "name": "management-admin-dockercfg-p31s2"
+ },
+ {
+ "name": "management-admin-token-bnqsh"
+ }
+ ]
+
+
+Returns:
+
+* `secret_name` [string] - The name of the secret matching the
+ `secret_hint` parameter. By default this is the secret holding the
+ SA's bearer token.
+
+Example playbook usage:
+
+Register the return value from oc_serviceaccount_secret and pass
+that result to this filter plugin.
+
+ - name: Get all SA Secrets
+ oc_serviceaccount_secret:
+ state: list
+ service_account: management-admin
+ namespace: management-infra
+ register: sa
+
+ - name: Save the SA bearer token secret name
+ set_fact:
+ management_token: "{{ sa.results | lib_utils_oo_filter_sa_secrets }}"
+
+ - name: Get the SA bearer token value
+ oc_secret:
+ state: list
+ name: "{{ management_token }}"
+ namespace: management-infra
+ decode: true
+ register: sa_secret
+
+ - name: Print the bearer token value
+ debug:
+ var: sa_secret.results.decoded.token
+
+ """
+ secret_name = None
+
+ for secret in sa_secrets:
+ # each secret is a hash
+ if secret['name'].find(secret_hint) == -1:
+ continue
+ else:
+ secret_name = secret['name']
+ break
+
+ return secret_name
+
+
+def map_from_pairs(source, delim="="):
+ ''' Returns a dict given the source and delim delimited '''
+ if source == '':
+ return dict()
+
+ return dict(item.split(delim) for item in source.split(","))
+
+
+class FilterModule(object):
+ """ Custom ansible filter mapping """
+
+ # pylint: disable=no-self-use, too-few-public-methods
+ def filters(self):
+ """ returns a mapping of filters to methods """
+ return {
+ "lib_utils_oo_select_keys": lib_utils_oo_select_keys,
+ "lib_utils_oo_select_keys_from_list": lib_utils_oo_select_keys_from_list,
+ "lib_utils_oo_chomp_commit_offset": lib_utils_oo_chomp_commit_offset,
+ "lib_utils_oo_collect": lib_utils_oo_collect,
+ "lib_utils_oo_pdb": lib_utils_oo_pdb,
+ "lib_utils_oo_prepend_strings_in_list": lib_utils_oo_prepend_strings_in_list,
+ "lib_utils_oo_dict_to_list_of_dict": lib_utils_oo_dict_to_list_of_dict,
+ "lib_utils_oo_split": lib_utils_oo_split,
+ "lib_utils_oo_dict_to_keqv_list": lib_utils_oo_dict_to_keqv_list,
+ "lib_utils_oo_list_to_dict": lib_utils_oo_list_to_dict,
+ "lib_utils_oo_parse_named_certificates": lib_utils_oo_parse_named_certificates,
+ "lib_utils_oo_generate_secret": lib_utils_oo_generate_secret,
+ "lib_utils_oo_pods_match_component": lib_utils_oo_pods_match_component,
+ "lib_utils_oo_image_tag_to_rpm_version": lib_utils_oo_image_tag_to_rpm_version,
+ "lib_utils_oo_hostname_from_url": lib_utils_oo_hostname_from_url,
+ "lib_utils_oo_loadbalancer_frontends": lib_utils_oo_loadbalancer_frontends,
+ "lib_utils_oo_loadbalancer_backends": lib_utils_oo_loadbalancer_backends,
+ "lib_utils_to_padded_yaml": lib_utils_to_padded_yaml,
+ "lib_utils_oo_random_word": lib_utils_oo_random_word,
+ "lib_utils_oo_contains_rule": lib_utils_oo_contains_rule,
+ "lib_utils_oo_selector_to_string_list": lib_utils_oo_selector_to_string_list,
+ "lib_utils_oo_filter_sa_secrets": lib_utils_oo_filter_sa_secrets,
+ "map_from_pairs": map_from_pairs
+ }
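Since all of these filters are plain functions, they can be exercised directly in Python before being wired into a play. A quick smoke test of a few of them, matching their docstrings:

```
# Direct calls to a few of the filters defined above (no Ansible needed).
pods_like = [{'a': 1, 'z': 'z'}, {'a': 2, 'z': 'z'}, {'a': 3, 'z': 'b'}]
print(lib_utils_oo_collect(pods_like, attribute='a', filters={'z': 'z'}))
# -> [1, 2]

print(lib_utils_oo_dict_to_keqv_list({'a': 1, 'b': 2}))
# -> ['a=1', 'b=2']

print(lib_utils_oo_hostname_from_url('https://ose3-master.example.com/v1/api'))
# -> 'ose3-master.example.com'

print(lib_utils_oo_image_tag_to_rpm_version('v3.2.0.10', include_dash=True))
# -> '-3.2.0.10'
```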
diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/lib_utils/filter_plugins/openshift_aws_filters.py
index dfcb11da3..dfcb11da3 100644
--- a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
+++ b/roles/lib_utils/filter_plugins/openshift_aws_filters.py
diff --git a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py b/roles/lib_utils/filter_plugins/openshift_hosted_filters.py
index 003ce5f9e..003ce5f9e 100644
--- a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py
+++ b/roles/lib_utils/filter_plugins/openshift_hosted_filters.py
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/lib_utils/filter_plugins/openshift_master.py
index ff15f693b..e67b19c28 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/lib_utils/filter_plugins/openshift_master.py
@@ -10,11 +10,7 @@ from ansible import errors
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.filter.core import to_bool as ansible_bool
-# ansible.compat.six goes away with Ansible 2.4
-try:
- from ansible.compat.six import string_types, u
-except ImportError:
- from ansible.module_utils.six import string_types, u
+from ansible.module_utils.six import string_types, u
import yaml
diff --git a/roles/etcd/library/delegated_serial_command.py b/roles/lib_utils/library/delegated_serial_command.py
index 0cab1ca88..0cab1ca88 100755
--- a/roles/etcd/library/delegated_serial_command.py
+++ b/roles/lib_utils/library/delegated_serial_command.py
diff --git a/library/kubeclient_ca.py b/roles/lib_utils/library/kubeclient_ca.py
index a89a5574f..a89a5574f 100644
--- a/library/kubeclient_ca.py
+++ b/roles/lib_utils/library/kubeclient_ca.py
diff --git a/library/modify_yaml.py b/roles/lib_utils/library/modify_yaml.py
index 9b8f9ba33..9b8f9ba33 100755..100644
--- a/library/modify_yaml.py
+++ b/roles/lib_utils/library/modify_yaml.py
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/lib_utils/library/openshift_cert_expiry.py
index e355266b0..e355266b0 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/lib_utils/library/openshift_cert_expiry.py
diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/lib_utils/library/openshift_container_binary_sync.py
index 440b8ec28..440b8ec28 100644
--- a/roles/openshift_cli/library/openshift_container_binary_sync.py
+++ b/roles/lib_utils/library/openshift_container_binary_sync.py
diff --git a/roles/lib_os_firewall/library/os_firewall_manage_iptables.py b/roles/lib_utils/library/os_firewall_manage_iptables.py
index aeee3ede8..aeee3ede8 100755..100644
--- a/roles/lib_os_firewall/library/os_firewall_manage_iptables.py
+++ b/roles/lib_utils/library/os_firewall_manage_iptables.py
diff --git a/library/rpm_q.py b/roles/lib_utils/library/rpm_q.py
index 3dec50fc2..3dec50fc2 100644
--- a/library/rpm_q.py
+++ b/roles/lib_utils/library/rpm_q.py
diff --git a/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py
new file mode 100644
index 000000000..4858c5ec6
--- /dev/null
+++ b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py
@@ -0,0 +1,143 @@
+# pylint: disable=missing-docstring
+
+import re
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+ # pylint: disable=too-many-branches,too-many-statements,too-many-arguments
+
+ def run(self, terms, variables=None, regions_enabled=True, short_version=None,
+ deployment_type=None, **kwargs):
+
+ predicates = []
+
+ if short_version is None or deployment_type is None:
+ if 'openshift' not in variables:
+ raise AnsibleError("This lookup module requires openshift_facts to be run prior to use")
+
+ if deployment_type is None:
+ if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']:
+ raise AnsibleError("This lookup module requires that the deployment_type be set")
+
+ deployment_type = variables['openshift']['common']['deployment_type']
+
+ if short_version is None:
+ if 'short_version' in variables['openshift']['common']:
+ short_version = variables['openshift']['common']['short_version']
+ elif 'openshift_release' in variables:
+ release = variables['openshift_release']
+ if release.startswith('v'):
+ short_version = release[1:]
+ else:
+ short_version = release
+ short_version = '.'.join(short_version.split('.')[0:2])
+ elif 'openshift_version' in variables:
+ version = variables['openshift_version']
+ short_version = '.'.join(version.split('.')[0:2])
+ else:
+ # pylint: disable=line-too-long
+ raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
+ if deployment_type == 'origin':
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', '3.7', '3.8', '3.9', 'latest']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+ elif deployment_type == 'openshift-enterprise':
+ if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8', '3.9', 'latest']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+ else:
+ raise AnsibleError("Unknown deployment_type %s" % deployment_type)
+
+ if deployment_type == 'origin':
+ # convert short_version to enterprise short_version
+ short_version = re.sub('^1.', '3.', short_version)
+
+ if short_version == 'latest':
+ short_version = '3.9'
+
+ # Predicates ordered according to OpenShift Origin source:
+ # origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go
+
+ if short_version == '3.1':
+ predicates.extend([
+ {'name': 'PodFitsHostPorts'},
+ {'name': 'PodFitsResources'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'MatchNodeSelector'},
+ ])
+
+ if short_version == '3.2':
+ predicates.extend([
+ {'name': 'PodFitsHostPorts'},
+ {'name': 'PodFitsResources'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MatchNodeSelector'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'}
+ ])
+
+ if short_version == '3.3':
+ predicates.extend([
+ {'name': 'NoDiskConflict'},
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'}
+ ])
+
+ if short_version == '3.4':
+ predicates.extend([
+ {'name': 'NoDiskConflict'},
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'},
+ {'name': 'CheckNodeDiskPressure'},
+ {'name': 'MatchInterPodAffinity'}
+ ])
+
+ if short_version in ['3.5', '3.6']:
+ predicates.extend([
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'MatchInterPodAffinity'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'},
+ {'name': 'CheckNodeDiskPressure'},
+ ])
+
+ if short_version in ['3.7', '3.8', '3.9']:
+ predicates.extend([
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'MaxAzureDiskVolumeCount'},
+ {'name': 'MatchInterPodAffinity'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'},
+ {'name': 'CheckNodeDiskPressure'},
+ {'name': 'NoVolumeNodeConflict'},
+ ])
+
+ if regions_enabled:
+ region_predicate = {
+ 'name': 'Region',
+ 'argument': {
+ 'serviceAffinity': {
+ 'labels': ['region']
+ }
+ }
+ }
+ predicates.append(region_predicate)
+
+ return predicates
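Because `short_version` and `deployment_type` can be passed explicitly, the lookup is easy to exercise without a prior `openshift_facts` run. A hedged sketch (terms and variables go unused when both keyword arguments are supplied):

```
# Direct invocation of the predicates lookup defined above.
lookup = LookupModule()
preds = lookup.run([], variables={}, regions_enabled=True,
                   short_version='3.9', deployment_type='origin')
print([p['name'] for p in preds])
# -> the 3.7/3.8/3.9 predicate list, ending with 'NoVolumeNodeConflict'
#    plus the 'Region' serviceAffinity predicate
```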
diff --git a/roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py
new file mode 100644
index 000000000..18e1b2e0c
--- /dev/null
+++ b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py
@@ -0,0 +1,117 @@
+# pylint: disable=missing-docstring
+
+import re
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+ # pylint: disable=too-many-branches,too-many-statements,too-many-arguments
+
+ def run(self, terms, variables=None, zones_enabled=True, short_version=None,
+ deployment_type=None, **kwargs):
+
+ priorities = []
+
+ if short_version is None or deployment_type is None:
+ if 'openshift' not in variables:
+ raise AnsibleError("This lookup module requires openshift_facts to be run prior to use")
+
+ if deployment_type is None:
+ if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']:
+ raise AnsibleError("This lookup module requires that the deployment_type be set")
+
+ deployment_type = variables['openshift']['common']['deployment_type']
+
+ if short_version is None:
+ if 'short_version' in variables['openshift']['common']:
+ short_version = variables['openshift']['common']['short_version']
+ elif 'openshift_release' in variables:
+ release = variables['openshift_release']
+ if release.startswith('v'):
+ short_version = release[1:]
+ else:
+ short_version = release
+ short_version = '.'.join(short_version.split('.')[0:2])
+ elif 'openshift_version' in variables:
+ version = variables['openshift_version']
+ short_version = '.'.join(version.split('.')[0:2])
+ else:
+ # pylint: disable=line-too-long
+ raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
+
+ if deployment_type == 'origin':
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', '3.7', '3.8', '3.9', 'latest']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+ elif deployment_type == 'openshift-enterprise':
+ if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8', '3.9', 'latest']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+ else:
+ raise AnsibleError("Unknown deployment_type %s" % deployment_type)
+
+ if deployment_type == 'origin':
+ # convert origin short_version (1.x) to enterprise-style (3.x) numbering
+ short_version = re.sub('^1.', '3.', short_version)
+
+ if short_version == 'latest':
+ short_version = '3.9'
+
+ if short_version == '3.1':
+ priorities.extend([
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1}
+ ])
+
+ if short_version == '3.2':
+ priorities.extend([
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'NodeAffinityPriority', 'weight': 1}
+ ])
+
+ if short_version == '3.3':
+ priorities.extend([
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'NodeAffinityPriority', 'weight': 1},
+ {'name': 'TaintTolerationPriority', 'weight': 1}
+ ])
+
+ if short_version == '3.4':
+ priorities.extend([
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
+ {'name': 'NodeAffinityPriority', 'weight': 1},
+ {'name': 'TaintTolerationPriority', 'weight': 1},
+ {'name': 'InterPodAffinityPriority', 'weight': 1}
+ ])
+
+ if short_version in ['3.5', '3.6', '3.7', '3.8', '3.9']:
+ priorities.extend([
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'InterPodAffinityPriority', 'weight': 1},
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
+ {'name': 'NodeAffinityPriority', 'weight': 1},
+ {'name': 'TaintTolerationPriority', 'weight': 1}
+ ])
+
+ if zones_enabled:
+ zone_priority = {
+ 'name': 'Zone',
+ 'argument': {
+ 'serviceAntiAffinity': {
+ 'label': 'zone'
+ }
+ },
+ 'weight': 2
+ }
+ priorities.append(zone_priority)
+
+ return priorities
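A companion sketch for the priorities plugin, showing how short_version falls back to openshift_release when the facts lack it (values are illustrative, and the module must be importable as in the conftest.py below):

```python
# Illustrative only -- not part of the patch.
from openshift_master_facts_default_priorities import LookupModule

facts = {'openshift': {'common': {'deployment_type': 'origin'}},
         'openshift_release': 'v3.9'}  # the 'v' prefix is stripped by the lookup
priorities = LookupModule().run([], variables=facts, zones_enabled=True)
assert {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000} in priorities
assert priorities[-1]['name'] == 'Zone'  # appended when zones_enabled is True
```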
diff --git a/roles/openshift_certificate_expiry/test/conftest.py b/roles/lib_utils/test/conftest.py
index df948fff0..aabdd4fa1 100644
--- a/roles/openshift_certificate_expiry/test/conftest.py
+++ b/roles/lib_utils/test/conftest.py
@@ -1,7 +1,15 @@
# pylint: disable=missing-docstring,invalid-name,redefined-outer-name
+import os
import pytest
+import sys
+
from OpenSSL import crypto
+sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, "lookup_plugins"))
+
+from openshift_master_facts_default_predicates import LookupModule as PredicatesLookupModule # noqa: E402
+from openshift_master_facts_default_priorities import LookupModule as PrioritiesLookupModule # noqa: E402
+
# Parameter list for valid_cert fixture
VALID_CERTIFICATE_PARAMS = [
{
@@ -117,3 +125,48 @@ def valid_cert(request, ca):
'cert_file': cert_file,
'cert': cert
}
+
+
+@pytest.fixture()
+def predicates_lookup():
+ return PredicatesLookupModule()
+
+
+@pytest.fixture()
+def priorities_lookup():
+ return PrioritiesLookupModule()
+
+
+@pytest.fixture()
+def facts():
+ return {
+ 'openshift': {
+ 'common': {}
+ }
+ }
+
+
+@pytest.fixture(params=[True, False])
+def regions_enabled(request):
+ return request.param
+
+
+@pytest.fixture(params=[True, False])
+def zones_enabled(request):
+ return request.param
+
+
+def v_prefix(release):
+ """Prefix a release number with 'v'."""
+ return "v" + release
+
+
+def minor(release):
+ """Add a suffix to release, making 'X.Y' become 'X.Y.Z'."""
+ return release + ".1"
+
+
+@pytest.fixture(params=[str, v_prefix, minor])
+def release_mod(request):
+ """Modifies a release string to alternative valid values."""
+ return request.param
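A hypothetical test composing these fixtures; the expected entry comes from the 3.9 defaults above, and deployment_type is passed explicitly since the facts fixture leaves it unset:

```python
# Hypothetical example, not part of the patch.
def test_priorities_from_release(priorities_lookup, facts, release_mod,
                                 zones_enabled):
    facts['openshift_release'] = release_mod('3.9')  # '3.9', 'v3.9' or '3.9.1'
    results = priorities_lookup.run(None, variables=facts,
                                    zones_enabled=zones_enabled,
                                    deployment_type='origin')
    assert {'name': 'SelectorSpreadPriority', 'weight': 1} in results
```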
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py b/roles/lib_utils/test/openshift_master_facts_bad_input_tests.py
index e8da1e04a..e8da1e04a 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py
+++ b/roles/lib_utils/test/openshift_master_facts_bad_input_tests.py
diff --git a/roles/openshift_master_facts/test/conftest.py b/roles/lib_utils/test/openshift_master_facts_conftest.py
index 140cced73..140cced73 100644
--- a/roles/openshift_master_facts/test/conftest.py
+++ b/roles/lib_utils/test/openshift_master_facts_conftest.py
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py
index 11aad9f03..11aad9f03 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
+++ b/roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py b/roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py
index 527fc9ff4..527fc9ff4 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
+++ b/roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py
diff --git a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py b/roles/lib_utils/test/test_fakeopensslclasses.py
index 8a521a765..8a521a765 100644
--- a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
+++ b/roles/lib_utils/test/test_fakeopensslclasses.py
diff --git a/roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py b/roles/lib_utils/test/test_load_and_handle_cert.py
index 98792e2ee..98792e2ee 100644
--- a/roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py
+++ b/roles/lib_utils/test/test_load_and_handle_cert.py
diff --git a/roles/nickhammond.logrotate/tasks/main.yml b/roles/nickhammond.logrotate/tasks/main.yml
index 677f206ea..50ad7e373 100644
--- a/roles/nickhammond.logrotate/tasks/main.yml
+++ b/roles/nickhammond.logrotate/tasks/main.yml
@@ -1,7 +1,7 @@
---
- name: nickhammond.logrotate | Install logrotate
package: name=logrotate state=present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
diff --git a/roles/nuage_ca/meta/main.yml b/roles/nuage_ca/meta/main.yml
index 36838debc..0d0b8d1a5 100644
--- a/roles/nuage_ca/meta/main.yml
+++ b/roles/nuage_ca/meta/main.yml
@@ -13,4 +13,4 @@ galaxy_info:
- cloud
- system
dependencies:
-- { role: nuage_common }
+- role: nuage_common
diff --git a/roles/nuage_ca/tasks/main.yaml b/roles/nuage_ca/tasks/main.yaml
index d96d0d802..cb7844bc5 100644
--- a/roles/nuage_ca/tasks/main.yaml
+++ b/roles/nuage_ca/tasks/main.yaml
@@ -1,7 +1,7 @@
---
- name: Install openssl
package: name=openssl state=present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
diff --git a/roles/nuage_common/tasks/main.yml b/roles/nuage_common/tasks/main.yml
index 6c8c9f8d2..ec42518ff 100644
--- a/roles/nuage_common/tasks/main.yml
+++ b/roles/nuage_common/tasks/main.yml
@@ -2,17 +2,17 @@
- name: Set the Nuage plugin openshift directory fact to handle Atomic host install
set_fact:
nuage_node_plugin_dir: /var/usr/share/vsp-openshift
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Set the Nuage CNI network config directory fact to handle Atomic host install
set_fact:
nuage_node_cni_netconf_dir: /var/etc/cni/net.d/
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Set the Nuage CNI binary directory fact to handle Atomic host install
set_fact:
nuage_node_cni_bin_dir: /var/opt/cni/bin/
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Assure CNI plugin config dir exists before daemon set install
become: yes
diff --git a/roles/nuage_master/meta/main.yml b/roles/nuage_master/meta/main.yml
index e2f7af5ad..643800680 100644
--- a/roles/nuage_master/meta/main.yml
+++ b/roles/nuage_master/meta/main.yml
@@ -14,4 +14,4 @@ galaxy_info:
- system
dependencies:
- role: lib_openshift
-- role: lib_os_firewall
+- role: lib_utils
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index c264427de..29e16b6f8 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -5,22 +5,22 @@
- name: Set the Nuage certificate directory fact for Atomic hosts
set_fact:
cert_output_dir: /var/usr/share/nuage-openshift-monitor
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Set the Nuage kubeconfig file path fact for Atomic hosts
set_fact:
kube_config: /var/usr/share/nuage-openshift-monitor/nuage.kubeconfig
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Set the Nuage monitor yaml location fact for Atomic hosts
set_fact:
kubemon_yaml: /var/usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Set the Nuage monitor certs location fact for Atomic hosts
set_fact:
nuage_master_crt_dir: /var/usr/share/nuage-openshift-monitor/
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Set the Nuage master config directory for daemon sets install
set_fact:
@@ -35,27 +35,27 @@
- name: Set the Nuage CNI plugin binary directory for daemon sets install
set_fact:
nuage_cni_bin_dsets_mount_dir: /var/opt/cni/bin
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Create directory /usr/share/nuage-openshift-monitor
become: yes
file: path=/usr/share/nuage-openshift-monitor state=directory
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
- name: Create directory /var/usr/share/nuage-openshift-monitor
become: yes
file: path=/var/usr/share/nuage-openshift-monitor state=directory
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Create directory /var/usr/bin for monitor binary on atomic
become: yes
file: path=/var/usr/bin state=directory
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Create CNI bin directory /var/opt/cni/bin
become: yes
file: path=/var/opt/cni/bin state=directory
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Create the log directory
become: yes
diff --git a/roles/nuage_master/tasks/serviceaccount.yml b/roles/nuage_master/tasks/serviceaccount.yml
index fbf2c4f8d..9127b33d6 100644
--- a/roles/nuage_master/tasks/serviceaccount.yml
+++ b/roles/nuage_master/tasks/serviceaccount.yml
@@ -19,7 +19,7 @@
- name: Generate the node client config
command: >
- {{ openshift.common.client_binary }} adm create-api-client-config
+ {{ openshift_client_binary }} adm create-api-client-config
--certificate-authority={{ openshift_master_ca_cert }}
--client-dir={{ cert_output_dir }}
--master={{ openshift.master.api_url }}
diff --git a/roles/nuage_node/meta/main.yml b/roles/nuage_node/meta/main.yml
index 9b0315054..0480502b7 100644
--- a/roles/nuage_node/meta/main.yml
+++ b/roles/nuage_node/meta/main.yml
@@ -15,4 +15,4 @@ galaxy_info:
dependencies:
- role: nuage_common
- role: nuage_ca
-- role: lib_os_firewall
+- role: lib_utils
diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml
index c6b7a9b10..1f1bd1653 100644
--- a/roles/nuage_node/tasks/main.yaml
+++ b/roles/nuage_node/tasks/main.yaml
@@ -2,17 +2,17 @@
- name: Set the Nuage plugin openshift directory fact for Atomic hosts
set_fact:
vsp_openshift_dir: /var/usr/share/vsp-openshift
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Set the Nuage CNI binary directory fact for Atomic hosts
set_fact:
cni_bin_dir: /var/opt/cni/bin/
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Set the Nuage plugin certs directory fact for Atomic hosts
set_fact:
nuage_plugin_crt_dir: /var/usr/share/vsp-openshift
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Assure CNI conf dir exists
become: yes
@@ -36,7 +36,7 @@
- name: Add additional Docker mounts for Nuage for atomic hosts
become: yes
lineinfile: dest="{{ openshift_atomic_node_config_file }}" line="{{ nuage_atomic_docker_additional_mounts }}"
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Restart node services
command: /bin/true
diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md
index 4aca5c7a8..de73ab01d 100644
--- a/roles/openshift_aws/README.md
+++ b/roles/openshift_aws/README.md
@@ -7,9 +7,9 @@ This role contains many task-areas to provision resources and perform actions
against an AWS account for the purposes of dynamically building an openshift
cluster.
-This role is primarily intended to be used with "include_role" and "tasks_from".
+This role is primarily intended to be used with "import_role" and "tasks_from".
-include_role can be called from the tasks section in a play. See example
+import_role can be called from the tasks section in a play. See example
playbook below for reference.
These task-areas are:
@@ -40,7 +40,7 @@ Example Playbook
----------------
```yaml
-- include_role:
+- import_role:
name: openshift_aws
tasks_from: vpc.yml
vars:
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 74e5d1dde..efd2468b2 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -98,17 +98,26 @@ openshift_aws_elb_dict:
proxy_protocol: True
openshift_aws_node_group_config_master_volumes:
+- device_name: /dev/sda1
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: False
- device_name: /dev/sdb
volume_size: 100
device_type: gp2
delete_on_termination: False
openshift_aws_node_group_config_node_volumes:
+- device_name: /dev/sda1
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: True
- device_name: /dev/sdb
volume_size: 100
device_type: gp2
delete_on_termination: True
+# build_instance_tags is a custom filter in role lib_utils
openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
openshift_aws_node_group_termination_policy: Default
openshift_aws_node_group_replace_instances: []
@@ -122,12 +131,25 @@ openshift_aws_ami_map:
openshift_aws_master_group:
- name: "{{ openshift_aws_clusterid }} master group"
group: master
+ tags:
+ host-type: master
+ sub-host-type: default
+ runtime: docker
openshift_aws_node_groups:
- name: "{{ openshift_aws_clusterid }} compute group"
group: compute
+ tags:
+ host-type: node
+ sub-host-type: compute
+ runtime: docker
+
- name: "{{ openshift_aws_clusterid }} infra group"
group: infra
+ tags:
+ host-type: node
+ sub-host-type: infra
+ runtime: docker
openshift_aws_created_asgs: []
openshift_aws_current_asgs: []
@@ -144,10 +166,6 @@ openshift_aws_master_group_config:
min_size: 3
max_size: 3
desired_size: 3
- tags:
- host-type: master
- sub-host-type: default
- runtime: docker
wait_for_instances: True
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
@@ -167,10 +185,6 @@ openshift_aws_node_group_config:
min_size: 3
max_size: 100
desired_size: 3
- tags:
- host-type: node
- sub-host-type: compute
- runtime: docker
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
iam_role: "{{ openshift_aws_iam_role_name }}"
@@ -186,10 +200,6 @@ openshift_aws_node_group_config:
min_size: 2
max_size: 20
desired_size: 2
- tags:
- host-type: node
- sub-host-type: infra
- runtime: docker
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
iam_role: "{{ openshift_aws_iam_role_name }}"
@@ -200,6 +210,7 @@ openshift_aws_node_group_config:
openshift_aws_elb_tags: "{{ openshift_aws_kube_tags }}"
openshift_aws_elb_az_load_balancing: False
+# build_instance_tags is a custom filter in role lib_utils
openshift_aws_kube_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
openshift_aws_elb_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
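The comments above point at build_instance_tags in lib_utils; conceptually it expands a cluster id into the common instance tag dict, along the lines of this sketch (the real filter may set additional keys):

```python
# Conceptual sketch only; see roles/lib_utils for the real filter.
def build_instance_tags(clusterid):
    return {
        'clusterid': clusterid,
        'kubernetes.io/cluster/{}'.format(clusterid): clusterid,
    }
```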
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
index 7fb617dd5..a9f9cc3c4 100644
--- a/roles/openshift_aws/tasks/build_node_group.yml
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -30,7 +30,7 @@
- name: query all asg's for this cluster
ec2_asg_facts:
region: "{{ openshift_aws_region }}"
- tags: "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} | combine(l_node_group_config[openshift_aws_node_group.group].tags) }}"
+ tags: "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} | combine(openshift_aws_node_group.tags) }}"
register: asgs
- fail:
@@ -43,6 +43,7 @@
- name: set the value for the deployment_serial and the current asgs
set_fact:
+ # scale_groups_serial is a custom filter in role lib_utils
l_deployment_serial: "{{ openshift_aws_node_group_deployment_serial if openshift_aws_node_group_deployment_serial is defined else asgs.results | scale_groups_serial(openshift_aws_node_group_upgrade) }}"
openshift_aws_current_asgs: "{{ asgs.results | map(attribute='auto_scaling_group_name') | list | union(openshift_aws_current_asgs) }}"
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
index 786a2e4cf..2b5f317d8 100644
--- a/roles/openshift_aws/tasks/provision.yml
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -1,23 +1,6 @@
---
-- when: openshift_aws_create_iam_cert | bool
- name: create the iam_cert for elb certificate
- include_tasks: iam_cert.yml
-
-- when: openshift_aws_create_s3 | bool
- name: create s3 bucket for registry
- include_tasks: s3.yml
-
- include_tasks: vpc_and_subnet_id.yml
-- name: create elbs
- include_tasks: elb.yml
- with_dict: "{{ openshift_aws_elb_dict }}"
- vars:
- l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
- l_openshift_aws_elb_name_dict: "{{ openshift_aws_elb_name_dict }}"
- loop_control:
- loop_var: l_elb_dict_item
-
- name: include scale group creation for master
include_tasks: build_node_group.yml
with_items: "{{ openshift_aws_master_group }}"
diff --git a/roles/openshift_aws/tasks/provision_elb.yml b/roles/openshift_aws/tasks/provision_elb.yml
new file mode 100644
index 000000000..a52f63bd5
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision_elb.yml
@@ -0,0 +1,15 @@
+---
+- when: openshift_aws_create_iam_cert | bool
+ name: create the iam_cert for elb certificate
+ include_tasks: iam_cert.yml
+
+- include_tasks: vpc_and_subnet_id.yml
+
+- name: create elbs
+ include_tasks: elb.yml
+ with_dict: "{{ openshift_aws_elb_dict }}"
+ vars:
+ l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
+ l_openshift_aws_elb_name_dict: "{{ openshift_aws_elb_name_dict }}"
+ loop_control:
+ loop_var: l_elb_dict_item
diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml
index 696b323c0..786db1570 100644
--- a/roles/openshift_aws/tasks/provision_instance.yml
+++ b/roles/openshift_aws/tasks/provision_instance.yml
@@ -14,11 +14,7 @@
instance_type: m4.xlarge
vpc_subnet_id: "{{ openshift_aws_subnet_id | default(subnetout.subnets[0].id) }}"
image: "{{ openshift_aws_base_ami }}"
- volumes:
- - device_name: /dev/sdb
- volume_type: gp2
- volume_size: 100
- delete_on_termination: true
+ volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
wait: yes
exact_count: 1
count_tag:
@@ -46,5 +42,5 @@
- name: add host to nodes
add_host:
- groups: nodes
+ groups: nodes,g_new_node_hosts
name: "{{ instancesout.instances[0].public_dns_name }}"
diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml
index d82f18574..9105b5b4c 100644
--- a/roles/openshift_aws/tasks/provision_nodes.yml
+++ b/roles/openshift_aws/tasks/provision_nodes.yml
@@ -2,25 +2,12 @@
# Get bootstrap config token
# bootstrap should be created on first master
# need to fetch it and shove it into cloud data
-- name: fetch master instances
- ec2_instance_facts:
- region: "{{ openshift_aws_region }}"
- filters:
- "tag:clusterid": "{{ openshift_aws_clusterid }}"
- "tag:host-type": master
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until:
- - "'instances' in instancesout"
- - instancesout.instances|length > 0
+- include_tasks: setup_master_group.yml
- name: slurp down the bootstrap.kubeconfig
slurp:
src: /etc/origin/master/bootstrap.kubeconfig
- delegate_to: "{{ instancesout.instances[0].public_ip_address }}"
- remote_user: root
+ delegate_to: "{{ groups.masters.0 }}"
register: bootstrap
- name: set_fact for kubeconfig token
diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml
index 3632f7ce9..6ce8c58ba 100644
--- a/roles/openshift_aws/tasks/scale_group.yml
+++ b/roles/openshift_aws/tasks/scale_group.yml
@@ -22,7 +22,7 @@
else (l_node_group_config[openshift_aws_node_group.group].replace_all_instances | default(omit)) }}"
tags:
- "{{ openshift_aws_node_group_config_tags
- | combine(l_node_group_config[openshift_aws_node_group.group].tags)
+ | combine(openshift_aws_node_group.tags)
| combine({'deployment_serial': l_deployment_serial, 'ami': openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami)}) }}"
- name: append the asg name to the openshift_aws_created_asgs fact
diff --git a/roles/openshift_aws/tasks/wait_for_groups.yml b/roles/openshift_aws/tasks/wait_for_groups.yml
index 1f4ef3e1c..3ad876e37 100644
--- a/roles/openshift_aws/tasks/wait_for_groups.yml
+++ b/roles/openshift_aws/tasks/wait_for_groups.yml
@@ -8,6 +8,7 @@
tags:
"{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid } }}"
register: qasg
+ # scale_groups_match_capacity is a custom filter in role lib_utils
until: qasg | json_query('results[*]') | scale_groups_match_capacity | bool
delay: 10
retries: 60
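scale_groups_match_capacity is likewise a lib_utils filter; the until loop above keeps polling until something like the following holds (a conceptual sketch, not the actual implementation):

```python
# Conceptual sketch only; the real filter lives in roles/lib_utils.
def scale_groups_match_capacity(scale_groups):
    """True once every scale group runs as many healthy instances as desired."""
    for group in scale_groups:
        healthy = [i for i in group.get('instances', [])
                   if i.get('health_status') == 'Healthy']
        if len(healthy) < group.get('desired_capacity', 0):
            return False
    return True
```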
diff --git a/roles/openshift_builddefaults/meta/main.yml b/roles/openshift_builddefaults/meta/main.yml
index 422d08400..60ac189a8 100644
--- a/roles/openshift_builddefaults/meta/main.yml
+++ b/roles/openshift_builddefaults/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_buildoverrides/meta/main.yml b/roles/openshift_buildoverrides/meta/main.yml
index e9d2e8712..edca92e6f 100644
--- a/roles/openshift_buildoverrides/meta/main.yml
+++ b/roles/openshift_buildoverrides/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_buildoverrides/vars/main.yml b/roles/openshift_buildoverrides/vars/main.yml
index cf49a6ebf..df53280c8 100644
--- a/roles/openshift_buildoverrides/vars/main.yml
+++ b/roles/openshift_buildoverrides/vars/main.yml
@@ -9,3 +9,4 @@ buildoverrides_yaml:
imageLabels: "{{ openshift_buildoverrides_image_labels | default(None) }}"
nodeSelector: "{{ openshift_buildoverrides_nodeselectors | default(None) }}"
annotations: "{{ openshift_buildoverrides_annotations | default(None) }}"
+ tolerations: "{{ openshift_buildoverrides_tolerations | default(None) }}"
diff --git a/roles/openshift_ca/meta/main.yml b/roles/openshift_ca/meta/main.yml
index 81b49ce60..b2081efc6 100644
--- a/roles/openshift_ca/meta/main.yml
+++ b/roles/openshift_ca/meta/main.yml
@@ -15,3 +15,4 @@ galaxy_info:
dependencies:
- role: openshift_cli
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index ea4702248..9c8534c74 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -9,9 +9,9 @@
- name: Install the base package for admin tooling
package:
- name: "{{ openshift_service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
register: install_result
until: install_result is succeeded
delegate_to: "{{ openshift_ca_host }}"
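The renamed filter lib_utils_oo_image_tag_to_rpm_version turns an image tag into an RPM version suffix, roughly as follows (a sketch of the expected behavior, not the lib_utils source):

```python
# Sketch of the expected behavior; see roles/lib_utils for the real filter.
def lib_utils_oo_image_tag_to_rpm_version(tag, include_dash=False):
    if not tag:
        return ''
    version = tag[1:] if tag.startswith('v') else tag  # 'v3.9.0' -> '3.9.0'
    return '-' + version if include_dash else version
```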
@@ -19,7 +19,8 @@
- name: Reload generated facts
openshift_facts:
- when: hostvars[openshift_ca_host].install_result is changed
+ when:
+ - hostvars[openshift_ca_host].install_result | default({'changed':false}) is changed
- name: Create openshift_ca_config_dir if it does not exist
file:
@@ -41,7 +42,7 @@
- set_fact:
master_ca_missing: "{{ False in (g_master_ca_stat_result.results
- | oo_collect(attribute='stat.exists')
+ | lib_utils_oo_collect(attribute='stat.exists')
| list) }}"
run_once: true
@@ -87,11 +88,11 @@
# This should NOT replace the CA due to --overwrite=false when a CA already exists.
- name: Create the master certificates if they do not already exist
command: >
- {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-master-certs
- {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
+ {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-master-certs
+ {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
- {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | oo_collect('path') %}
+ {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | lib_utils_oo_collect('path') %}
--certificate-authority {{ legacy_ca_certificate }}
{% endfor %}
--hostnames={{ hostvars[openshift_ca_host].openshift.common.all_hostnames | join(',') }}
@@ -117,7 +118,7 @@
src: "{{ item }}"
dest: "{{ openshift_ca_clientconfig_tmpdir.stdout }}/"
remote_src: true
- with_items: "{{ g_master_legacy_ca_result.files | default([]) | oo_collect('path') }}"
+ with_items: "{{ g_master_legacy_ca_result.files | default([]) | lib_utils_oo_collect('path') }}"
delegate_to: "{{ openshift_ca_host }}"
run_once: true
- copy:
@@ -137,7 +138,7 @@
- name: Test local loopback context
command: >
- {{ hostvars[openshift_ca_host].openshift.common.client_binary }} config view
+ {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} config view
--config={{ openshift_master_loopback_config }}
changed_when: false
register: loopback_config
@@ -154,9 +155,9 @@
register: openshift_ca_loopback_tmpdir
- name: Generate the loopback master client config
command: >
- {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+ {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm create-api-client-config
--certificate-authority={{ openshift_ca_cert }}
- {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
+ {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
--client-dir={{ openshift_ca_loopback_tmpdir.stdout }}
diff --git a/roles/openshift_certificate_expiry/meta/main.yml b/roles/openshift_certificate_expiry/meta/main.yml
index c13b29ba5..6758f5b36 100644
--- a/roles/openshift_certificate_expiry/meta/main.yml
+++ b/roles/openshift_certificate_expiry/meta/main.yml
@@ -13,4 +13,5 @@ galaxy_info:
categories:
- cloud
- system
-dependencies: []
+dependencies:
+- role: lib_utils
diff --git a/roles/openshift_certificate_expiry/tasks/main.yml b/roles/openshift_certificate_expiry/tasks/main.yml
index b5234bd1e..7062b5060 100644
--- a/roles/openshift_certificate_expiry/tasks/main.yml
+++ b/roles/openshift_certificate_expiry/tasks/main.yml
@@ -7,7 +7,6 @@
register: check_results
- name: Generate expiration report HTML
- become: no
run_once: yes
template:
src: cert-expiry-table.html.j2
@@ -17,11 +16,12 @@
- name: Generate the result JSON string
run_once: yes
- set_fact: json_result_string="{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}"
+ set_fact:
+ # oo_cert_expiry_results_to_json is a custom filter in role lib_utils
+ json_result_string: "{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}"
when: openshift_certificate_expiry_save_json_results|bool
- name: Generate results JSON file
- become: no
run_once: yes
template:
src: save_json_results.j2
diff --git a/roles/openshift_cli/defaults/main.yml b/roles/openshift_cli/defaults/main.yml
index 631a0455e..9faec639f 100644
--- a/roles/openshift_cli/defaults/main.yml
+++ b/roles/openshift_cli/defaults/main.yml
@@ -8,4 +8,4 @@ system_images_registry: "{{ system_images_registry_dict[openshift_deployment_typ
openshift_use_crio_only: False
l_is_system_container_image: "{{ openshift_use_master_system_container | default(openshift_use_system_containers | default(False)) | bool }}"
-l_use_cli_atomic_image: "{{ openshift_use_crio_only or l_is_system_container_image }}"
+l_use_cli_atomic_image: "{{ (openshift_use_crio_only | bool) or (l_is_system_container_image | bool) }}"
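The explicit | bool casts matter because Jinja treats any non-empty string as truthy; a Python analogue of the pitfall the old expression could hit when a variable arrives as a string:

```python
# Analogue of the Jinja truthiness pitfall the added | bool casts avoid.
openshift_use_crio_only = 'False'    # inventory vars often arrive as strings
assert bool(openshift_use_crio_only)  # truthy! a bare 'or' would wrongly succeed
```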
diff --git a/roles/openshift_cli/meta/main.yml b/roles/openshift_cli/meta/main.yml
index 5d2b6abed..e531543b9 100644
--- a/roles/openshift_cli/meta/main.yml
+++ b/roles/openshift_cli/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 68d82e436..ae8d1ace0 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -1,7 +1,7 @@
---
- name: Install clients
package: name={{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }} state=present
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
register: result
until: result is succeeded
@@ -12,13 +12,14 @@
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
+ # openshift_container_binary_sync is a custom module in lib_utils
- name: Copy client binaries/symlinks out of CLI image for use on the host
openshift_container_binary_sync:
image: "{{ openshift_cli_image }}"
tag: "{{ openshift_image_tag }}"
backend: "docker"
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- not l_use_cli_atomic_image | bool
- block:
@@ -28,13 +29,14 @@
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
+ # openshift_container_binary_sync is a custom module in lib_utils
- name: Copy client binaries/symlinks out of CLI image for use on the host
openshift_container_binary_sync:
image: "{{ '' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift_cli_image }}"
tag: "{{ openshift_image_tag }}"
backend: "atomic"
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- l_use_cli_atomic_image | bool
- name: Reload facts to pick up installed OpenShift version
@@ -42,6 +44,6 @@
- name: Install bash completion for oc tools
package: name=bash-completion state=present
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
register: result
until: result is succeeded
diff --git a/roles/openshift_cloud_provider/meta/main.yml b/roles/openshift_cloud_provider/meta/main.yml
index 8ab95bf5a..e49cc4430 100644
--- a/roles/openshift_cloud_provider/meta/main.yml
+++ b/roles/openshift_cloud_provider/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_cloud_provider/tasks/main.yml b/roles/openshift_cloud_provider/tasks/main.yml
index dff492a69..3513577fa 100644
--- a/roles/openshift_cloud_provider/tasks/main.yml
+++ b/roles/openshift_cloud_provider/tasks/main.yml
@@ -19,3 +19,6 @@
- include_tasks: gce.yml
when: cloudprovider_is_gce | bool
+
+- include_tasks: vsphere.yml
+ when: cloudprovider_is_vsphere | bool
diff --git a/roles/openshift_cloud_provider/tasks/vsphere.yml b/roles/openshift_cloud_provider/tasks/vsphere.yml
new file mode 100644
index 000000000..3a33df241
--- /dev/null
+++ b/roles/openshift_cloud_provider/tasks/vsphere.yml
@@ -0,0 +1,11 @@
+---
+- name: Create cloud config
+ template:
+ dest: "{{ openshift.common.config_base }}/cloudprovider/vsphere.conf"
+ src: vsphere.conf.j2
+  when:
+  - openshift_cloudprovider_vsphere_username is defined
+  - openshift_cloudprovider_vsphere_password is defined
+  - openshift_cloudprovider_vsphere_host is defined
+  - openshift_cloudprovider_vsphere_datacenter is defined
+  - openshift_cloudprovider_vsphere_datastore is defined
diff --git a/roles/openshift_cloud_provider/templates/openstack.conf.j2 b/roles/openshift_cloud_provider/templates/openstack.conf.j2
index 313ee02b4..30f18ffa9 100644
--- a/roles/openshift_cloud_provider/templates/openstack.conf.j2
+++ b/roles/openshift_cloud_provider/templates/openstack.conf.j2
@@ -19,3 +19,7 @@ region = {{ openshift_cloudprovider_openstack_region }}
[LoadBalancer]
subnet-id = {{ openshift_cloudprovider_openstack_lb_subnet_id }}
{% endif %}
+{% if openshift_cloudprovider_openstack_blockstorage_version is defined %}
+[BlockStorage]
+bs-version={{ openshift_cloudprovider_openstack_blockstorage_version }}
+{% endif %}
\ No newline at end of file
diff --git a/roles/openshift_cloud_provider/templates/vsphere.conf.j2 b/roles/openshift_cloud_provider/templates/vsphere.conf.j2
new file mode 100644
index 000000000..84e5e371c
--- /dev/null
+++ b/roles/openshift_cloud_provider/templates/vsphere.conf.j2
@@ -0,0 +1,15 @@
+[Global]
+user = "{{ openshift_cloudprovider_vsphere_username }}"
+password = "{{ openshift_cloudprovider_vsphere_password }}"
+server = "{{ openshift_cloudprovider_vsphere_host }}"
+port = 443
+insecure-flag = 1
+datacenter = {{ openshift_cloudprovider_vsphere_datacenter }}
+datastore = {{ openshift_cloudprovider_vsphere_datastore }}
+{% if openshift_cloudprovider_vsphere_folder is defined %}
+working-dir = /{{ openshift_cloudprovider_vsphere_datacenter }}/vm/{{ openshift_cloudprovider_vsphere_folder }}/
+{% else %}
+working-dir = /{{ openshift_cloudprovider_vsphere_datacenter }}/vm/
+{% endif %}
+[Disk]
+scsicontrollertype = pvscsi
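To sanity-check the new template locally, one can render it with plain jinja2 (all values below are placeholders):

```python
# Local smoke test for the template; all values are placeholders.
from jinja2 import Template

with open('vsphere.conf.j2') as f:
    rendered = Template(f.read()).render(
        openshift_cloudprovider_vsphere_username='admin',
        openshift_cloudprovider_vsphere_password='secret',
        openshift_cloudprovider_vsphere_host='vcenter.example.com',
        openshift_cloudprovider_vsphere_datacenter='dc1',
        openshift_cloudprovider_vsphere_datastore='datastore1',
    )
print(rendered)  # working-dir falls back to /dc1/vm/ when no folder is set
```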
diff --git a/roles/openshift_cloud_provider/vars/main.yml b/roles/openshift_cloud_provider/vars/main.yml
index c9d953f58..e71db80b9 100644
--- a/roles/openshift_cloud_provider/vars/main.yml
+++ b/roles/openshift_cloud_provider/vars/main.yml
@@ -3,3 +3,4 @@ has_cloudprovider: "{{ openshift_cloudprovider_kind | default(None) != None }}"
cloudprovider_is_aws: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'aws' }}"
cloudprovider_is_openstack: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'openstack' }}"
cloudprovider_is_gce: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'gce' }}"
+cloudprovider_is_vsphere: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'vsphere' }}"
diff --git a/roles/openshift_cluster_autoscaler/README.md b/roles/openshift_cluster_autoscaler/README.md
index d775a8a71..137ae0cef 100644
--- a/roles/openshift_cluster_autoscaler/README.md
+++ b/roles/openshift_cluster_autoscaler/README.md
@@ -28,7 +28,7 @@ Example Playbook
remote_user: root
tasks:
- name: include role autoscaler
- include_role:
+ import_role:
name: openshift_cluster_autoscaler
vars:
openshift_clusterid: opstest
diff --git a/roles/openshift_cluster_autoscaler/meta/main.yml b/roles/openshift_cluster_autoscaler/meta/main.yml
index d2bbd2576..543eb6fed 100644
--- a/roles/openshift_cluster_autoscaler/meta/main.yml
+++ b/roles/openshift_cluster_autoscaler/meta/main.yml
@@ -1,3 +1,4 @@
---
dependencies:
- lib_openshift
+- role: lib_utils
diff --git a/roles/openshift_daemonset_config/defaults/main.yml b/roles/openshift_daemonset_config/defaults/main.yml
new file mode 100644
index 000000000..ebe5671d2
--- /dev/null
+++ b/roles/openshift_daemonset_config/defaults/main.yml
@@ -0,0 +1,19 @@
+---
+openshift_daemonset_config_namespace: openshift-node
+openshift_daemonset_config_daemonset_name: ops-node-config
+openshift_daemonset_config_configmap_name: "{{ openshift_daemonset_config_daemonset_name }}"
+openshift_daemonset_config_node_selector:
+ config: config
+openshift_daemonset_config_sa_name: ops
+openshift_daemonset_config_configmap_files: {}
+openshift_daemonset_config_configmap_literals: {}
+openshift_daemonset_config_monitoring: False
+openshift_daemonset_config_interval: 300
+openshift_daemonset_config_script: config.sh
+openshift_daemonset_config_secret_name: operations-config-secret
+openshift_daemonset_config_secrets: {}
+openshift_daemonset_config_runasuser: 0
+openshift_daemonset_config_privileged: True
+openshift_daemonset_config_resources:
+ cpu: 10m
+ memory: 10Mi
diff --git a/roles/openshift_daemonset_config/meta/main.yml b/roles/openshift_daemonset_config/meta/main.yml
new file mode 100644
index 000000000..d2bbd2576
--- /dev/null
+++ b/roles/openshift_daemonset_config/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+- lib_openshift
diff --git a/roles/openshift_daemonset_config/tasks/main.yml b/roles/openshift_daemonset_config/tasks/main.yml
new file mode 100644
index 000000000..450cc9dca
--- /dev/null
+++ b/roles/openshift_daemonset_config/tasks/main.yml
@@ -0,0 +1,58 @@
+---
+- name: add a sa
+ oc_serviceaccount:
+ name: "{{ openshift_daemonset_config_sa_name }}"
+ namespace: "{{ openshift_daemonset_config_namespace }}"
+
+- name: add sa to privileged scc
+ oc_adm_policy_user:
+ namespace: "{{ openshift_daemonset_config_namespace }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+ user: "system:serviceaccount:{{ openshift_daemonset_config_namespace }}:{{ openshift_daemonset_config_sa_name }}"
+
+- name: copy template to disk
+ template:
+ dest: "/tmp/{{ item.name }}"
+ src: "{{ item.name }}.j2"
+ with_items:
+ - name: daemonset.yml
+
+- name: copy files to disk
+ copy:
+ src: "{{ item.key }}"
+ dest: "{{ item.value }}"
+ with_dict: "{{ openshift_daemonset_config_configmap_files }}"
+
+- name: create the namespace
+ oc_project:
+ state: present
+ name: "{{ openshift_daemonset_config_namespace }}"
+
+- name: lay down secrets
+ oc_secret:
+ state: present
+ name: "{{ openshift_daemonset_config_secret_name }}"
+ namespace: "{{ openshift_daemonset_config_namespace }}"
+ delete_after: true
+ contents: "{{ openshift_daemonset_config_secrets }}"
+ when:
+ - openshift_daemonset_config_secrets != {}
+
+- name: create the configmap
+ oc_configmap:
+ state: present
+ name: "{{ openshift_daemonset_config_configmap_name }}"
+ namespace: "{{ openshift_daemonset_config_namespace }}"
+ from_literal: "{{ openshift_daemonset_config_configmap_literals }}"
+ from_file: "{{ openshift_daemonset_config_configmap_files }}"
+
+- name: deploy daemonset
+ oc_obj:
+ state: present
+    namespace: "{{ openshift_daemonset_config_namespace }}"
+ name: "{{ openshift_daemonset_config_daemonset_name }}"
+ kind: daemonset
+ files:
+ - /tmp/daemonset.yml
diff --git a/roles/openshift_daemonset_config/templates/daemonset.yml.j2 b/roles/openshift_daemonset_config/templates/daemonset.yml.j2
new file mode 100644
index 000000000..9792f6d16
--- /dev/null
+++ b/roles/openshift_daemonset_config/templates/daemonset.yml.j2
@@ -0,0 +1,140 @@
+---
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+ name: {{ openshift_daemonset_config_daemonset_name }}
+ annotations:
+ kubernetes.io/description: |
+ This daemon set manages the operational configuration for a cluster and ensures all nodes have
+ a concrete set of config in place. It could also use a local ansible run against the /host directory.
+spec:
+ selector:
+ matchLabels:
+ app: {{ openshift_daemonset_config_daemonset_name }}
+ confighosts: ops
+ ops.openshift.io/role: operations
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app: {{ openshift_daemonset_config_daemonset_name }}
+ confighosts: ops
+ ops.openshift.io/role: operations
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+{% if openshift_daemonset_config_node_selector is defined and openshift_daemonset_config_node_selector != {} %}
+ nodeSelector: {{ openshift_daemonset_config_node_selector | to_json }}
+{% endif %}
+ serviceAccountName: {{ openshift_daemonset_config_sa_name }}
+ hostNetwork: true
+ hostPID: true
+ hostIPC: true
+ containers:
+ - name: config
+ image: centos:7
+ env:
+ - name: RESYNC_INTERVAL
+ value: "{{ openshift_daemonset_config_interval }}"
+ command:
+ - /bin/bash
+ - -c
+ - |
+ #!/bin/sh
+ set -o errexit
+
+ while true; do
+
+ # execute user defined script
+ sh /opt/config/{{ openshift_daemonset_config_script }}
+
+              # sleep for ${RESYNC_INTERVAL} seconds; exec replaces the shell,
+              # so when sleep exits (or the script fails) the kubelet restarts
+              # the container and the config is applied again
+              echo "Success, sleeping for ${RESYNC_INTERVAL}s"
+              exec sleep ${RESYNC_INTERVAL}
+            done
+ securityContext:
+ # Must be root to modify host system
+ runAsUser: {{ openshift_daemonset_config_runasuser }}
+ # Permission could be reduced by selecting an appropriate SELinux policy that allows
+ # us to update the named directories
+ privileged: {{ openshift_daemonset_config_privileged }}
+ volumeMounts:
+ # Directory which contains the host volume.
+ - mountPath: /host
+ name: host
+ # Our node configuration
+ - mountPath: /opt/config
+ name: config
+{% if openshift_daemonset_config_secrets != {} %}
+ # Our delivered secrets
+ - mountPath: /opt/secrets
+ name: secrets
+{% endif %}
+ resources:
+ requests:
+ cpu: {{ openshift_daemonset_config_resources.cpu }}
+ memory: {{ openshift_daemonset_config_resources.memory }}
+{% if openshift_daemonset_config_monitoring %}
+ - name: monitoring
+ image: openshifttools/oso-centos7-host-monitoring:latest
+ securityContext:
+ # Must be root to read content
+ runAsUser: 0
+ privileged: true
+
+ volumeMounts:
+ - mountPath: /host
+ name: host
+ readOnly: true
+ - mountPath: /etc/localtime
+ subPath: etc/localtime
+ name: host
+ readOnly: true
+ - mountPath: /sys
+ subPath: sys
+ name: host
+ readOnly: true
+ - mountPath: /var/run/docker.sock
+ subPath: var/run/docker.sock
+ name: host
+ readOnly: true
+ - mountPath: /var/run/openvswitch
+ subPath: var/run/openvswitch
+ name: host
+ readOnly: true
+ - mountPath: /etc/origin
+ subPath: etc/origin
+ name: host
+ readOnly: true
+ - mountPath: /usr/bin/oc
+ subPath: usr/bin/oc
+ name: host
+ readOnly: true
+ - mountPath: /host/var/cache/yum
+ subPath: var/cache/yum
+ name: host
+ - mountPath: /container_setup/monitoring-config.yml
+ subPath: monitoring-config.yaml
+ name: config
+ - mountPath: /opt/config
+ name: config
+ resources:
+ requests:
+ cpu: 10m
+ memory: 10Mi
+{% endif %}
+ volumes:
+ - name: config
+ configMap:
+ name: {{ openshift_daemonset_config_configmap_name }}
+{% if openshift_daemonset_config_secrets != {} %}
+ - name: secrets
+ secret:
+ secretName: {{ openshift_daemonset_config_secret_name }}
+{% endif %}
+ - name: host
+ hostPath:
+ path: /
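The entrypoint's exec-then-exit pattern is easier to see in a procedural sketch: run the config script, sleep, and let the kubelet restart the container to re-apply (a Python analogue, illustrative only):

```python
# Python analogue of the container entrypoint above; illustrative only.
import os
import subprocess
import time

interval = int(os.environ.get('RESYNC_INTERVAL', '300'))
subprocess.check_call(['sh', '/opt/config/config.sh'])  # user-defined script
print('Success, sleeping for {}s'.format(interval))
time.sleep(interval)  # then exit; the kubelet restarts the pod and re-applies
```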
diff --git a/roles/openshift_default_storage_class/meta/main.yml b/roles/openshift_default_storage_class/meta/main.yml
index d7d57fe39..30671a59a 100644
--- a/roles/openshift_default_storage_class/meta/main.yml
+++ b/roles/openshift_default_storage_class/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: lib_utils
diff --git a/roles/openshift_docker_gc/meta/main.yml b/roles/openshift_docker_gc/meta/main.yml
index f88a7c533..c8472d8bc 100644
--- a/roles/openshift_docker_gc/meta/main.yml
+++ b/roles/openshift_docker_gc/meta/main.yml
@@ -11,3 +11,4 @@ galaxy_info:
- 7
dependencies:
- role: lib_openshift
+- role: lib_utils
diff --git a/roles/openshift_etcd/meta/main.yml b/roles/openshift_etcd/meta/main.yml
index 0e28fec03..25ae6a936 100644
--- a/roles/openshift_etcd/meta/main.yml
+++ b/roles/openshift_etcd/meta/main.yml
@@ -14,3 +14,4 @@ galaxy_info:
dependencies:
- role: openshift_etcd_facts
- role: etcd
+- role: lib_utils
diff --git a/roles/openshift_etcd_client_certificates/meta/main.yml b/roles/openshift_etcd_client_certificates/meta/main.yml
index fbc72c8a3..6c79d345c 100644
--- a/roles/openshift_etcd_client_certificates/meta/main.yml
+++ b/roles/openshift_etcd_client_certificates/meta/main.yml
@@ -11,4 +11,5 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- role: lib_utils
diff --git a/roles/openshift_etcd_client_certificates/tasks/main.yml b/roles/openshift_etcd_client_certificates/tasks/main.yml
index 7f8b667f0..18d07fc2f 100644
--- a/roles/openshift_etcd_client_certificates/tasks/main.yml
+++ b/roles/openshift_etcd_client_certificates/tasks/main.yml
@@ -1,4 +1,4 @@
---
-- include_role:
+- import_role:
name: etcd
tasks_from: client_certificates
diff --git a/roles/openshift_etcd_facts/meta/main.yml b/roles/openshift_etcd_facts/meta/main.yml
index 925aa9f92..5e64a8596 100644
--- a/roles/openshift_etcd_facts/meta/main.yml
+++ b/roles/openshift_etcd_facts/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_etcd_facts/vars/main.yml b/roles/openshift_etcd_facts/vars/main.yml
index 0c072b64a..d716c9505 100644
--- a/roles/openshift_etcd_facts/vars/main.yml
+++ b/roles/openshift_etcd_facts/vars/main.yml
@@ -1,6 +1,6 @@
---
-etcd_is_containerized: "{{ openshift.common.is_containerized }}"
-etcd_is_atomic: "{{ openshift.common.is_atomic }}"
+etcd_is_containerized: "{{ openshift_is_containerized | bool }}"
+etcd_is_atomic: "{{ openshift_is_atomic }}"
etcd_hostname: "{{ openshift.common.hostname }}"
etcd_ip: "{{ openshift.common.ip }}"
etcd_cert_subdir: "etcd-{{ openshift.common.hostname }}"
diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml
index e623b33f3..0a6e8f20c 100644
--- a/roles/openshift_examples/defaults/main.yml
+++ b/roles/openshift_examples/defaults/main.yml
@@ -8,7 +8,7 @@ openshift_examples_load_quickstarts: true
content_version: "{{ openshift.common.examples_content_version }}"
-examples_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples"
+examples_base: "{{ openshift.common.config_base if openshift_is_containerized | bool else '/usr/share/openshift' }}/examples"
image_streams_base: "{{ examples_base }}/image-streams"
centos_image_streams:
- "{{ image_streams_base }}/image-streams-centos7.json"
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index 68a0e8857..648bf7293 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -6,7 +6,7 @@
# This script should be run from openshift-ansible/roles/openshift_examples
XPAAS_VERSION=ose-v1.4.7
-ORIGIN_VERSION=${1:-v3.7}
+ORIGIN_VERSION=${1:-v3.9}
RHAMP_TAG=2.0.0.GA
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
find ${EXAMPLES_BASE} -name '*.json' -delete
diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json
index 217ef11dd..92be8f42e 100644
--- a/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "mariadb-persistent",
"annotations": {
- "openshift.io/display-name": "MariaDB (Persistent)",
+ "openshift.io/display-name": "MariaDB",
"description": "MariaDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mariadb",
"tags": "database,mariadb",
diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/mongodb-persistent-template.json
index 97e4128a4..4e3e64d48 100644
--- a/roles/openshift_examples/files/examples/v3.9/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/db-templates/mongodb-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "mongodb-persistent",
"annotations": {
- "openshift.io/display-name": "MongoDB (Persistent)",
+ "openshift.io/display-name": "MongoDB",
"description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mongodb",
"tags": "database,mongodb",
diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json
index 48ac114fd..6ac80f3a0 100644
--- a/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "mysql-persistent",
"annotations": {
- "openshift.io/display-name": "MySQL (Persistent)",
+ "openshift.io/display-name": "MySQL",
"description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mysql-database",
"tags": "database,mysql",
diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json
index 8a2d23907..190509112 100644
--- a/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "postgresql-persistent",
"annotations": {
- "openshift.io/display-name": "PostgreSQL (Persistent)",
+ "openshift.io/display-name": "PostgreSQL",
"description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-postgresql",
"tags": "database,postgresql",
diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/redis-persistent-template.json
index e0e0a88d5..d1103d3af 100644
--- a/roles/openshift_examples/files/examples/v3.9/db-templates/redis-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/db-templates/redis-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "redis-persistent",
"annotations": {
- "openshift.io/display-name": "Redis (Persistent)",
+ "openshift.io/display-name": "Redis",
"description": "Redis in-memory data structure store, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-redis",
"tags": "database,redis",
diff --git a/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json
index e7af160d9..ad17b709e 100644
--- a/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json
@@ -407,7 +407,7 @@
"annotations": {
"openshift.io/display-name": "Python (Latest)",
"openshift.io/provider-display-name": "Red Hat, Inc.",
- "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
+ "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python",
@@ -415,7 +415,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "3.5"
+ "name": "3.6"
}
},
{
@@ -485,6 +485,23 @@
"kind": "DockerImage",
"name": "centos/python-35-centos7:latest"
}
+ },
+ {
+ "name": "3.6",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "description": "Build and run Python 3.6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.6,python",
+ "version": "3.6",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/python-36-centos7:latest"
+ }
}
]
}
@@ -944,7 +961,7 @@
},
"from": {
"kind": "DockerImage",
- "name": "openshift/jenkins-2-centos7:latest"
+ "name": "openshift/jenkins-2-centos7:v3.9"
}
}
]
diff --git a/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json
index 2b082fc75..efc8705f4 100644
--- a/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json
@@ -407,7 +407,7 @@
"annotations": {
"openshift.io/display-name": "Python (Latest)",
"openshift.io/provider-display-name": "Red Hat, Inc.",
- "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
+ "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python",
@@ -415,7 +415,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "3.5"
+ "name": "3.6"
}
},
{
@@ -485,6 +485,23 @@
"kind": "DockerImage",
"name": "registry.access.redhat.com/rhscl/python-35-rhel7:latest"
}
+ },
+ {
+ "name": "3.6",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "description": "Build and run Python 3.6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.6,python",
+ "version": "3.6",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/python-36-rhel7:latest"
+ }
}
]
}
@@ -846,7 +863,7 @@
},
"from": {
"kind": "DockerImage",
- "name": "registry.access.redhat.com/openshift3/jenkins-2-rhel7:latest"
+ "name": "registry.access.redhat.com/openshift3/jenkins-2-rhel7:v3.9"
}
}
]
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json
index 86ddc184a..40b4eaa81 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "cakephp-mysql-persistent",
"annotations": {
- "openshift.io/display-name": "CakePHP + MySQL (Persistent)",
+ "openshift.io/display-name": "CakePHP + MySQL",
"description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.",
"tags": "quickstart,php,cakephp",
"iconClass": "icon-php",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
"labels": {
- "template": "cakephp-mysql-persistent"
+ "template": "cakephp-mysql-persistent",
+ "app": "cakephp-mysql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json
index 3c964bd6a..ecd90e495 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
"labels": {
- "template": "cakephp-mysql-example"
+ "template": "cakephp-mysql-example",
+ "app": "cakephp-mysql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json
index 0a10c5fbc..17a155600 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "dancer-mysql-persistent",
"annotations": {
- "openshift.io/display-name": "Dancer + MySQL (Persistent)",
+ "openshift.io/display-name": "Dancer + MySQL",
"description": "An example Dancer application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"tags": "quickstart,perl,dancer",
"iconClass": "icon-perl",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"labels": {
- "template": "dancer-mysql-persistent"
+ "template": "dancer-mysql-persistent",
+ "app": "dancer-mysql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json
index 6122d5436..abf711535 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"labels": {
- "template": "dancer-mysql-example"
+ "template": "dancer-mysql-example",
+ "app": "dancer-mysql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json
index f3b5838fa..c8dab0b53 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "django-psql-persistent",
"annotations": {
- "openshift.io/display-name": "Django + PostgreSQL (Persistent)",
+ "openshift.io/display-name": "Django + PostgreSQL",
"description": "An example Django application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"tags": "quickstart,python,django",
"iconClass": "icon-python",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"labels": {
- "template": "django-psql-persistent"
+ "template": "django-psql-persistent",
+ "app": "django-psql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json
index b21295df2..6395defda 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"labels": {
- "template": "django-psql-example"
+ "template": "django-psql-example",
+ "app": "django-psql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json
index 3771280bf..e944f21a5 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.",
"labels": {
- "template": "httpd-example"
+ "template": "httpd-example",
+ "app": "httpd-example"
},
"objects": [
{
@@ -198,12 +199,7 @@
}
},
"env": [
- ],
- "resources": {
- "limits": {
- "memory": "${MEMORY_LIMIT}"
- }
- }
+ ]
}
]
}
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-ephemeral-template.json
index 28b4b9d81..87ae6ed14 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-ephemeral-template.json
@@ -15,6 +15,10 @@
}
},
"message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
+ "labels": {
+ "app": "jenkins-ephemeral",
+ "template": "jenkins-ephemeral-template"
+ },
"objects": [
{
"kind": "Route",
@@ -275,10 +279,7 @@
"name": "JENKINS_IMAGE_STREAM_TAG",
"displayName": "Jenkins ImageStreamTag",
"description": "Name of the ImageStreamTag to be used for the Jenkins image.",
- "value": "jenkins:latest"
+ "value": "jenkins:2"
}
- ],
- "labels": {
- "template": "jenkins-ephemeral-template"
- }
+ ]
}
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-persistent-template.json
index 4915bb12c..95d15b55f 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "jenkins-persistent",
"annotations": {
- "openshift.io/display-name": "Jenkins (Persistent)",
+ "openshift.io/display-name": "Jenkins",
"description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-jenkins",
"tags": "instant-app,jenkins",
@@ -15,6 +15,10 @@
}
},
"message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
+ "labels": {
+ "app": "jenkins-persistent",
+ "template": "jenkins-persistent-template"
+ },
"objects": [
{
"kind": "Route",
@@ -299,10 +303,7 @@
"name": "JENKINS_IMAGE_STREAM_TAG",
"displayName": "Jenkins ImageStreamTag",
"description": "Name of the ImageStreamTag to be used for the Jenkins image.",
- "value": "jenkins:latest"
+ "value": "jenkins:2"
}
- ],
- "labels": {
- "template": "jenkins-persistent-template"
- }
+ ]
}
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json
index 7f2a5d804..f04adaa67 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "nodejs-mongo-persistent",
"annotations": {
- "openshift.io/display-name": "Node.js + MongoDB (Persistent)",
+ "openshift.io/display-name": "Node.js + MongoDB",
"description": "An example Node.js application with a MongoDB database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"tags": "quickstart,nodejs",
"iconClass": "icon-nodejs",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"labels": {
- "template": "nodejs-mongo-persistent"
+ "template": "nodejs-mongo-persistent",
+ "app": "nodejs-mongo-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json
index b3afae46e..0ce36dba5 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"labels": {
- "template": "nodejs-mongodb-example"
+ "template": "nodejs-mongodb-example",
+ "app": "nodejs-mongodb-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json
index 1c03be28a..10e9382cc 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "rails-pgsql-persistent",
"annotations": {
- "openshift.io/display-name": "Rails + PostgreSQL (Persistent)",
+ "openshift.io/display-name": "Rails + PostgreSQL",
"description": "An example Rails application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"tags": "quickstart,ruby,rails",
"iconClass": "icon-ruby",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"labels": {
- "template": "rails-pgsql-persistent"
+ "template": "rails-pgsql-persistent",
+ "app": "rails-pgsql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json
index 240289d33..8ec2c8ea6 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"labels": {
- "template": "rails-postgresql-example"
+ "template": "rails-postgresql-example",
+ "app": "rails-postgresql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/meta/main.yml b/roles/openshift_examples/meta/main.yml
index f3fe2dcbe..9f46a4683 100644
--- a/roles/openshift_examples/meta/main.yml
+++ b/roles/openshift_examples/meta/main.yml
@@ -11,4 +11,6 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- role: lib_utils
+- role: openshift_facts
diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml
index 356317431..7787da4f0 100644
--- a/roles/openshift_examples/tasks/main.yml
+++ b/roles/openshift_examples/tasks/main.yml
@@ -13,18 +13,23 @@
# use it either due to changes introduced in Ansible 2.x.
- name: Create local temp dir for OpenShift examples copy
local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- become: False
register: copy_examples_mktemp
run_once: True
+- name: Chmod local temp dir for OpenShift examples copy
+ local_action: command chmod 777 "{{ copy_examples_mktemp.stdout }}"
+ run_once: True
+
- name: Create tar of OpenShift examples
local_action: command tar -C "{{ role_path }}/files/examples/{{ content_version }}/" -cvf "{{ copy_examples_mktemp.stdout }}/openshift-examples.tar" .
args:
# Disables the following warning:
# Consider using unarchive module rather than running tar
warn: no
- become: False
- register: copy_examples_tar
+
+- name: Chmod local temp tar for OpenShift examples copy
+ local_action: command chmod 744 "{{ copy_examples_mktemp.stdout }}/openshift-examples.tar"
+ run_once: True
- name: Create the remote OpenShift examples directory
file:
@@ -38,7 +43,6 @@
dest: "{{ examples_base }}/"
- name: Cleanup the OpenShift Examples temp dir
- become: False
local_action: file dest="{{ copy_examples_mktemp.stdout }}" state=absent
# Done copying examples
@@ -53,7 +57,7 @@
# RHEL and Centos image streams are mutually exclusive
- name: Import RHEL streams
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }}
+ {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }}
when: openshift_examples_load_rhel | bool
with_items:
- "{{ rhel_image_streams }}"
@@ -63,7 +67,7 @@
- name: Import Centos Image streams
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }}
+ {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }}
when: openshift_examples_load_centos | bool
with_items:
- "{{ centos_image_streams }}"
@@ -73,7 +77,7 @@
- name: Import db templates
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ db_templates_base }}
+ {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ db_templates_base }}
when: openshift_examples_load_db_templates | bool
register: oex_import_db_templates
failed_when: "'already exists' not in oex_import_db_templates.stderr and oex_import_db_templates.rc != 0"
@@ -90,7 +94,7 @@
- "{{ quickstarts_base }}/django.json"
- name: Remove defunct quickstart templates from openshift namespace
- command: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
+ command: "{{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
with_items:
- nodejs-example
- cakephp-example
@@ -102,7 +106,7 @@
- name: Import quickstart-templates
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ quickstarts_base }}
+ {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ quickstarts_base }}
when: openshift_examples_load_quickstarts | bool
register: oex_import_quickstarts
failed_when: "'already exists' not in oex_import_quickstarts.stderr and oex_import_quickstarts.rc != 0"
@@ -116,7 +120,7 @@
- "{{ xpaas_templates_base }}/sso70-basic.json"
- name: Remove old xPaas templates from openshift namespace
- command: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
+ command: "{{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
with_items:
- sso70-basic
register: oex_delete_old_xpaas_templates
@@ -125,7 +129,7 @@
- name: Import xPaas image streams
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_image_streams }}
+ {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_image_streams }}
when: openshift_examples_load_xpaas | bool
register: oex_import_xpaas_streams
failed_when: "'already exists' not in oex_import_xpaas_streams.stderr and oex_import_xpaas_streams.rc != 0"
@@ -133,7 +137,7 @@
- name: Import xPaas templates
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_templates_base }}
+ {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_templates_base }}
when: openshift_examples_load_xpaas | bool
register: oex_import_xpaas_templates
failed_when: "'already exists' not in oex_import_xpaas_templates.stderr and oex_import_xpaas_templates.rc != 0"
diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml
index ad7c00d14..6532d7fe2 100644
--- a/roles/openshift_excluder/tasks/install.yml
+++ b/roles/openshift_excluder/tasks/install.yml
@@ -1,14 +1,14 @@
---
- when:
- - not openshift.common.is_atomic | bool
+ - not openshift_is_atomic | bool
- r_openshift_excluder_install_ran is not defined
block:
- name: Install docker excluder - yum
package:
- name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
+ name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
state: "{{ r_openshift_excluder_docker_package_state }}"
when:
- r_openshift_excluder_enable_docker_excluder | bool
@@ -23,7 +23,7 @@
# https://bugzilla.redhat.com/show_bug.cgi?id=1199432
- name: Install docker excluder - dnf
package:
- name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
state: "{{ r_openshift_excluder_docker_package_state }}"
when:
- r_openshift_excluder_enable_docker_excluder | bool
@@ -33,7 +33,7 @@
- name: Install openshift excluder - yum
package:
- name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
+ name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
state: "{{ r_openshift_excluder_package_state }}"
when:
- r_openshift_excluder_enable_openshift_excluder | bool
@@ -47,7 +47,7 @@
# https://bugzilla.redhat.com/show_bug.cgi?id=1199432
- name: Install openshift excluder - dnf
package:
- name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
state: "{{ r_openshift_excluder_package_state }}"
when:
- r_openshift_excluder_enable_openshift_excluder | bool
diff --git a/roles/openshift_excluder/tasks/verify_excluder.yml b/roles/openshift_excluder/tasks/verify_excluder.yml
index 4f5277fa2..22a3fcd3b 100644
--- a/roles/openshift_excluder/tasks/verify_excluder.yml
+++ b/roles/openshift_excluder/tasks/verify_excluder.yml
@@ -3,7 +3,7 @@
# - excluder
- name: Get available excluder version
repoquery:
- name: "{{ excluder }}"
+ name: "{{ excluder }}{{ '-' ~ r_openshift_excluder_upgrade_target.split('.')[0:2] | join('.') ~ '*' if r_openshift_excluder_upgrade_target is defined else '' }}"
ignore_excluders: true
register: repoquery_out
diff --git a/roles/openshift_expand_partition/README.md b/roles/openshift_expand_partition/README.md
index c9c7b378c..402c3dc3e 100644
--- a/roles/openshift_expand_partition/README.md
+++ b/roles/openshift_expand_partition/README.md
@@ -45,7 +45,6 @@ space on /dev/xvda, and the file system will be expanded to fill the new
partition space.
- hosts: mynodes
- become: no
remote_user: root
gather_facts: no
roles:
@@ -68,7 +67,6 @@ partition space.
* Create an ansible playbook, say `expandvar.yaml`:
```
- hosts: mynodes
- become: no
remote_user: root
gather_facts: no
roles:
diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml
index c7e21ba99..b38ebdfb4 100644
--- a/roles/openshift_expand_partition/tasks/main.yml
+++ b/roles/openshift_expand_partition/tasks/main.yml
@@ -1,16 +1,16 @@
---
- name: Ensure growpart is installed
package: name=cloud-utils-growpart state=present
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
register: result
until: result is succeeded
- name: Determine if growpart is installed
command: "rpm -q cloud-utils-growpart"
register: has_growpart
- failed_when: has_growpart.cr != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout
+ failed_when: has_growpart.rc != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout
changed_when: false
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
- name: Grow the partitions
command: "growpart {{oep_drive}} {{oep_partition}}"
diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml
index 53a3bc87e..a223ffba6 100644
--- a/roles/openshift_facts/defaults/main.yml
+++ b/roles/openshift_facts/defaults/main.yml
@@ -1,8 +1,13 @@
---
+openshift_client_binary: "{{ (openshift_is_containerized | bool) | ternary('/usr/local/bin/oc', 'oc') }}"
+
openshift_cli_image_dict:
origin: 'openshift/origin'
openshift-enterprise: 'openshift3/ose'
+repoquery_cmd: "{{ (ansible_pkg_mgr == 'dnf') | ternary('dnf repoquery --latest-limit 1 -d 0', 'repoquery --plugins') }}"
+repoquery_installed: "{{ (ansible_pkg_mgr == 'dnf') | ternary('dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed', 'repoquery --plugins --installed') }}"
+
openshift_hosted_images_dict:
origin: 'openshift/origin-${component}:${version}'
openshift-enterprise: 'openshift3/ose-${component}:${version}'
@@ -94,11 +99,6 @@ openshift_prometheus_alertbuffer_storage_access_modes:
openshift_prometheus_alertbuffer_storage_create_pv: True
openshift_prometheus_alertbuffer_storage_create_pvc: False
-
-openshift_router_selector: "region=infra"
-openshift_hosted_router_selector: "{{ openshift_router_selector }}"
-openshift_hosted_registry_selector: "{{ openshift_router_selector }}"
-
openshift_service_type_dict:
origin: origin
openshift-enterprise: atomic-openshift
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index a10ba9310..d7c358a2f 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -69,22 +69,6 @@ def migrate_common_facts(facts):
return facts
-def migrate_node_facts(facts):
- """ Migrate facts from various roles into node """
- params = {
- 'common': ('dns_ip'),
- }
- if 'node' not in facts:
- facts['node'] = {}
- # pylint: disable=consider-iterating-dictionary
- for role in params.keys():
- if role in facts:
- for param in params[role]:
- if param in facts[role]:
- facts['node'][param] = facts[role].pop(param)
- return facts
-
-
def migrate_admission_plugin_facts(facts):
""" Apply migrations for admission plugin facts """
if 'master' in facts:
@@ -104,7 +88,6 @@ def migrate_local_facts(facts):
""" Apply migrations of local facts """
migrated_facts = copy.deepcopy(facts)
migrated_facts = migrate_common_facts(migrated_facts)
- migrated_facts = migrate_node_facts(migrated_facts)
migrated_facts = migrate_admission_plugin_facts(migrated_facts)
return migrated_facts
@@ -536,8 +519,7 @@ def set_aggregate_facts(facts):
def set_deployment_facts_if_unset(facts):
""" Set Facts that vary based on deployment_type. This currently
- includes master.registry_url, node.registry_url,
- node.storage_plugin_deps
+ includes master.registry_url
Args:
facts (dict): existing facts
@@ -545,29 +527,17 @@ def set_deployment_facts_if_unset(facts):
dict: the facts dict updated with the generated deployment_type
facts
"""
- # disabled to avoid breaking up facts related to deployment type into
- # multiple methods for now.
- # pylint: disable=too-many-statements, too-many-branches
- for role in ('master', 'node'):
- if role in facts:
- deployment_type = facts['common']['deployment_type']
- if 'registry_url' not in facts[role]:
- registry_url = 'openshift/origin-${component}:${version}'
- if deployment_type == 'openshift-enterprise':
- registry_url = 'openshift3/ose-${component}:${version}'
- facts[role]['registry_url'] = registry_url
-
if 'master' in facts:
deployment_type = facts['common']['deployment_type']
openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
if 'disabled_features' not in facts['master']:
if facts['common']['deployment_subtype'] == 'registry':
facts['master']['disabled_features'] = openshift_features
-
- if 'node' in facts:
- deployment_type = facts['common']['deployment_type']
- if 'storage_plugin_deps' not in facts['node']:
- facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi']
+ if 'registry_url' not in facts['master']:
+ registry_url = 'openshift/origin-${component}:${version}'
+ if deployment_type == 'openshift-enterprise':
+ registry_url = 'openshift3/ose-${component}:${version}'
+ facts['master']['registry_url'] = registry_url
return facts
@@ -686,26 +656,6 @@ def set_nodename(facts):
return facts
-def migrate_oauth_template_facts(facts):
- """
- Migrate an old oauth template fact to a newer format if it's present.
-
- The legacy 'oauth_template' fact was just a filename, and assumed you were
- setting the 'login' template.
-
- The new pluralized 'oauth_templates' fact is a dict mapping the template
- name to a filename.
-
- Simplify the code after this by merging the old fact into the new.
- """
- if 'master' in facts and 'oauth_template' in facts['master']:
- if 'oauth_templates' not in facts['master']:
- facts['master']['oauth_templates'] = {"login": facts['master']['oauth_template']}
- elif 'login' not in facts['master']['oauth_templates']:
- facts['master']['oauth_templates']['login'] = facts['master']['oauth_template']
- return facts
-
-
def format_url(use_ssl, hostname, port, path=''):
""" Format url based on ssl flag, hostname, port and path
@@ -792,62 +742,6 @@ def get_current_config(facts):
return current_config
-def build_kubelet_args(facts):
- """Build node kubelet_args
-
-In the node-config.yaml file, kubeletArgument sub-keys have their
-values provided as a list. Hence the gratuitous use of ['foo'] below.
- """
- cloud_cfg_path = os.path.join(
- facts['common']['config_base'],
- 'cloudprovider')
-
- # We only have to do this stuff on hosts that are nodes
- if 'node' in facts:
- # Any changes to the kubeletArguments parameter are stored
- # here first.
- kubelet_args = {}
-
- if 'cloudprovider' in facts:
- # EVERY cloud is special <3
- if 'kind' in facts['cloudprovider']:
- if facts['cloudprovider']['kind'] == 'aws':
- kubelet_args['cloud-provider'] = ['aws']
- kubelet_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
- if facts['cloudprovider']['kind'] == 'openstack':
- kubelet_args['cloud-provider'] = ['openstack']
- kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
- if facts['cloudprovider']['kind'] == 'gce':
- kubelet_args['cloud-provider'] = ['gce']
- kubelet_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
-
- # Automatically add node-labels to the kubeletArguments
- # parameter. See BZ1359848 for additional details.
- #
- # Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1359848
- if 'labels' in facts['node'] and isinstance(facts['node']['labels'], dict):
- # tl;dr: os_node_labels="{'foo': 'bar', 'a': 'b'}" turns
- # into ['foo=bar', 'a=b']
- #
- # On the openshift_node_labels inventory variable we loop
- # over each key-value tuple (from .items()) and join the
- # key to the value with an '=' character, this produces a
- # list.
- #
- # map() seems to be returning an itertools.imap object
- # instead of a list. We cast it to a list ourselves.
- # pylint: disable=unnecessary-lambda
- labels_str = list(map(lambda x: '='.join(x), facts['node']['labels'].items()))
- if labels_str != '':
- kubelet_args['node-labels'] = labels_str
-
- # If we've added items to the kubelet_args dict then we need
- # to merge the new items back into the main facts object.
- if kubelet_args != {}:
- facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [])
- return facts
-
-
def build_controller_args(facts):
""" Build master controller_args """
cloud_cfg_path = os.path.join(facts['common']['config_base'],
@@ -973,7 +867,7 @@ def get_openshift_version(facts):
if os.path.isfile('/usr/bin/openshift'):
_, output, _ = module.run_command(['/usr/bin/openshift', 'version']) # noqa: F405
version = parse_openshift_version(output)
- elif 'common' in facts and 'is_containerized' in facts['common']:
+ else:
version = get_container_openshift_version(facts)
# Handle containerized masters that have not yet been configured as a node.
@@ -1364,75 +1258,10 @@ def set_container_facts_if_unset(facts):
dict: the facts dict updated with the generated containerization
facts
"""
- deployment_type = facts['common']['deployment_type']
- if deployment_type == 'openshift-enterprise':
- master_image = 'openshift3/ose'
- node_image = 'openshift3/node'
- ovs_image = 'openshift3/openvswitch'
- pod_image = 'openshift3/ose-pod'
- router_image = 'openshift3/ose-haproxy-router'
- registry_image = 'openshift3/ose-docker-registry'
- deployer_image = 'openshift3/ose-deployer'
- else:
- master_image = 'openshift/origin'
- node_image = 'openshift/node'
- ovs_image = 'openshift/openvswitch'
- pod_image = 'openshift/origin-pod'
- router_image = 'openshift/origin-haproxy-router'
- registry_image = 'openshift/origin-docker-registry'
- deployer_image = 'openshift/origin-deployer'
-
- facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')
-
- if 'is_containerized' not in facts['common']:
- facts['common']['is_containerized'] = facts['common']['is_atomic']
- if 'pod_image' not in facts['common']:
- facts['common']['pod_image'] = pod_image
- if 'router_image' not in facts['common']:
- facts['common']['router_image'] = router_image
- if 'registry_image' not in facts['common']:
- facts['common']['registry_image'] = registry_image
- if 'deployer_image' not in facts['common']:
- facts['common']['deployer_image'] = deployer_image
- if 'master' in facts and 'master_image' not in facts['master']:
- facts['master']['master_image'] = master_image
- facts['master']['master_system_image'] = master_image
- if 'node' in facts:
- if 'node_image' not in facts['node']:
- facts['node']['node_image'] = node_image
- facts['node']['node_system_image'] = node_image
- if 'ovs_image' not in facts['node']:
- facts['node']['ovs_image'] = ovs_image
- facts['node']['ovs_system_image'] = ovs_image
-
- if safe_get_bool(facts['common']['is_containerized']):
- facts['common']['client_binary'] = '/usr/local/bin/oc'
return facts
-def set_installed_variant_rpm_facts(facts):
- """ Set RPM facts of installed variant
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with installed_variant_rpms
- """
- installed_rpms = []
- for base_rpm in ['openshift', 'atomic-openshift', 'origin']:
- optional_rpms = ['master', 'node', 'clients', 'sdn-ovs']
- variant_rpms = [base_rpm] + \
- ['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \
- ['tuned-profiles-%s-node' % base_rpm]
- for rpm in variant_rpms:
- exit_code, _, _ = module.run_command(['rpm', '-q', rpm]) # noqa: F405
- if exit_code == 0:
- installed_rpms.append(rpm)
-
- facts['common']['installed_variant_rpms'] = installed_rpms
- return facts
-
-
class OpenShiftFactsInternalError(Exception):
"""Origin Facts Error"""
pass
@@ -1538,14 +1367,12 @@ class OpenShiftFacts(object):
facts = merge_facts(facts,
local_facts,
additive_facts_to_overwrite)
- facts = migrate_oauth_template_facts(facts)
facts['current_config'] = get_current_config(facts)
facts = set_url_facts_if_unset(facts)
facts = set_identity_providers_if_unset(facts)
facts = set_deployment_facts_if_unset(facts)
facts = set_sdn_facts_if_unset(facts, self.system_facts)
facts = set_container_facts_if_unset(facts)
- facts = build_kubelet_args(facts)
facts = build_controller_args(facts)
facts = build_api_server_args(facts)
facts = set_version_facts_if_unset(facts)
@@ -1553,8 +1380,6 @@ class OpenShiftFacts(object):
facts = set_proxy_facts(facts)
facts = set_builddefaults_facts(facts)
facts = set_buildoverrides_facts(facts)
- if not safe_get_bool(facts['common']['is_containerized']):
- facts = set_installed_variant_rpm_facts(facts)
facts = set_nodename(facts)
return dict(openshift=facts)
@@ -1582,7 +1407,6 @@ class OpenShiftFacts(object):
hostname=hostname,
public_hostname=hostname,
portal_net='172.30.0.0/16',
- client_binary='oc',
dns_domain='cluster.local',
config_base='/etc/origin')
@@ -1607,10 +1431,7 @@ class OpenShiftFacts(object):
max_requests_inflight=500)
if 'node' in roles:
- defaults['node'] = dict(labels={}, annotations={},
- iptables_sync_period='30s',
- local_quota_per_fsgroup="",
- set_node_ip=False)
+ defaults['node'] = dict(labels={})
if 'cloudprovider' in roles:
defaults['cloudprovider'] = dict(kind=None)
diff --git a/roles/openshift_grafana/defaults/main.yml b/roles/openshift_grafana/defaults/main.yml
new file mode 100644
index 000000000..7fd7a085d
--- /dev/null
+++ b/roles/openshift_grafana/defaults/main.yml
@@ -0,0 +1,12 @@
+---
+gf_body_tmp:
+ name: grafana_name
+ type: prometheus
+ typeLogoUrl: ''
+ access: proxy
+ url: prometheus_url
+ basicAuth: false
+ withCredentials: false
+ jsonData:
+ tlsSkipVerify: true
+ token: satoken
diff --git a/roles/openshift_grafana/files/grafana-ocp-oauth.yml b/roles/openshift_grafana/files/grafana-ocp-oauth.yml
new file mode 100644
index 000000000..82fa89004
--- /dev/null
+++ b/roles/openshift_grafana/files/grafana-ocp-oauth.yml
@@ -0,0 +1,661 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: grafana-ocp
+ annotations:
+ "openshift.io/display-name": Grafana ocp
+ description: |
+ Grafana server with patched Prometheus datasource.
+ iconClass: icon-cogs
+ tags: "metrics,monitoring,grafana,prometheus"
+parameters:
+- description: The location of the proxy image
+ name: IMAGE_GF
+ value: mrsiano/grafana-ocp:latest
+- description: The location of the proxy image
+ name: IMAGE_PROXY
+ value: openshift/oauth-proxy:v1.0.0
+- description: External URL for the grafana route
+ name: ROUTE_URL
+ value: ""
+- description: The namespace to instantiate grafana under. Defaults to 'grafana'.
+ name: NAMESPACE
+ value: grafana
+- description: The session secret for the proxy
+ name: SESSION_SECRET
+ generate: expression
+ from: "[a-zA-Z0-9]{43}"
+objects:
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ annotations:
+ serviceaccounts.openshift.io/oauth-redirectreference.primary: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"grafana-ocp"}}'
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: gf-cluster-reader
+ roleRef:
+ name: cluster-reader
+ subjects:
+ - kind: ServiceAccount
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+- apiVersion: route.openshift.io/v1
+ kind: Route
+ metadata:
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ spec:
+ host: "${ROUTE_URL}"
+ to:
+ name: grafana-ocp
+ tls:
+ termination: Reencrypt
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: grafana-ocp
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/scheme: https
+ service.alpha.openshift.io/serving-cert-secret-name: gf-tls
+ namespace: "${NAMESPACE}"
+ labels:
+ metrics-infra: grafana-ocp
+ name: grafana-ocp
+ spec:
+ ports:
+ - name: grafana-ocp
+ port: 443
+ protocol: TCP
+ targetPort: 8443
+ selector:
+ app: grafana-ocp
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: gf-proxy
+ namespace: "${NAMESPACE}"
+ stringData:
+ session_secret: "${SESSION_SECRET}="
+# Deploy Grafana behind an oauth proxy
+- apiVersion: extensions/v1beta1
+ kind: Deployment
+ metadata:
+ labels:
+ app: grafana-ocp
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: grafana-ocp
+ template:
+ metadata:
+ labels:
+ app: grafana-ocp
+ name: grafana-ocp-app
+ spec:
+ serviceAccountName: grafana-ocp
+ containers:
+ - name: oauth-proxy
+ image: ${IMAGE_PROXY}
+ imagePullPolicy: IfNotPresent
+ ports:
+ - containerPort: 8443
+ name: web
+ args:
+ - -https-address=:8443
+ - -http-address=
+ - -email-domain=*
+ - -client-id=system:serviceaccount:${NAMESPACE}:grafana-ocp
+ - -upstream=http://localhost:3000
+ - -provider=openshift
+# - '-openshift-delegate-urls={"/api/datasources": {"resource": "namespace", "verb": "get", "resourceName": "grafana-ocp", "namespace": "${NAMESPACE}"}}'
+ - '-openshift-sar={"namespace": "${NAMESPACE}", "verb": "list", "resource": "services"}'
+ - -tls-cert=/etc/tls/private/tls.crt
+ - -tls-key=/etc/tls/private/tls.key
+ - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
+ - -cookie-secret-file=/etc/proxy/secrets/session_secret
+ - -skip-auth-regex=^/metrics,/api/datasources,/api/dashboards
+ volumeMounts:
+ - mountPath: /etc/tls/private
+ name: gf-tls
+ - mountPath: /etc/proxy/secrets
+ name: secrets
+
+ - name: grafana-ocp
+ image: ${IMAGE_GF}
+ ports:
+ - name: grafana-http
+ containerPort: 3000
+ volumeMounts:
+ - mountPath: "/root/go/src/github.com/grafana/grafana/data"
+ name: gf-data
+ - mountPath: "/root/go/src/github.com/grafana/grafana/conf"
+ name: gfconfig
+ - mountPath: /etc/tls/private
+ name: gf-tls
+ - mountPath: /etc/proxy/secrets
+ name: secrets
+ command:
+ - "./bin/grafana-server"
+
+ volumes:
+ - name: gfconfig
+ configMap:
+ name: gf-config
+ - name: secrets
+ secret:
+ secretName: gf-proxy
+ - name: gf-tls
+ secret:
+ secretName: gf-tls
+ - emptyDir: {}
+ name: gf-data
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: gf-config
+ namespace: "${NAMESPACE}"
+ data:
+ defaults.ini: |-
+ ##################### Grafana Configuration Defaults #####################
+ #
+ # Do not modify this file in grafana installs
+ #
+
+ # possible values : production, development
+ app_mode = production
+
+ # instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
+ instance_name = ${HOSTNAME}
+
+ #################################### Paths ###############################
+ [paths]
+ # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
+ #
+ data = data
+ #
+ # Directory where grafana can store logs
+ #
+ logs = data/log
+ #
+ # Directory where grafana will automatically scan and look for plugins
+ #
+ plugins = data/plugins
+
+ #################################### Server ##############################
+ [server]
+ # Protocol (http, https, socket)
+ protocol = http
+
+ # The ip address to bind to, empty will bind to all interfaces
+ http_addr =
+
+ # The http port to use
+ http_port = 3000
+
+ # The public facing domain name used to access grafana from a browser
+ domain = localhost
+
+ # Redirect to correct domain if host header does not match domain
+ # Prevents DNS rebinding attacks
+ enforce_domain = false
+
+ # The full public facing url
+ root_url = %(protocol)s://%(domain)s:%(http_port)s/
+
+ # Log web requests
+ router_logging = false
+
+ # the path relative working path
+ static_root_path = public
+
+ # enable gzip
+ enable_gzip = false
+
+ # https certs & key file
+ cert_file = /etc/tls/private/tls.crt
+ cert_key = /etc/tls/private/tls.key
+
+ # Unix socket path
+ socket = /tmp/grafana.sock
+
+ #################################### Database ############################
+ [database]
+ # You can configure the database connection by specifying type, host, name, user and password
+ # as separate properties or as one string using the url property.
+
+ # Either "mysql", "postgres" or "sqlite3", it's your choice
+ type = sqlite3
+ host = 127.0.0.1:3306
+ name = grafana
+ user = root
+ # If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
+ password =
+ # Use either URL or the previous fields to configure the database
+ # Example: mysql://user:secret@host:port/database
+ url =
+
+ # Max idle conn setting default is 2
+ max_idle_conn = 2
+
+ # Max conn setting default is 0 (mean not set)
+ max_open_conn =
+
+ # For "postgres", use either "disable", "require" or "verify-full"
+ # For "mysql", use either "true", "false", or "skip-verify".
+ ssl_mode = disable
+
+ ca_cert_path =
+ client_key_path =
+ client_cert_path =
+ server_cert_name =
+
+ # For "sqlite3" only, path relative to data_path setting
+ path = grafana.db
+
+ #################################### Session #############################
+ [session]
+ # Either "memory", "file", "redis", "mysql", "postgres", "memcache", default is "file"
+ provider = file
+
+ # Provider config options
+ # memory: does not have any config yet
+ # file: session dir path, is relative to grafana data_path
+ # redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana`
+ # postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable
+ # mysql: go-sql-driver/mysql dsn config string, examples:
+ # `user:password@tcp(127.0.0.1:3306)/database_name`
+ # `user:password@unix(/var/run/mysqld/mysqld.sock)/database_name`
+ # memcache: 127.0.0.1:11211
+
+
+ provider_config = sessions
+
+ # Session cookie name
+ cookie_name = grafana_sess
+
+ # If you use session in https only, default is false
+ cookie_secure = false
+
+ # Session life time, default is 86400
+ session_life_time = 86400
+ gc_interval_time = 86400
+
+ #################################### Data proxy ###########################
+ [dataproxy]
+
+ # This enables data proxy logging, default is false
+ logging = false
+
+ #################################### Analytics ###########################
+ [analytics]
+ # Server reporting, sends usage counters to stats.grafana.org every 24 hours.
+ # No ip addresses are being tracked, only simple counters to track
+ # running instances, dashboard and error counts. It is very helpful to us.
+ # Change this option to false to disable reporting.
+ reporting_enabled = true
+
+ # Set to false to disable all checks to https://grafana.com
+ # for new versions (grafana itself and plugins), check is used
+ # in some UI views to notify that grafana or plugin update exists
+ # This option does not cause any auto updates, nor send any information
+ # only a GET request to https://grafana.com to get latest versions
+ check_for_updates = true
+
+ # Google Analytics universal tracking code, only enabled if you specify an id here
+ google_analytics_ua_id =
+
+ # Google Tag Manager ID, only enabled if you specify an id here
+ google_tag_manager_id =
+
+ #################################### Security ############################
+ [security]
+ # default admin user, created on startup
+ admin_user = admin
+
+ # default admin password, can be changed before first start of grafana, or in profile settings
+ admin_password = admin
+
+ # used for signing
+ secret_key = SW2YcwTIb9zpOOhoPsMm
+
+ # Auto-login remember days
+ login_remember_days = 7
+ cookie_username = grafana_user
+ cookie_remember_name = grafana_remember
+
+ # disable gravatar profile images
+ disable_gravatar = false
+
+ # data source proxy whitelist (ip_or_domain:port separated by spaces)
+ data_source_proxy_whitelist =
+
+ [snapshots]
+ # snapshot sharing options
+ external_enabled = true
+ external_snapshot_url = https://snapshots-origin.raintank.io
+ external_snapshot_name = Publish to snapshot.raintank.io
+
+ # remove expired snapshot
+ snapshot_remove_expired = true
+
+ # remove snapshots after 90 days
+ snapshot_TTL_days = 90
+
+ #################################### Users ####################################
+ [users]
+ # disable user signup / registration
+ allow_sign_up = true
+
+ # Allow non admin users to create organizations
+ allow_org_create = true
+
+ # Set to true to automatically assign new users to the default organization (id 1)
+ auto_assign_org = true
+
+ # Default role new users will be automatically assigned (if auto_assign_org above is set to true)
+ auto_assign_org_role = Admin
+
+ # Require email validation before sign up completes
+ verify_email_enabled = false
+
+ # Background text for the user field on the login page
+ login_hint = email or username
+
+ # Default UI theme ("dark" or "light")
+ default_theme = dark
+
+ # External user management
+ external_manage_link_url =
+ external_manage_link_name =
+ external_manage_info =
+
+ [auth]
+ # Set to true to disable (hide) the login form, useful if you use OAuth
+ disable_login_form = true
+
+ # Set to true to disable the signout link in the side menu. useful if you use auth.proxy
+ disable_signout_menu = true
+
+ #################################### Anonymous Auth ######################
+ [auth.anonymous]
+ # enable anonymous access
+ enabled = true
+
+ # specify organization name that should be used for unauthenticated users
+ org_name = Main Org.
+
+ # specify role for unauthenticated users
+ org_role = Admin
+
+ #################################### Github Auth #########################
+ [auth.github]
+ enabled = false
+ allow_sign_up = true
+ client_id = some_id
+ client_secret = some_secret
+ scopes = user:email
+ auth_url = https://github.com/login/oauth/authorize
+ token_url = https://github.com/login/oauth/access_token
+ api_url = https://api.github.com/user
+ team_ids =
+ allowed_organizations =
+
+ #################################### Google Auth #########################
+ [auth.google]
+ enabled = false
+ allow_sign_up = true
+ client_id = some_client_id
+ client_secret = some_client_secret
+ scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email
+ auth_url = https://accounts.google.com/o/oauth2/auth
+ token_url = https://accounts.google.com/o/oauth2/token
+ api_url = https://www.googleapis.com/oauth2/v1/userinfo
+ allowed_domains =
+ hosted_domain =
+
+ #################################### Grafana.com Auth ####################
+ # legacy key names (so they work in env variables)
+ [auth.grafananet]
+ enabled = false
+ allow_sign_up = true
+ client_id = some_id
+ client_secret = some_secret
+ scopes = user:email
+ allowed_organizations =
+
+ [auth.grafana_com]
+ enabled = false
+ allow_sign_up = true
+ client_id = some_id
+ client_secret = some_secret
+ scopes = user:email
+ allowed_organizations =
+
+ #################################### Generic OAuth #######################
+ [auth.generic_oauth]
+ name = OAuth
+ enabled = false
+ allow_sign_up = true
+ client_id = some_id
+ client_secret = some_secret
+ scopes = user:email
+ auth_url =
+ token_url =
+ api_url =
+ team_ids =
+ allowed_organizations =
+
+ #################################### Basic Auth ##########################
+ [auth.basic]
+ enabled = false
+
+ #################################### Auth Proxy ##########################
+ [auth.proxy]
+ enabled = true
+ header_name = X-WEBAUTH-USER
+ header_property = username
+ auto_sign_up = true
+ ldap_sync_ttl = 60
+ whitelist =
+
+ #################################### Auth LDAP ###########################
+ [auth.ldap]
+ enabled = false
+ config_file = /etc/grafana/ldap.toml
+ allow_sign_up = true
+
+ #################################### SMTP / Emailing #####################
+ [smtp]
+ enabled = false
+ host = localhost:25
+ user =
+ # If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
+ password =
+ cert_file =
+ key_file =
+ skip_verify = false
+ from_address = admin@grafana.localhost
+ from_name = Grafana
+ ehlo_identity =
+
+ [emails]
+ welcome_email_on_sign_up = false
+ templates_pattern = emails/*.html
+
+ #################################### Logging ##########################
+ [log]
+ # Either "console", "file", "syslog". Default is console and file
+ # Use space to separate multiple modes, e.g. "console file"
+ mode = console file
+
+ # Either "debug", "info", "warn", "error", "critical", default is "info"
+ level = error
+
+ # optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug
+ filters =
+
+ # For "console" mode only
+ [log.console]
+ level =
+
+ # log line format, valid options are text, console and json
+ format = console
+
+ # For "file" mode only
+ [log.file]
+ level =
+
+ # log line format, valid options are text, console and json
+ format = text
+
+ # This enables automated log rotate(switch of following options), default is true
+ log_rotate = true
+
+ # Max line number of single file, default is 1000000
+ max_lines = 1000000
+
+ # Max size shift of single file, default is 28 means 1 << 28, 256MB
+ max_size_shift = 28
+
+ # Segment log daily, default is true
+ daily_rotate = true
+
+ # Expired days of log file(delete after max days), default is 7
+ max_days = 7
+
+ [log.syslog]
+ level =
+
+ # log line format, valid options are text, console and json
+ format = text
+
+ # Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
+ network =
+ address =
+
+ # Syslog facility. user, daemon and local0 through local7 are valid.
+ facility =
+
+ # Syslog tag. By default, the process' argv[0] is used.
+ tag =
+
+
+ #################################### AMQP Event Publisher ################
+ [event_publisher]
+ enabled = false
+ rabbitmq_url = amqp://localhost/
+ exchange = grafana_events
+
+ #################################### Dashboard JSON files ################
+ [dashboards.json]
+ enabled = false
+ path = /var/lib/grafana/dashboards
+
+ #################################### Usage Quotas ########################
+ [quota]
+ enabled = false
+
+ #### set quotas to -1 to make unlimited. ####
+ # limit number of users per Org.
+ org_user = 10
+
+ # limit number of dashboards per Org.
+ org_dashboard = 100
+
+ # limit number of data_sources per Org.
+ org_data_source = 10
+
+ # limit number of api_keys per Org.
+ org_api_key = 10
+
+ # limit number of orgs a user can create.
+ user_org = 10
+
+ # Global limit of users.
+ global_user = -1
+
+ # global limit of orgs.
+ global_org = -1
+
+ # global limit of dashboards
+ global_dashboard = -1
+
+ # global limit of api_keys
+ global_api_key = -1
+
+ # global limit on number of logged in users.
+ global_session = -1
+
+ #################################### Alerting ############################
+ [alerting]
+ # Disable alerting engine & UI features
+ enabled = true
+ # Makes it possible to turn off alert rule execution but alerting UI is visible
+ execute_alerts = true
+
+ #################################### Internal Grafana Metrics ############
+ # Metrics available at HTTP API Url /api/metrics
+ [metrics]
+ enabled = true
+ interval_seconds = 10
+
+ # Send internal Grafana metrics to graphite
+ [metrics.graphite]
+ # Enable by setting the address setting (ex localhost:2003)
+ address =
+ prefix = prod.grafana.%(instance_name)s.
+
+ [grafana_net]
+ url = https://grafana.com
+
+ [grafana_com]
+ url = https://grafana.com
+
+ #################################### Distributed tracing ############
+ [tracing.jaeger]
+ # jaeger destination (ex localhost:6831)
+ address =
+ # tag that will always be included when creating new spans. ex (tag1:value1,tag2:value2)
+ always_included_tag =
+ # Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
+ sampler_type = const
+ # jaeger samplerconfig param
+ # for "const" sampler, 0 or 1 for always false/true respectively
+ # for "probabilistic" sampler, a probability between 0 and 1
+ # for "rateLimiting" sampler, the number of spans per second
+ # for "remote" sampler, param is the same as for "probabilistic"
+ # and indicates the initial sampling rate before the actual one
+ # is received from the mothership
+ sampler_param = 1
+
+ #################################### External Image Storage ##############
+ [external_image_storage]
+ # You can choose between (s3, webdav, gcs)
+ provider =
+
+ [external_image_storage.s3]
+ bucket_url =
+ bucket =
+ region =
+ path =
+ access_key =
+ secret_key =
+
+ [external_image_storage.webdav]
+ url =
+ username =
+ password =
+ public_url =
+
+ [external_image_storage.gcs]
+ key_file =
+ bucket =
diff --git a/roles/openshift_grafana/files/grafana-ocp.yml b/roles/openshift_grafana/files/grafana-ocp.yml
new file mode 100644
index 000000000..bc7b4b286
--- /dev/null
+++ b/roles/openshift_grafana/files/grafana-ocp.yml
@@ -0,0 +1,76 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: grafana-ocp
+ annotations:
+ "openshift.io/display-name": Grafana ocp
+ description: |
+ Grafana server with patched Prometheus datasource.
+ iconClass: icon-cogs
+ tags: "metrics,monitoring,grafana,prometheus"
+parameters:
+- description: External URL for the grafana route
+ name: ROUTE_URL
+ value: ""
+- description: The namespace to instantiate grafana under. Defaults to 'grafana'.
+ name: NAMESPACE
+ value: grafana
+objects:
+- apiVersion: route.openshift.io/v1
+ kind: Route
+ metadata:
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ spec:
+ host: "${ROUTE_URL}"
+ to:
+ name: grafana-ocp
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ labels:
+ metrics-infra: grafana-ocp
+ name: grafana-ocp
+ spec:
+ selector:
+ name: grafana-ocp
+ ports:
+ - port: 8082
+ protocol: TCP
+ targetPort: grafana-http
+- apiVersion: v1
+ kind: ReplicationController
+ metadata:
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ labels:
+ metrics-infra: grafana-ocp
+ name: grafana-ocp
+ spec:
+ selector:
+ name: grafana-ocp
+ replicas: 1
+ template:
+ version: v1
+ metadata:
+ labels:
+ metrics-infra: grafana-ocp
+ name: grafana-ocp
+ spec:
+ volumes:
+ - name: data
+ emptyDir: {}
+ containers:
+ - image: "mrsiano/grafana-ocp:latest"
+ name: grafana-ocp
+ ports:
+ - name: grafana-http
+ containerPort: 3000
+ volumeMounts:
+ - name: data
+ mountPath: "/root/go/src/github.com/grafana/grafana/data"
+ command:
+ - "./bin/grafana-server"
diff --git a/roles/openshift_grafana/files/openshift-cluster-monitoring.json b/roles/openshift_grafana/files/openshift-cluster-monitoring.json
new file mode 100644
index 000000000..f59ca997f
--- /dev/null
+++ b/roles/openshift_grafana/files/openshift-cluster-monitoring.json
@@ -0,0 +1,5138 @@
+{
+ "dashboard": {
+ "description": "Monitors Openshift cluster using Prometheus. Shows overall cluster CPU / Memory / Filesystem usage as well as individual pod, containers, systemd services statistics. Uses cAdvisor metrics only.",
+ "editable": true,
+ "gnetId": 315,
+ "graphTooltip": 0,
+ "hideControls": false,
+ "id": null,
+ "links": [],
+ "rows": [
+ {
+ "collapse": false,
+ "height": "200px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "height": "200px",
+ "id": 32,
+ "legend": {
+ "alignAsTable": false,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": false,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate (container_network_receive_bytes_total{kubernetes_io_hostname=~\"^$Node$\"}[2m]))",
+ "format": "time_series",
+ "instant": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "Received",
+ "metric": "network",
+ "refId": "A",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{kubernetes_io_hostname=~\"^$Node$\"}[2m]))",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "Sent",
+ "metric": "network",
+ "refId": "B",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Network I/O pressure",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "transparent": false,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Network I/O pressure",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "editable": true,
+ "error": false,
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "180px",
+ "id": 4,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) / sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"}) * 100",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Cluster memory usage",
+ "transparent": false,
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "180px",
+ "id": 6,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) / sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"}) * 100",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Cluster CPU usage ",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "180px",
+ "id": 7,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/mapper/docker_.*\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) / sum (container_fs_limit_bytes{device=~\"^/dev/mapper/docker_.*\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) * 100",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "metric": "",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Cluster filesystem usage",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "bytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 9,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "20%",
+ "prefix": "",
+ "prefixFontSize": "20%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Used",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "bytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 10,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Total",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 11,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": " cores",
+ "postfixFontSize": "30%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m]))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Used",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 12,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": " cores",
+ "postfixFontSize": "30%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Total",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "bytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 13,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/mapper/docker_.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Used",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "bytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 14,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_fs_limit_bytes{device=~\"^/dev/mapper/docker_.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Total",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Total usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 33,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) / sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"}) ",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "overall cpu usage",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Cluster CPU Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 3,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "height": "",
+ "id": 17,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (pod_name) * 100",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{ pod_name }}",
+ "metric": "container_cpu",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Pods CPU usage ",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "transparent": false,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "label": "% Usage",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Pods CPU usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 3,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "height": "",
+ "id": 24,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": null,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",container_name!=\"POD\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (container_name, pod_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "pod: {{ pod_name }} | {{ container_name }}",
+ "metric": "container_cpu",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Containers Cores Usage",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": "cores",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Containers CPU usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 3,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "height": "",
+ "id": 23,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ id }}",
+ "metric": "container_cpu",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "System services CPU usage ",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": "cores",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "System services CPU usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 411,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 3,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 34,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (irate (container_memory_usage_bytes{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ id }}",
+ "metric": "container_cpu",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "All processes Memory usage ",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": "cores",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "All processes CPU usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 25,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}) by (pod_name)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ pod_name }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "A",
+ "step": 10
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Pods memory usage",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Pods memory usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 26,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (container_memory_rss{systemd_service_name=\"\",kubernetes_io_hostname=~\"^$Node$\"}) by (systemd_service_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ systemd_service_name }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "B",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "System services memory usage",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "System services memory usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 27,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",container_name!=\"POD\",kubernetes_io_hostname=~\"^$Node$\"}) by (container_name, pod_name)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "pod: {{ pod_name }} | {{ container_name }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "A",
+ "step": 10
+ },
+ {
+ "expr": "sum (container_memory_working_set_bytes{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}) by (kubernetes_io_hostname, name, image)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "B",
+ "step": 10
+ },
+ {
+ "expr": "sum (container_memory_working_set_bytes{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}) by (kubernetes_io_hostname, rkt_container_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "C",
+ "step": 10
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Containers memory usage",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Containers memory usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "500px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 28,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) by (id)",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ id }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "All processes memory usage",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "All processes memory usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 30,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (container_name, pod_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "-> pod: {{ pod_name }} | {{ container_name }}",
+ "metric": "network",
+ "refId": "B",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (container_name, pod_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "<- pod: {{ pod_name }} | {{ container_name }}",
+ "metric": "network",
+ "refId": "D",
+ "step": 1
+ },
+ {
+ "expr": "sum (irate (container_network_receive_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, name, image)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "-> docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})",
+ "metric": "network",
+ "refId": "A",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, name, image)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "<- docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})",
+ "metric": "network",
+ "refId": "C",
+ "step": 1
+ },
+ {
+ "expr": "sum (irate (container_network_transmit_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, rkt_container_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "-> rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}",
+ "metric": "network",
+ "refId": "E",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, rkt_container_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "<- rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}",
+ "metric": "network",
+ "refId": "F",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Containers network I/O ",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Containers network I/O",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 277,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 16,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (pod_name)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "-> {{ pod_name }}",
+ "metric": "network",
+ "refId": "A",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (pod_name)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "<- {{ pod_name }}",
+ "metric": "network",
+ "refId": "B",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Pods network I/O ",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Pods network I/O",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "500px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 29,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate (container_network_receive_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)",
+ "format": "time_series",
+ "instant": true,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "-> {{ id }}",
+ "metric": "network",
+ "refId": "A",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "<- {{ id }}",
+ "metric": "network",
+ "refId": "B",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "All processes network I/O ",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "All processes network I/O",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 35,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(openshift_build_total) by (phase,reason)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ phase }} | {{ reason }}",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_build_total",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 54,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "count(openshift_build_active_time_seconds{phase=\"running\"} offset 10m)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the number of builds that have been running for more than 10 minutes (600 seconds).",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 55,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "count(openshift_build_active_time_seconds{phase=\"pending\"} offset 10m)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the number of build that have been waiting at least 10 minutes (600 seconds) to start.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 56,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(openshift_build_total{phase=\"Failed\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the number of failed builds, regardless of the failure reason.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 57,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "openshift_build_total{phase=\"Failed\",reason=\"FetchSourceFailed\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{ instance }}",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the number of failed builds because of problems retrieving source from the associated Git repository.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": true,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 58,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": false,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(openshift_build_total{phase=\"Complete\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the number of successfully completed builds.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 0,
+ "id": 59,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "openshift_build_total{phase=\"Failed\"} offset 5m",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ reason }}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the failed builds totals, per failure reason, from 5 minutes ago.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "OpenShift Builds",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 36,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(openshift_sdn_pod_setup_latency_sum)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_sdn_pod_setup_latency_sum",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 41,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(openshift_sdn_pod_teardown_latency{quantile=\"0.9\"}) by (instance)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_sdn_pod_teardown_latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 50,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "topk(10, (sum by (pod_name) (irate(container_network_receive_bytes_total{pod_name!=\"\"}[5m]))))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{ pod_name }}",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Top 10 pods doing the most receive network traffic",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "decbytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 37,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "openshift_sdn_pod_ips",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ instance }} | {{ role }}",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_sdn_pod_ips",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 39,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "garbage_collector_monitoring_route:openshift:io_v1_rate_limiter_use",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "garbage_collector_monitoring_route:openshift:io_v1_rate_limiter_use",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 42,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "openshift_sdn_arp_cache_entries",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ role }} | {{ instance }}",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_sdn_arp_cache_entries",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 40,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "openshift_sdn_arp_cache_entries",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_sdn_arp_cache_entries",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "OpenShift SDN",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 44,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "irate(kubelet_pleg_relist_latency_microseconds{kubernetes_io_hostname=~\"$Node\",quantile=\"0.9\"}[2m])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ role }} | {{ instance }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "kubelet_pleg_relist",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "µs",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 51,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "irate(kubelet_docker_operations_latency_microseconds{quantile=\"0.9\"}[2m])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ operation_type }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "kubelet_docker_operations_latency_microseconds{quantile=\"0.9\"}",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "µs",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 52,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "kubelet_docker_operations_timeout",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ operation_type }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns a running count (not a rate) of docker operations that have timed out since the kubelet was started.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 53,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "kubelet_docker_operations_errors",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{ operation_type }}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns a running count (not a rate) of docker operations that have failed since the kubelet was started.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Kubelet",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 46,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "irate(scrape_samples_scraped[2m])",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{ kubernetes_name }} | {{ instance }} ",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "scrape_samples_scraped",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 68,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum without (cpu) (irate(container_cpu_usage_seconds_total{container_name=\"prometheus\"}[5m])))",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU per instance of Prometheus container.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Prometheus",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 48,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum without (instance,type,client,contentType) (irate(apiserver_request_count{verb!~\"GET|LIST|WATCH\"}[2m]))) > 0",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ resource }} || {{ verb }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Number of mutating API requests being made to the control plane.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 49,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum without (instance,type,client,contentType) (irate(apiserver_request_count{verb=~\"GET|LIST|WATCH\"}[2m]))) > 0",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ resource }} || {{ pod }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Number of non-mutating API requests being made to the control plane.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 74,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "endpoint_queue_latency",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": " quantile {{ quantile }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "endpoint_queue_latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "ms",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "API Server",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 61,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "etcd_disk_wal_fsync_duration_seconds_count",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 10
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "etcd_disk_wal_fsync_duration_seconds_count",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "etcd",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 62,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(changes(container_start_time_seconds[10m]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "The number of containers that start or restart over the last ten minutes.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Changes in your cluster",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 63,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(machine_cpu_cores)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Total number of cores in the cluster.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 64,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(sort_desc(irate(container_cpu_usage_seconds_total{id=\"/\"}[5m])))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Total number of consumed cores.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 65,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum by (kubernetes_io_hostname,type) (irate(container_cpu_usage_seconds_total{id=\"/\"}[5m])))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU consumed per node in the cluster.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 66,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum by (cpu,id,pod_name,container_name) (irate(container_cpu_usage_seconds_total{role=\"infra\"}[5m])))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU consumption per system service or container on the infrastructure nodes.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 67,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum by (namespace) (irate(container_cpu_usage_seconds_total[5m])))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU consumed per namespace on the cluster.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 47,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(irate(container_cpu_usage_seconds_total{id=\"/\"}[3m])) / sum(machine_cpu_cores)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Percentage of total cluster CPU in use",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 69,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(container_memory_rss) / sum(machine_memory_bytes)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Percentage of total cluster memory in use",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 70,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum by (kubernetes_io_hostname) (irate(container_cpu_usage_seconds_total{id=~\"/system.slice/(docker|etcd).service\"}[5m]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Aggregate CPU usage (seconds total) of etcd+docker",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "System and container CPU",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 71,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+ {
+ "title": "Kubernetes Storage Metrics via Prometheus",
+ "type": "absolute",
+ "url": "https://docs.google.com/document/d/1Fh0T60T_y888LsRwC51CQHO75b2IZ3A34ZQS71s_F0g"
+ }
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "volumes_queue_latency",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "volumes_queue_latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 72,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+ {
+ "title": "Kubernetes Storage Metrics via Prometheus",
+ "type": "absolute",
+ "url": "https://docs.google.com/document/d/1Fh0T60T_y888LsRwC51CQHO75b2IZ3A34ZQS71s_F0g"
+ }
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "irate(cloudprovider_gce_api_request_duration_seconds_count[2m])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ request }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "cloudprovider_aws_api_request_duration_seconds_count",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 73,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+ {
+ "title": "Kubernetes Storage Metrics via Prometheus",
+ "type": "absolute",
+ "url": "https://docs.google.com/document/d/1Fh0T60T_y888LsRwC51CQHO75b2IZ3A34ZQS71s_F0g"
+ }
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate(storage_operation_duration_seconds_sum{kubernetes_io_hostname=~\"$Node\"}[2m])) by (operation_name,kubernetes_io_hostname)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ operation_name }} || {{ kubernetes_io_hostname }}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "storage_operation_duration_seconds_sum",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "OpenShift Volumes",
+ "titleSize": "h6"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [
+ "kubernetes",
+ "openshift"
+ ],
+ "templating": {
+ "list": [
+ {
+ "allValue": ".*",
+ "current": {},
+ "datasource": "${DS_PR}",
+ "hide": 0,
+ "includeAll": true,
+ "label": null,
+ "multi": false,
+ "name": "Node",
+ "options": [],
+ "query": "label_values(kubernetes_io_hostname)",
+ "refresh": 1,
+ "regex": "",
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-30m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "1s",
+ "2m",
+ "20s",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "openshift cluster monitoring",
+ "version": 6
+ }
+}
diff --git a/roles/openshift_grafana/meta/main.yml b/roles/openshift_grafana/meta/main.yml
new file mode 100644
index 000000000..8dea6f197
--- /dev/null
+++ b/roles/openshift_grafana/meta/main.yml
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+ author: Eldad Marciano
+  description: Set up the Grafana pod
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.3
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - metrics
diff --git a/roles/openshift_grafana/tasks/gf-permissions.yml b/roles/openshift_grafana/tasks/gf-permissions.yml
new file mode 100644
index 000000000..9d3c741ee
--- /dev/null
+++ b/roles/openshift_grafana/tasks/gf-permissions.yml
@@ -0,0 +1,12 @@
+---
+- name: Create the gfadmin user in the htpasswd file
+ command: htpasswd -c /etc/origin/master/htpasswd gfadmin
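+# NOTE: without -b, htpasswd prompts for a password and would hang under
+# Ansible. A non-interactive sketch (gf_htpasswd_password is an assumed
+# variable, not defined by this role):
+#   htpasswd -b -c /etc/origin/master/htpasswd gfadmin "{{ gf_htpasswd_password }}"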
+
+- name: Make sure master config uses HTPasswdPasswordIdentityProvider
+ command: "sed -ie 's|AllowAllPasswordIdentityProvider|HTPasswdPasswordIdentityProvider\n file: /etc/origin/master/htpasswd|' /etc/origin/master/master-config.yaml"
+
+- name: Grant cluster-reader permission to gfadmin
+ command: oc adm policy add-cluster-role-to-user cluster-reader gfadmin
+
+- name: Restart master api
+ command: systemctl restart atomic-openshift-master-api.service
diff --git a/roles/openshift_grafana/tasks/main.yml b/roles/openshift_grafana/tasks/main.yml
new file mode 100644
index 000000000..6a06d40a9
--- /dev/null
+++ b/roles/openshift_grafana/tasks/main.yml
@@ -0,0 +1,122 @@
+---
+- name: Create grafana namespace
+ oc_project:
+ state: present
+ name: grafana
+
+- name: Configure Grafana Permissions
+  include_tasks: gf-permissions.yml
+  when: gf_oauth | default(false) | bool
+
+# TODO: we should grab this yaml file from openshift/origin
+- name: Templatize grafana yaml
+  template: src=grafana-ocp.yaml dest=/tmp/grafana-ocp.yaml
+  when: not (gf_oauth | default(false) | bool)
+
+- name: Record the templated file path
+  set_fact:
+    cl_file: /tmp/grafana-ocp.yaml
+  when: not (gf_oauth | default(false) | bool)
+
+# TODO: we should grab this yaml file from openshift/origin
+- name: Templatize grafana yaml (oauth)
+  template: src=grafana-ocp-oauth.yaml dest=/tmp/grafana-ocp-oauth.yaml
+  when: gf_oauth | default(false) | bool
+
+- name: Record the templated file path (oauth)
+  set_fact:
+    cl_file: /tmp/grafana-ocp-oauth.yaml
+  when: gf_oauth | default(false) | bool
+
+- name: Process the grafana file
+  oc_process:
+    namespace: grafana
+    template_name: "{{ cl_file }}"
+    create: True
+
+- name: Wait for grafana to be running
+ command: oc rollout status deployment/grafana-ocp
+
+- name: Grant view role to the grafana-ocp service account in {{ gf_prometheus_namespace }}
+ oc_adm_policy_user:
+ user: grafana-ocp
+ resource_kind: cluster-role
+ resource_name: view
+ state: present
+ role_namespace: "{{ gf_prometheus_namespace }}"
+
+- name: Get grafana route
+ oc_obj:
+ kind: route
+ name: grafana
+ namespace: grafana
+  register: grafana_route
+
+- name: Get prometheus route
+ oc_obj:
+ kind: route
+ name: prometheus
+ namespace: "{{ gf_prometheus_namespace }}"
+  register: prometheus_route
+
+- name: Get the prometheus SA
+ oc_serviceaccount_secret:
+ state: list
+ service_account: prometheus
+ namespace: "{{ gf_prometheus_namespace }}"
+ register: sa
+
+- name: Get the name of the SA token secret
+ set_fact:
+ management_token: "{{ sa.results | oo_filter_sa_secrets }}"
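+# oo_filter_sa_secrets (from this repo's filter plugins) is expected to
+# return the name of the SA's token secret, which is read back below.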
+
+- name: Ensure the SA bearer token value is read
+ oc_secret:
+ state: list
+ name: "{{ management_token }}"
+ namespace: "{{ gf_prometheus_namespace }}"
+ no_log: True
+ register: sa_secret
+
+- name: Get the SA bearer token for prometheus
+ set_fact:
+ token: "{{ sa_secret.results.encoded.token }}"
+
+- name: Convert to json
+  set_fact:
+    ds_json: "{{ gf_body_tmp | to_json }}"
+
+- name: Set protocol type
+  set_fact:
+    protocol: "{{ 'https' if gf_oauth | default(false) | bool else 'http' }}"
+
+- name: Add gf datasource
+  uri:
+    url: "{{ protocol }}://{{ grafana_route }}/api/datasources"
+    user: admin
+    password: admin
+    method: POST
+    body: "{{ ds_json | regex_replace('grafana_name', gf_datasource_name) | regex_replace('prometheus_url', 'https://' ~ prometheus_route) | regex_replace('satoken', token) }}"
+    headers:
+      Content-Type: "application/json"
+  register: add_ds
+
+- name: Substitute datasource name into the dashboard json
+  replace:
+    # assumes the dashboard json ships in this role's files/ directory
+    path: "{{ role_path }}/files/openshift-cluster-monitoring.json"
+    regexp: '\$\{DS_PR\}'
+    replace: '{{ gf_datasource_name }}'
+    backup: yes
+
+- name: Add new dashboard
+  uri:
+    url: "{{ protocol }}://{{ grafana_route }}/api/dashboards/db"
+    user: admin
+    password: admin
+    method: POST
+    body: "{{ lookup('file', 'openshift-cluster-monitoring.json') }}"
+    headers:
+      Content-Type: "application/json"
+  register: add_dashboard
+
+- name: Restore the datasource placeholder in the dashboard json
+  replace:
+    path: "{{ role_path }}/files/openshift-cluster-monitoring.json"
+    regexp: '{{ gf_datasource_name }}'
+    replace: '${DS_PR}'
+    backup: yes
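+
+# Minimal invocation sketch (the namespace value below is an assumption):
+#
+#   - hosts: masters
+#     roles:
+#       - role: openshift_grafana
+#         gf_prometheus_namespace: openshift-metrics
+#         gf_datasource_name: prometheus
+#         gf_oauth: false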
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index dcaf87eca..c83adb26d 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -175,6 +175,8 @@ def format_failure(failure):
play = failure['play']
task = failure['task']
msg = failure['msg']
+ if not isinstance(msg, string_types):
+ msg = str(msg)
checks = failure['checks']
fields = (
(u'Hosts', host),
diff --git a/roles/openshift_health_checker/meta/main.yml b/roles/openshift_health_checker/meta/main.yml
index bc8e7bdcf..b8a59ee14 100644
--- a/roles/openshift_health_checker/meta/main.yml
+++ b/roles/openshift_health_checker/meta/main.yml
@@ -1,3 +1,4 @@
---
dependencies:
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index b7b16e0ea..b9c41d1b4 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -5,6 +5,7 @@ Health checks for OpenShift clusters.
import json
import operator
import os
+import re
import time
import collections
@@ -95,6 +96,13 @@ class OpenShiftCheck(object):
# These are intended to be a sequential record of what the check observed and determined.
self.logs = []
+ def template_var(self, var_to_template):
+ """Return a templated variable if self._templar is not None, else
+ just return the variable as-is"""
+ if self._templar is not None:
+ return self._templar.template(var_to_template)
+ return var_to_template
+
@abstractproperty
def name(self):
"""The name of this check, usually derived from the class name."""
@@ -302,28 +310,38 @@ class OpenShiftCheck(object):
name_list = name_list.split(',')
return [name.strip() for name in name_list if name.strip()]
- @staticmethod
- def get_major_minor_version(openshift_image_tag):
+ def get_major_minor_version(self, openshift_image_tag=None):
"""Parse and return the deployed version of OpenShift as a tuple."""
- if openshift_image_tag and openshift_image_tag[0] == 'v':
- openshift_image_tag = openshift_image_tag[1:]
- # map major release versions across releases
- # to a common major version
- openshift_major_release_version = {
- "1": "3",
- }
+ version = openshift_image_tag or self.get_var("openshift_image_tag")
+ components = [int(component) for component in re.findall(r'\d+', version)]
- components = openshift_image_tag.split(".")
- if not components or len(components) < 2:
+ if len(components) < 2:
msg = "An invalid version of OpenShift was found for this host: {}"
- raise OpenShiftCheckException(msg.format(openshift_image_tag))
+ raise OpenShiftCheckException(msg.format(version))
+
+ # map major release version across releases to OCP major version
+ components[0] = {1: 3}.get(components[0], components[0])
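+        # e.g. "v3.9.0-alpha.3" yields (3, 9); a legacy "v1.5.1" tag maps to (3, 5)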
+
+ return tuple(int(x) for x in components[:2])
+
+ def get_required_version(self, name, version_map):
+ """Return the correct required version(s) for the current (or nearest) OpenShift version."""
+ openshift_version = self.get_major_minor_version()
+
+ earliest = min(version_map)
+ latest = max(version_map)
+ if openshift_version < earliest:
+ return version_map[earliest]
+ if openshift_version > latest:
+ return version_map[latest]
- if components[0] in openshift_major_release_version:
- components[0] = openshift_major_release_version[components[0]]
+ required_version = version_map.get(openshift_version)
+ if not required_version:
+ msg = "There is no recommended version of {} for the current version of OpenShift ({})"
+ raise OpenShiftCheckException(msg.format(name, ".".join(str(comp) for comp in openshift_version)))
- components = tuple(int(x) for x in components[:2])
- return components
+ return required_version
def find_ansible_mount(self, path):
"""Return the mount point for path from ansible_mounts."""
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
index 87e6146d4..6e30a8610 100644
--- a/roles/openshift_health_checker/openshift_checks/disk_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -21,7 +21,7 @@ class DiskAvailability(OpenShiftCheck):
'oo_etcd_to_config': 20 * 10**9,
},
# Used to copy client binaries into,
- # see roles/openshift_cli/library/openshift_container_binary_sync.py.
+ # see roles/lib_utils/library/openshift_container_binary_sync.py.
'/usr/local/bin': {
'oo_masters_to_config': 1 * 10**9,
'oo_nodes_to_config': 1 * 10**9,
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 4f91f6bb3..ac6ffbbad 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -56,7 +56,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# ordered list of registries (according to inventory vars) that docker will try for unscoped images
regs = self.ensure_list("openshift_docker_additional_registries")
# currently one of these registries is added whether the user wants it or not.
- deployment_type = self.get_var("openshift_deployment_type")
+ deployment_type = self.get_var("openshift_deployment_type", default="")
if deployment_type == "origin" and "docker.io" not in regs:
regs.append("docker.io")
elif deployment_type == 'openshift-enterprise' and "registry.access.redhat.com" not in regs:
@@ -64,7 +64,9 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
self.registries["configured"] = regs
# for the oreg_url registry there may be credentials specified
- components = self.get_var("oreg_url", default="").split('/')
+ oreg_url = self.get_var("oreg_url", default="")
+ oreg_url = self.template_var(oreg_url)
+ components = oreg_url.split('/')
self.registries["oreg"] = "" if len(components) < 3 else components[0]
# Retrieve and template registry credentials, if provided
@@ -72,9 +74,8 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
oreg_auth_user = self.get_var('oreg_auth_user', default='')
oreg_auth_password = self.get_var('oreg_auth_password', default='')
if oreg_auth_user != '' and oreg_auth_password != '':
- if self._templar is not None:
- oreg_auth_user = self._templar.template(oreg_auth_user)
- oreg_auth_password = self._templar.template(oreg_auth_password)
+ oreg_auth_user = self.template_var(oreg_auth_user)
+ oreg_auth_password = self.template_var(oreg_auth_password)
self.skopeo_command_creds = "--creds={}:{}".format(quote(oreg_auth_user), quote(oreg_auth_password))
# record whether we could reach a registry or not (and remember results)
@@ -153,6 +154,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# template for images that run on top of OpenShift
image_url = "{}/{}-{}:{}".format(image_info["namespace"], image_info["name"], "${component}", "${version}")
image_url = self.get_var("oreg_url", default="") or image_url
+ image_url = self.template_var(image_url)
if 'oo_nodes_to_config' in host_groups:
for suffix in NODE_IMAGE_SUFFIXES:
required.add(image_url.replace("${component}", suffix).replace("${version}", image_tag))
@@ -160,7 +162,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
required.add(self._registry_console_image(image_tag, image_info))
# images for containerized components
- if self.get_var("openshift", "common", "is_containerized"):
+ if self.get_var("openshift_is_containerized"):
components = set()
if 'oo_nodes_to_config' in host_groups:
components.update(["node", "openvswitch"])
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
index 8b20ccb49..b56d2092b 100644
--- a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
+++ b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
@@ -20,8 +20,8 @@ class EtcdTraffic(OpenShiftCheck):
return super(EtcdTraffic, self).is_active() and valid_group_names and valid_version
def run(self):
- is_containerized = self.get_var("openshift", "common", "is_containerized")
- unit = "etcd_container" if is_containerized else "etcd"
+ openshift_is_containerized = self.get_var("openshift_is_containerized")
+ unit = "etcd_container" if openshift_is_containerized else "etcd"
log_matchers = [{
"start_regexp": r"Starting Etcd Server",
diff --git a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
index 986a01f38..7f8c6ebdc 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
@@ -170,7 +170,7 @@ class Elasticsearch(LoggingCheck):
"""
errors = []
for pod_name in pods_by_name.keys():
- df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
+ df_cmd = '-c elasticsearch exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
disk_output = self.exec_oc(df_cmd, [], save_as_name='get_pv_diskspace.json')
lines = disk_output.splitlines()
# expecting one header looking like 'IUse% Use%' and one body line
diff --git a/roles/openshift_health_checker/openshift_checks/logging/kibana.py b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
index 3b1cf8baa..16ec3a7f6 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/kibana.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
@@ -5,12 +5,11 @@ Module for performing checks on a Kibana logging deployment
import json
import ssl
-try:
- from urllib2 import HTTPError, URLError
- import urllib2
-except ImportError:
- from urllib.error import HTTPError, URLError
- import urllib.request as urllib2
+# pylint can't find the package when its installed in virtualenv
+# pylint: disable=import-error,no-name-in-module
+from ansible.module_utils.six.moves.urllib import request
+# pylint: disable=import-error,no-name-in-module
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from openshift_checks.logging.logging import LoggingCheck, OpenShiftCheckException
@@ -65,7 +64,7 @@ class Kibana(LoggingCheck):
# Verify that the url is returning a valid response
try:
# We only care if the url connects and responds
- return_code = urllib2.urlopen(url, context=ctx).getcode()
+ return_code = request.urlopen(url, context=ctx).getcode()
except HTTPError as httperr:
return httperr.reason
except URLError as urlerr:
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index cfbdea303..567162be1 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -10,8 +10,8 @@ class NotContainerizedMixin(object):
def is_active(self):
"""Only run on non-containerized hosts."""
- is_containerized = self.get_var("openshift", "common", "is_containerized")
- return super(NotContainerizedMixin, self).is_active() and not is_containerized
+ openshift_is_containerized = self.get_var("openshift_is_containerized")
+ return super(NotContainerizedMixin, self).is_active() and not openshift_is_containerized
class DockerHostMixin(object):
@@ -23,7 +23,7 @@ class DockerHostMixin(object):
"""Only run on hosts that depend on Docker."""
group_names = set(self.get_var("group_names", default=[]))
needs_docker = set(["oo_nodes_to_config"])
- if self.get_var("openshift.common.is_containerized"):
+ if self.get_var("openshift_is_containerized"):
needs_docker.update(["oo_masters_to_config", "oo_etcd_to_config"])
return super(DockerHostMixin, self).is_active() and bool(group_names.intersection(needs_docker))
@@ -33,7 +33,7 @@ class DockerHostMixin(object):
(which would not be able to install but should already have them).
Returns: msg, failed
"""
- if self.get_var("openshift", "common", "is_atomic"):
+ if self.get_var("openshift_is_atomic"):
return "", False
# NOTE: we would use the "package" module but it's actually an action plugin
diff --git a/roles/openshift_health_checker/openshift_checks/ovs_version.py b/roles/openshift_health_checker/openshift_checks/ovs_version.py
index 0cad19842..58a2692bd 100644
--- a/roles/openshift_health_checker/openshift_checks/ovs_version.py
+++ b/roles/openshift_health_checker/openshift_checks/ovs_version.py
@@ -3,7 +3,7 @@ Ansible module for determining if an installed version of Open vSwitch is incomp
currently installed version of OpenShift.
"""
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException
+from openshift_checks import OpenShiftCheck
from openshift_checks.mixins import NotContainerizedMixin
@@ -16,10 +16,12 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
tags = ["health"]
openshift_to_ovs_version = {
- "3.7": ["2.6", "2.7", "2.8"],
- "3.6": ["2.6", "2.7", "2.8"],
- "3.5": ["2.6", "2.7"],
- "3.4": "2.4",
+ (3, 4): "2.4",
+ (3, 5): ["2.6", "2.7"],
+ (3, 6): ["2.6", "2.7", "2.8"],
+ (3, 7): ["2.6", "2.7", "2.8"],
+ (3, 8): ["2.6", "2.7", "2.8"],
+ (3, 9): ["2.6", "2.7", "2.8"],
}
def is_active(self):
@@ -40,16 +42,5 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
return self.execute_module("rpm_version", args)
def get_required_ovs_version(self):
- """Return the correct Open vSwitch version for the current OpenShift version"""
- openshift_version_tuple = self.get_major_minor_version(self.get_var("openshift_image_tag"))
-
- if openshift_version_tuple < (3, 5):
- return self.openshift_to_ovs_version["3.4"]
-
- openshift_version = ".".join(str(x) for x in openshift_version_tuple)
- ovs_version = self.openshift_to_ovs_version.get(openshift_version)
- if ovs_version:
- return self.openshift_to_ovs_version[openshift_version]
-
- msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
- raise OpenShiftCheckException(msg.format(openshift_version))
+ """Return the correct Open vSwitch version(s) for the current OpenShift version."""
+ return self.get_required_version("Open vSwitch", self.openshift_to_ovs_version)
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index f3a628e28..28aee8b35 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -1,8 +1,6 @@
"""Check that available RPM packages match the required versions."""
-import re
-
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException
+from openshift_checks import OpenShiftCheck
from openshift_checks.mixins import NotContainerizedMixin
@@ -18,6 +16,8 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
(3, 5): ["2.6", "2.7"],
(3, 6): ["2.6", "2.7", "2.8"],
(3, 7): ["2.6", "2.7", "2.8"],
+ (3, 8): ["2.6", "2.7", "2.8"],
+ (3, 9): ["2.6", "2.7", "2.8"],
}
openshift_to_docker_version = {
@@ -27,11 +27,9 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
(3, 4): "1.12",
(3, 5): "1.12",
(3, 6): "1.12",
- }
-
- # map major OpenShift release versions across releases to a common major version
- map_major_release_version = {
- 1: 3,
+ (3, 7): "1.12",
+ (3, 8): "1.12",
+ (3, 9): ["1.12", "1.13"],
}
def is_active(self):
@@ -83,48 +81,8 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
def get_required_ovs_version(self):
"""Return the correct Open vSwitch version(s) for the current OpenShift version."""
- openshift_version = self.get_openshift_version_tuple()
-
- earliest = min(self.openshift_to_ovs_version)
- latest = max(self.openshift_to_ovs_version)
- if openshift_version < earliest:
- return self.openshift_to_ovs_version[earliest]
- if openshift_version > latest:
- return self.openshift_to_ovs_version[latest]
-
- ovs_version = self.openshift_to_ovs_version.get(openshift_version)
- if not ovs_version:
- msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
- raise OpenShiftCheckException(msg.format(".".join(str(comp) for comp in openshift_version)))
-
- return ovs_version
+ return self.get_required_version("Open vSwitch", self.openshift_to_ovs_version)
def get_required_docker_version(self):
"""Return the correct Docker version(s) for the current OpenShift version."""
- openshift_version = self.get_openshift_version_tuple()
-
- earliest = min(self.openshift_to_docker_version)
- latest = max(self.openshift_to_docker_version)
- if openshift_version < earliest:
- return self.openshift_to_docker_version[earliest]
- if openshift_version > latest:
- return self.openshift_to_docker_version[latest]
-
- docker_version = self.openshift_to_docker_version.get(openshift_version)
- if not docker_version:
- msg = "There is no recommended version of Docker for the current version of OpenShift: {}"
- raise OpenShiftCheckException(msg.format(".".join(str(comp) for comp in openshift_version)))
-
- return docker_version
-
- def get_openshift_version_tuple(self):
- """Return received image tag as a normalized (X, Y) minor version tuple."""
- version = self.get_var("openshift_image_tag")
- comps = [int(component) for component in re.findall(r'\d+', version)]
-
- if len(comps) < 2:
- msg = "An invalid version of OpenShift was found for this host: {}"
- raise OpenShiftCheckException(msg.format(version))
-
- comps[0] = self.map_major_release_version.get(comps[0], comps[0])
- return tuple(comps[0:2])
+ return self.get_required_version("Docker", self.openshift_to_docker_version)
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index fc333dfd4..9fd6e049d 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -6,13 +6,8 @@ from openshift_checks.docker_image_availability import DockerImageAvailability,
@pytest.fixture()
def task_vars():
return dict(
- openshift=dict(
- common=dict(
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(),
- ),
+ openshift_is_atomic=False,
+ openshift_is_containerized=False,
openshift_service_type='origin',
openshift_deployment_type='origin',
openshift_image_tag='',
@@ -20,7 +15,7 @@ def task_vars():
)
-@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
+@pytest.mark.parametrize('deployment_type, openshift_is_containerized, group_names, expect_active', [
("invalid", True, [], False),
("", True, [], False),
("origin", False, [], False),
@@ -30,20 +25,20 @@ def task_vars():
("origin", True, ["nfs"], False),
("openshift-enterprise", True, ["lb"], False),
])
-def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active):
+def test_is_active(task_vars, deployment_type, openshift_is_containerized, group_names, expect_active):
task_vars['openshift_deployment_type'] = deployment_type
- task_vars['openshift']['common']['is_containerized'] = is_containerized
+ task_vars['openshift_is_containerized'] = openshift_is_containerized
task_vars['group_names'] = group_names
assert DockerImageAvailability(None, task_vars).is_active() == expect_active
-@pytest.mark.parametrize("is_containerized,is_atomic", [
+@pytest.mark.parametrize("openshift_is_containerized,openshift_is_atomic", [
(True, True),
(False, False),
(True, False),
(False, True),
])
-def test_all_images_available_locally(task_vars, is_containerized, is_atomic):
+def test_all_images_available_locally(task_vars, openshift_is_containerized, openshift_is_atomic):
def execute_module(module_name, module_args, *_):
if module_name == "yum":
return {}
@@ -55,8 +50,8 @@ def test_all_images_available_locally(task_vars, is_containerized, is_atomic):
'images': [module_args['name']],
}
- task_vars['openshift']['common']['is_containerized'] = is_containerized
- task_vars['openshift']['common']['is_atomic'] = is_atomic
+ task_vars['openshift_is_containerized'] = openshift_is_containerized
+ task_vars['openshift_is_atomic'] = openshift_is_atomic
result = DockerImageAvailability(execute_module, task_vars).run()
assert not result.get('failed', False)
@@ -172,7 +167,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
assert expect_registries_reached == check.reachable_registries
-@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [
+@pytest.mark.parametrize("deployment_type, openshift_is_containerized, groups, oreg_url, expected", [
( # standard set of stuff required on nodes
"origin", False, ['oo_nodes_to_config'], "",
set([
@@ -232,14 +227,10 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
),
])
-def test_required_images(deployment_type, is_containerized, groups, oreg_url, expected):
+def test_required_images(deployment_type, openshift_is_containerized, groups, oreg_url, expected):
task_vars = dict(
- openshift=dict(
- common=dict(
- is_containerized=is_containerized,
- is_atomic=False,
- ),
- ),
+ openshift_is_containerized=openshift_is_containerized,
+ openshift_is_atomic=False,
openshift_deployment_type=deployment_type,
group_names=groups,
oreg_url=oreg_url,
@@ -287,11 +278,7 @@ def test_registry_console_image(task_vars, expected):
def test_containerized_etcd():
task_vars = dict(
- openshift=dict(
- common=dict(
- is_containerized=True,
- ),
- ),
+ openshift_is_containerized=True,
openshift_deployment_type="origin",
group_names=['oo_etcd_to_config'],
)
diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py
index 8fa68c378..33a5dd90a 100644
--- a/roles/openshift_health_checker/test/docker_storage_test.py
+++ b/roles/openshift_health_checker/test/docker_storage_test.py
@@ -4,21 +4,21 @@ from openshift_checks import OpenShiftCheckException
from openshift_checks.docker_storage import DockerStorage
-@pytest.mark.parametrize('is_containerized, group_names, is_active', [
+@pytest.mark.parametrize('openshift_is_containerized, group_names, is_active', [
(False, ["oo_masters_to_config", "oo_etcd_to_config"], False),
(False, ["oo_masters_to_config", "oo_nodes_to_config"], True),
(True, ["oo_etcd_to_config"], True),
])
-def test_is_active(is_containerized, group_names, is_active):
+def test_is_active(openshift_is_containerized, group_names, is_active):
task_vars = dict(
- openshift=dict(common=dict(is_containerized=is_containerized)),
+ openshift_is_containerized=openshift_is_containerized,
group_names=group_names,
)
assert DockerStorage(None, task_vars).is_active() == is_active
def non_atomic_task_vars():
- return {"openshift": {"common": {"is_atomic": False}}}
+ return {"openshift_is_atomic": False}
@pytest.mark.parametrize('docker_info, failed, expect_msg', [
diff --git a/roles/openshift_health_checker/test/etcd_traffic_test.py b/roles/openshift_health_checker/test/etcd_traffic_test.py
index a29dc166b..583c4c8dd 100644
--- a/roles/openshift_health_checker/test/etcd_traffic_test.py
+++ b/roles/openshift_health_checker/test/etcd_traffic_test.py
@@ -36,9 +36,7 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words)
task_vars = dict(
group_names=group_names,
- openshift=dict(
- common=dict(is_containerized=False),
- ),
+ openshift_is_containerized=False,
openshift_service_type="origin"
)
@@ -50,15 +48,13 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words)
assert result.get("failed", False) == failed
-@pytest.mark.parametrize('is_containerized,expected_unit_value', [
+@pytest.mark.parametrize('openshift_is_containerized,expected_unit_value', [
(False, "etcd"),
(True, "etcd_container"),
])
-def test_systemd_unit_matches_deployment_type(is_containerized, expected_unit_value):
+def test_systemd_unit_matches_deployment_type(openshift_is_containerized, expected_unit_value):
task_vars = dict(
- openshift=dict(
- common=dict(is_containerized=is_containerized),
- )
+ openshift_is_containerized=openshift_is_containerized
)
def execute_module(module_name, args, *_):
diff --git a/roles/openshift_health_checker/test/kibana_test.py b/roles/openshift_health_checker/test/kibana_test.py
index 04a5e89c4..750d4b9e9 100644
--- a/roles/openshift_health_checker/test/kibana_test.py
+++ b/roles/openshift_health_checker/test/kibana_test.py
@@ -1,12 +1,10 @@
import pytest
import json
-try:
- import urllib2
- from urllib2 import HTTPError, URLError
-except ImportError:
- from urllib.error import HTTPError, URLError
- import urllib.request as urllib2
+# pylint can't find the package when it's installed in a virtualenv
+from ansible.module_utils.six.moves.urllib import request # pylint: disable=import-error
+# pylint: disable=import-error
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from openshift_checks.logging.kibana import Kibana, OpenShiftCheckException
@@ -202,7 +200,7 @@ def test_verify_url_external_failure(lib_result, expect, monkeypatch):
if type(lib_result) is int:
return _http_return(lib_result)
raise lib_result
- monkeypatch.setattr(urllib2, 'urlopen', urlopen)
+ monkeypatch.setattr(request, 'urlopen', urlopen)
check = Kibana()
check._get_kibana_url = lambda: 'url'
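
Importing through six.moves resolves to urllib2 on Python 2 and to urllib.request / urllib.error on Python 3, which is why the try/except fallback can go. A quick illustration using the standalone six package (Ansible's vendored copy behaves the same):

from six.moves.urllib import request
from six.moves.urllib.error import URLError

try:
    request.urlopen("http://localhost:9/")  # discard port: exercise the error path
except URLError as err:
    print("caught %s via six.moves" % type(err).__name__)
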
diff --git a/roles/openshift_health_checker/test/mixins_test.py b/roles/openshift_health_checker/test/mixins_test.py
index b1a41ca3c..b5d6f2e95 100644
--- a/roles/openshift_health_checker/test/mixins_test.py
+++ b/roles/openshift_health_checker/test/mixins_test.py
@@ -10,8 +10,8 @@ class NotContainerizedCheck(NotContainerizedMixin, OpenShiftCheck):
@pytest.mark.parametrize('task_vars,expected', [
- (dict(openshift=dict(common=dict(is_containerized=False))), True),
- (dict(openshift=dict(common=dict(is_containerized=True))), False),
+ (dict(openshift_is_containerized=False), True),
+ (dict(openshift_is_containerized=True), False),
])
def test_is_active(task_vars, expected):
assert NotContainerizedCheck(None, task_vars).is_active() == expected
@@ -20,4 +20,4 @@ def test_is_active(task_vars, expected):
def test_is_active_missing_task_vars():
with pytest.raises(OpenShiftCheckException) as excinfo:
NotContainerizedCheck().is_active()
- assert 'is_containerized' in str(excinfo.value)
+ assert 'openshift_is_containerized' in str(excinfo.value)
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
index dd98ff4d8..80c7a0541 100644
--- a/roles/openshift_health_checker/test/ovs_version_test.py
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -1,26 +1,7 @@
import pytest
-from openshift_checks.ovs_version import OvsVersion, OpenShiftCheckException
-
-
-def test_openshift_version_not_supported():
- def execute_module(*_):
- return {}
-
- openshift_release = '111.7.0'
-
- task_vars = dict(
- openshift=dict(common=dict()),
- openshift_release=openshift_release,
- openshift_image_tag='v' + openshift_release,
- openshift_deployment_type='origin',
- openshift_service_type='origin'
- )
-
- with pytest.raises(OpenShiftCheckException) as excinfo:
- OvsVersion(execute_module, task_vars).run()
-
- assert "no recommended version of Open vSwitch" in str(excinfo.value)
+from openshift_checks.ovs_version import OvsVersion
+from openshift_checks import OpenShiftCheckException
def test_invalid_openshift_release_format():
@@ -70,7 +51,7 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):
assert result is return_value
-@pytest.mark.parametrize('group_names,is_containerized,is_active', [
+@pytest.mark.parametrize('group_names,openshift_is_containerized,is_active', [
(['oo_masters_to_config'], False, True),
# ensure check is skipped on containerized installs
(['oo_masters_to_config'], True, False),
@@ -82,9 +63,9 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):
(['lb'], False, False),
(['nfs'], False, False),
])
-def test_ovs_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active):
+def test_ovs_version_skip_when_not_master_nor_node(group_names, openshift_is_containerized, is_active):
task_vars = dict(
group_names=group_names,
- openshift=dict(common=dict(is_containerized=is_containerized)),
+ openshift_is_containerized=openshift_is_containerized,
)
assert OvsVersion(None, task_vars).is_active() == is_active
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index a1e6e0879..52740093d 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -3,16 +3,16 @@ import pytest
from openshift_checks.package_availability import PackageAvailability
-@pytest.mark.parametrize('pkg_mgr,is_containerized,is_active', [
+@pytest.mark.parametrize('pkg_mgr,openshift_is_containerized,is_active', [
('yum', False, True),
('yum', True, False),
('dnf', True, False),
('dnf', False, False),
])
-def test_is_active(pkg_mgr, is_containerized, is_active):
+def test_is_active(pkg_mgr, openshift_is_containerized, is_active):
task_vars = dict(
ansible_pkg_mgr=pkg_mgr,
- openshift=dict(common=dict(is_containerized=is_containerized)),
+ openshift_is_containerized=openshift_is_containerized,
)
assert PackageAvailability(None, task_vars).is_active() == is_active
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index ea8e02b97..868b4bd12 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -1,6 +1,7 @@
import pytest
-from openshift_checks.package_version import PackageVersion, OpenShiftCheckException
+from openshift_checks.package_version import PackageVersion
+from openshift_checks import OpenShiftCheckException
def task_vars_for(openshift_release, deployment_type):
@@ -18,7 +19,7 @@ def task_vars_for(openshift_release, deployment_type):
def test_openshift_version_not_supported():
check = PackageVersion(None, task_vars_for("1.2.3", 'origin'))
- check.get_openshift_version_tuple = lambda: (3, 4, 1) # won't be in the dict
+ check.get_major_minor_version = lambda: (3, 4, 1) # won't be in the dict
with pytest.raises(OpenShiftCheckException) as excinfo:
check.get_required_ovs_version()
@@ -99,7 +100,7 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc
assert result == return_value
-@pytest.mark.parametrize('group_names,is_containerized,is_active', [
+@pytest.mark.parametrize('group_names,openshift_is_containerized,is_active', [
(['oo_masters_to_config'], False, True),
# ensure check is skipped on containerized installs
(['oo_masters_to_config'], True, False),
@@ -111,9 +112,9 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc
(['lb'], False, False),
(['nfs'], False, False),
])
-def test_package_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active):
+def test_package_version_skip_when_not_master_nor_node(group_names, openshift_is_containerized, is_active):
task_vars = dict(
group_names=group_names,
- openshift=dict(common=dict(is_containerized=is_containerized)),
+ openshift_is_containerized=openshift_is_containerized,
)
assert PackageVersion(None, task_vars).is_active() == is_active
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index b6501d288..f40085976 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -69,7 +69,7 @@ r_openshift_hosted_router_os_firewall_allow: []
############
openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}"
-penshift_hosted_registry_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}"
+openshift_hosted_registry_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}"
openshift_hosted_registry_routecertificates: {}
openshift_hosted_registry_routetermination: "passthrough"
diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml
index ac9e241a5..ace2d15b0 100644
--- a/roles/openshift_hosted/meta/main.yml
+++ b/roles/openshift_hosted/meta/main.yml
@@ -14,4 +14,4 @@ galaxy_info:
dependencies:
- role: openshift_facts
- role: lib_openshift
-- role: lib_os_firewall
+- role: lib_utils
diff --git a/roles/openshift_hosted/tasks/main.yml b/roles/openshift_hosted/tasks/main.yml
index d306adf42..57f59f872 100644
--- a/roles/openshift_hosted/tasks/main.yml
+++ b/roles/openshift_hosted/tasks/main.yml
@@ -1,6 +1,6 @@
---
-# This role is intended to be used with include_role.
-# include_role:
+# This role is intended to be used with import_role.
+# import_role:
# name: openshift_hosted
# tasks_from: "{{ item }}"
# with_items:
diff --git a/roles/openshift_hosted/tasks/registry.yml b/roles/openshift_hosted/tasks/registry.yml
index 429f0c514..22294e3d4 100644
--- a/roles/openshift_hosted/tasks/registry.yml
+++ b/roles/openshift_hosted/tasks/registry.yml
@@ -1,10 +1,4 @@
---
-- name: Create temp directory for doing work in
- command: mktemp -d /tmp/openshift-hosted-ansible-XXXXXX
- register: mktempHosted
- changed_when: False
- check_mode: no
-
- name: setup firewall
import_tasks: firewall.yml
vars:
@@ -132,25 +126,10 @@
edits: "{{ openshift_hosted_registry_edits }}"
force: "{{ True|bool in openshift_hosted_registry_force }}"
+# TODO(michaelgugino) remove this set fact. It is currently necessary due to
+# the custom module not properly templating variables.
- name: setup registry list
set_fact:
r_openshift_hosted_registry_list:
- name: "{{ openshift_hosted_registry_name }}"
namespace: "{{ openshift_hosted_registry_namespace }}"
-
-- name: Wait for pod (Registry)
- include_tasks: wait_for_pod.yml
- vars:
- l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_registry_wait }}"
- l_openshift_hosted_wfp_items: "{{ r_openshift_hosted_registry_list }}"
-
-- include_tasks: storage/glusterfs.yml
- when:
- - openshift_hosted_registry_storage_kind | default(none) == 'glusterfs' or openshift_hosted_registry_storage_glusterfs_swap
-
-- name: Delete temp directory
- file:
- name: "{{ mktempHosted.stdout }}"
- state: absent
- changed_when: False
- check_mode: no
diff --git a/roles/openshift_hosted/tasks/registry_storage.yml b/roles/openshift_hosted/tasks/registry_storage.yml
new file mode 100644
index 000000000..aa66a7867
--- /dev/null
+++ b/roles/openshift_hosted/tasks/registry_storage.yml
@@ -0,0 +1,4 @@
+---
+- include_tasks: storage/glusterfs.yml
+ when:
+ - openshift_hosted_registry_storage_kind | default(none) == 'glusterfs' or openshift_hosted_registry_storage_glusterfs_swap
diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml
index 4e9219477..c2be00d19 100644
--- a/roles/openshift_hosted/tasks/router.yml
+++ b/roles/openshift_hosted/tasks/router.yml
@@ -18,6 +18,7 @@
- name: set_fact replicas
set_fact:
+ # get_router_replicas is a custom filter in role lib_utils
replicas: "{{ openshift_hosted_router_replicas | default(None) | get_router_replicas(router_nodes) }}"
- name: Get the certificate contents for router
@@ -25,10 +26,10 @@
backup: True
dest: "/etc/origin/master/{{ item | basename }}"
src: "{{ item }}"
- with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') |
- oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}"
+ with_items: "{{ openshift_hosted_routers | lib_utils_oo_collect(attribute='certificate') |
+ lib_utils_oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}"
when: ( not openshift_hosted_router_create_certificate | bool ) or openshift_hosted_router_certificate != {} or
- ( openshift_hosted_routers | oo_collect(attribute='certificate') | oo_select_keys_from_list(['keyfile', 'certfile', 'cafile'])|length > 0 )
+ ( openshift_hosted_routers | lib_utils_oo_collect(attribute='certificate') | lib_utils_oo_select_keys_from_list(['keyfile', 'certfile', 'cafile'])|length > 0 )
# This is for when we desire a cluster signed cert
@@ -55,7 +56,7 @@
when:
- openshift_hosted_router_create_certificate | bool
- openshift_hosted_router_certificate == {}
- - openshift_hosted_routers | oo_collect(attribute='certificate') | oo_select_keys_from_list(['keyfile', 'certfile', 'cafile'])|length == 0
+ - openshift_hosted_routers | lib_utils_oo_collect(attribute='certificate') | lib_utils_oo_select_keys_from_list(['keyfile', 'certfile', 'cafile'])|length == 0
- name: Create the router service account(s)
oc_serviceaccount:
@@ -98,9 +99,3 @@
ports: "{{ item.ports }}"
stats_port: "{{ item.stats_port }}"
with_items: "{{ openshift_hosted_routers }}"
-
-- name: Wait for pod (Routers)
- include_tasks: wait_for_pod.yml
- vars:
- l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_router_wait }}"
- l_openshift_hosted_wfp_items: "{{ openshift_hosted_routers }}"
diff --git a/roles/openshift_hosted/tasks/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml
index 18b2edcc6..b39c44b01 100644
--- a/roles/openshift_hosted/tasks/storage/glusterfs.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml
@@ -17,7 +17,7 @@
until:
- "registry_pods.results.results[0]['items'] | count > 0"
# There must be as many matching pods with 'Ready' status True as there are expected replicas
- - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | default(l_default_replicas) | int"
+ - "registry_pods.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | default(l_default_replicas) | int"
delay: 10
retries: "{{ (600 / 10) | int }}"
diff --git a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
index bd7181c17..fef945d51 100644
--- a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
@@ -1,4 +1,10 @@
---
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-hosted-ansible-XXXXXX
+ register: mktempHosted
+ changed_when: False
+ check_mode: no
+
- name: Generate GlusterFS registry endpoints
template:
src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-endpoints.yml.j2"
@@ -10,7 +16,14 @@
dest: "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
- name: Create GlusterFS registry service and endpoint
- command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}"
+ command: "{{ openshift_client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}"
with_items:
- "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
- "{{ mktempHosted.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktempHosted.stdout }}"
+ state: absent
+ changed_when: False
+ check_mode: no
diff --git a/roles/openshift_hosted/tasks/wait_for_pod.yml b/roles/openshift_hosted/tasks/wait_for_pod.yml
index 056c79334..a14b0febc 100644
--- a/roles/openshift_hosted/tasks/wait_for_pod.yml
+++ b/roles/openshift_hosted/tasks/wait_for_pod.yml
@@ -3,17 +3,17 @@
block:
- name: Ensure OpenShift pod correctly rolls out (best-effort today)
command: |
- {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \
+ {{ openshift_client_binary }} rollout status deploymentconfig {{ item.name }} \
--namespace {{ item.namespace | default('default') }} \
--config {{ openshift_master_config_dir }}/admin.kubeconfig
async: 600
- poll: 15
+ poll: 5
with_items: "{{ l_openshift_hosted_wfp_items }}"
failed_when: false
- name: Determine the latest version of the OpenShift pod deployment
command: |
- {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \
+ {{ openshift_client_binary }} get deploymentconfig {{ item.name }} \
--namespace {{ item.namespace }} \
--config {{ openshift_master_config_dir }}/admin.kubeconfig \
-o jsonpath='{ .status.latestVersion }'
@@ -22,14 +22,14 @@
- name: Poll for OpenShift pod deployment success
command: |
- {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
+ {{ openshift_client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
--namespace {{ item.0.namespace }} \
--config {{ openshift_master_config_dir }}/admin.kubeconfig \
-o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
register: openshift_hosted_wfp_rc_phase
until: "'Running' not in openshift_hosted_wfp_rc_phase.stdout"
- delay: 15
- retries: 40
+ delay: 5
+ retries: 60
failed_when: "'Failed' in openshift_hosted_wfp_rc_phase.stdout"
with_together:
- "{{ l_openshift_hosted_wfp_items }}"
diff --git a/roles/openshift_hosted_templates/defaults/main.yml b/roles/openshift_hosted_templates/defaults/main.yml
index f4fd15089..48d62c8df 100644
--- a/roles/openshift_hosted_templates/defaults/main.yml
+++ b/roles/openshift_hosted_templates/defaults/main.yml
@@ -1,5 +1,5 @@
---
-hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted"
+hosted_base: "{{ openshift.common.config_base if openshift_is_containerized | bool else '/usr/share/openshift' }}/hosted"
hosted_deployment_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'enterprise' }}"
content_version: "{{ openshift.common.examples_content_version }}"
diff --git a/roles/openshift_hosted_templates/meta/main.yml b/roles/openshift_hosted_templates/meta/main.yml
index 4027f524b..d7cc1e288 100644
--- a/roles/openshift_hosted_templates/meta/main.yml
+++ b/roles/openshift_hosted_templates/meta/main.yml
@@ -11,4 +11,6 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- role: lib_utils
+- role: openshift_facts
diff --git a/roles/openshift_hosted_templates/tasks/main.yml b/roles/openshift_hosted_templates/tasks/main.yml
index 89b92dfcc..34d39f3a5 100644
--- a/roles/openshift_hosted_templates/tasks/main.yml
+++ b/roles/openshift_hosted_templates/tasks/main.yml
@@ -1,20 +1,25 @@
---
- name: Create local temp dir for OpenShift hosted templates copy
local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- become: False
register: copy_hosted_templates_mktemp
run_once: True
# AUDIT:changed_when: not set here because this task actually
# creates something
+- name: Chmod local temp dir for OpenShift examples copy
+ local_action: command chmod 777 "{{ copy_hosted_templates_mktemp.stdout }}"
+ run_once: True
+
- name: Create tar of OpenShift examples
local_action: command tar -C "{{ role_path }}/files/{{ content_version }}/{{ hosted_deployment_type }}" -cvf "{{ copy_hosted_templates_mktemp.stdout }}/openshift-hosted-templates.tar" .
args:
# Disables the following warning:
# Consider using unarchive module rather than running tar
warn: no
- become: False
- register: copy_hosted_templates_tar
+
+- name: Chmod local tar of OpenShift examples
+ local_action: command chmod 744 "{{ copy_hosted_templates_mktemp.stdout }}/openshift-hosted-templates.tar"
+ run_once: True
- name: Create remote OpenShift hosted templates directory
file:
@@ -28,7 +33,6 @@
dest: "{{ hosted_base }}/"
- name: Cleanup the OpenShift hosted templates temp dir
- become: False
local_action: file dest="{{ copy_hosted_templates_mktemp.stdout }}" state=absent
- name: Modify registry paths if registry_url is not registry.access.redhat.com
@@ -52,7 +56,7 @@
- name: Create or update hosted templates
command: >
- {{ openshift.common.client_binary }} {{ openshift_hosted_templates_import_command }}
+ {{ openshift_client_binary }} {{ openshift_hosted_templates_import_command }}
-f {{ hosted_base }}
--config={{ openshift_hosted_templates_kubeconfig }}
-n openshift
diff --git a/roles/openshift_loadbalancer/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml
index f9c16ba40..d8c45fb33 100644
--- a/roles/openshift_loadbalancer/defaults/main.yml
+++ b/roles/openshift_loadbalancer/defaults/main.yml
@@ -2,6 +2,12 @@
r_openshift_loadbalancer_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_loadbalancer_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+openshift_router_image_default_dict:
+ origin: 'openshift/origin-haproxy-router'
+ openshift-enterprise: 'openshift3/ose-haproxy-router'
+openshift_router_image_default: "{{ openshift_router_image_default_dict[openshift_deployment_type] }}"
+openshift_router_image: "{{ openshift_router_image_default }}"
+
haproxy_frontends:
- name: main
binds:
@@ -26,7 +32,7 @@ r_openshift_loadbalancer_os_firewall_allow:
port: "{{ nuage_mon_rest_server_port | default(9443) }}/tcp"
cond: "{{ r_openshift_lb_use_nuage | bool }}"
-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
# NOTE
# r_openshift_lb_use_nuage_default may be defined external to this role.
diff --git a/roles/openshift_loadbalancer/meta/main.yml b/roles/openshift_loadbalancer/meta/main.yml
index 72298b599..3b5b45c5f 100644
--- a/roles/openshift_loadbalancer/meta/main.yml
+++ b/roles/openshift_loadbalancer/meta/main.yml
@@ -10,5 +10,5 @@ galaxy_info:
versions:
- 7
dependencies:
-- role: lib_os_firewall
+- role: lib_utils
- role: openshift_facts
diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml
index 7d23ea6c8..4a11029ab 100644
--- a/roles/openshift_loadbalancer/tasks/main.yml
+++ b/roles/openshift_loadbalancer/tasks/main.yml
@@ -4,33 +4,33 @@
- name: Install haproxy
package: name=haproxy state=present
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
register: result
until: result is succeeded
- name: Pull haproxy image
command: >
- docker pull {{ openshift.common.router_image }}:{{ openshift_image_tag }}
- when: openshift.common.is_containerized | bool
+ docker pull {{ openshift_router_image }}:{{ openshift_image_tag }}
+ when: openshift_is_containerized | bool
- name: Create config directory for haproxy
file:
path: /etc/haproxy
state: directory
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
- name: Create the systemd unit files
template:
src: "haproxy.docker.service.j2"
dest: "/etc/systemd/system/haproxy.service"
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
notify: restart haproxy
- name: Configure systemd service directory for haproxy
file:
path: /etc/systemd/system/haproxy.service.d
state: directory
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
# Work around ini_file create option in 2.2 which defaults to no
- name: Create limits.conf file
@@ -41,7 +41,7 @@
owner: root
group: root
changed_when: false
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
- name: Configure the nofile limits for haproxy
ini_file:
@@ -50,7 +50,7 @@
option: LimitNOFILE
value: "{{ openshift_loadbalancer_limit_nofile | default(100000) }}"
notify: restart haproxy
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
- name: Configure haproxy
template:
diff --git a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
index 24fd635ec..de5a8d7c2 100644
--- a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
+++ b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
@@ -3,7 +3,7 @@
global
maxconn {{ openshift_loadbalancer_global_maxconn | default(20000) }}
log /dev/log local0 info
-{% if openshift.common.is_containerized | bool %}
+{% if openshift_is_containerized | bool %}
stats socket /var/lib/haproxy/run/haproxy.sock mode 600 level admin
{% else %}
chroot /var/lib/haproxy
diff --git a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
index 0343a7eb0..90111449c 100644
--- a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
+++ b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
@@ -5,7 +5,7 @@ PartOf={{ openshift_docker_service_name }}.service
[Service]
ExecStartPre=-/usr/bin/docker rm -f openshift_loadbalancer
-ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer {% for frontend in openshift_loadbalancer_frontends %} {% for bind in frontend.binds %} -p {{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }}:{{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }} {% endfor %} {% endfor %} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint=haproxy {{ openshift.common.router_image }}:{{ openshift_image_tag }} -f /etc/haproxy/haproxy.cfg
+ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer {% for frontend in openshift_loadbalancer_frontends %} {% for bind in frontend.binds %} -p {{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }}:{{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }} {% endfor %} {% endfor %} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint=haproxy {{ openshift_router_image }}:{{ openshift_image_tag }} -f /etc/haproxy/haproxy.cfg
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop openshift_loadbalancer
LimitNOFILE={{ openshift_loadbalancer_limit_nofile | default(100000) }}
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 27cfc17d6..a192bd67e 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -177,6 +177,9 @@ Elasticsearch OPS too, if using an OPS cluster:
clients will use to connect to mux, and will be used in the TLS server cert
subject.
- `openshift_logging_mux_port`: 24284
+- `openshift_logging_mux_external_address`: The IP address that mux will listen
+ on for connections from *external* clients. Defaults to the address of the
+ default ipv4 interface as reported by the `ansible_default_ipv4` fact.
- `openshift_logging_mux_cpu_request`: 100m
- `openshift_logging_mux_memory_limit`: 512Mi
- `openshift_logging_mux_default_namespaces`: Default `["mux-undefined"]` - the
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
index e1a5ea726..247c7e4df 100644
--- a/roles/openshift_logging/filter_plugins/openshift_logging.py
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -79,14 +79,6 @@ def entry_from_named_pair(register_pairs, key):
raise RuntimeError("There was no entry found in the dict that had an item with a name that matched {}".format(key))
-def map_from_pairs(source, delim="="):
- ''' Returns a dict given the source and delim delimited '''
- if source == '':
- return dict()
-
- return dict(item.split(delim) for item in source.split(","))
-
-
def serviceaccount_name(qualified_sa):
''' Returns the simple name from a fully qualified name '''
return qualified_sa.split(":")[-1]
@@ -102,6 +94,28 @@ def serviceaccount_namespace(qualified_sa, default=None):
return seg[-1]
+def flatten_dict(data, parent_key=None):
+ """ This filter plugin will flatten a dict and its sublists into a single dict
+ """
+ if not isinstance(data, dict):
+ raise RuntimeError("flatten_dict failed, expects to flatten a dict")
+
+ merged = dict()
+
+ for key in data:
+ if parent_key is not None:
+ insert_key = '.'.join((parent_key, key))
+ else:
+ insert_key = key
+
+ if isinstance(data[key], dict):
+ merged.update(flatten_dict(data[key], insert_key))
+ else:
+ merged[insert_key] = data[key]
+
+ return merged
+
+
# pylint: disable=too-few-public-methods
class FilterModule(object):
''' OpenShift Logging Filters '''
@@ -112,10 +126,10 @@ class FilterModule(object):
return {
'random_word': random_word,
'entry_from_named_pair': entry_from_named_pair,
- 'map_from_pairs': map_from_pairs,
'min_cpu': min_cpu,
'es_storage': es_storage,
'serviceaccount_name': serviceaccount_name,
'serviceaccount_namespace': serviceaccount_namespace,
- 'walk': walk
+ 'walk': walk,
+ "flatten_dict": flatten_dict
}
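
A quick demonstration of the new filter; the function body below is a condensed copy of the one added above so the snippet runs standalone:

def flatten_dict(data, parent_key=None):
    """Flatten nested dicts into a single dict with dot-joined keys."""
    merged = {}
    for key, value in data.items():
        insert_key = key if parent_key is None else '.'.join((parent_key, key))
        if isinstance(value, dict):
            merged.update(flatten_dict(value, insert_key))
        else:
            merged[insert_key] = value
    return merged


nested = {"index": {"number_of_shards": 1, "translog": {"flush_threshold_size": "256mb"}}}
assert flatten_dict(nested) == {
    "index.number_of_shards": 1,
    "index.translog.flush_threshold_size": "256mb",
}
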
diff --git a/roles/openshift_logging/library/logging_patch.py b/roles/openshift_logging/library/logging_patch.py
new file mode 100644
index 000000000..d2c0bc456
--- /dev/null
+++ b/roles/openshift_logging/library/logging_patch.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+
+""" Ansible module to help with creating context patch file with whitelisting for logging """
+
+import difflib
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+DOCUMENTATION = '''
+---
+module: logging_patch
+
+short_description: This will create a unified diff patch file while giving the ability
+    to whitelist some lines (excluding them from comparison)
+
+description:
+ - "To create configmap patches for logging"
+
+author:
+ - Eric Wolinetz ewolinet@redhat.com
+'''
+
+
+EXAMPLES = '''
+- logging_patch:
+ original_file: "{{ tempdir }}/current.yml"
+ new_file: "{{ configmap_new_file }}"
+ whitelist: "{{ configmap_protected_lines | default([]) }}"
+
+'''
+
+
+def account_for_whitelist(file_contents, white_list=None):
+ """ This method will remove lines that contain whitelist values from the content
+ of the file so that we don't build a patch based on those lines
+
+ Usage:
+
+ for file_contents:
+
+ index:
+ number_of_shards: {{ es_number_of_shards | default ('1') }}
+ number_of_replicas: {{ es_number_of_replicas | default ('0') }}
+ unassigned.node_left.delayed_timeout: 2m
+ translog:
+ flush_threshold_size: 256mb
+ flush_threshold_period: 5m
+
+
+ and white_list:
+
+ ['number_of_shards', 'number_of_replicas']
+
+
+ We would end up with:
+
+ index:
+ unassigned.node_left.delayed_timeout: 2m
+ translog:
+ flush_threshold_size: 256mb
+ flush_threshold_period: 5m
+
+ """
+
+ for line in white_list:
+ file_contents = re.sub(r".*%s:.*\n" % line, "", file_contents)
+
+ return file_contents
+
+
+def run_module():
+ """ The body of the module, we check if the variable name specified as the value
+ for the key is defined. If it is then we use that value as for the original key """
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ original_file=dict(type='str', required=True),
+ new_file=dict(type='str', required=True),
+ whitelist=dict(required=False, type='list', default=[])
+ ),
+ supports_check_mode=True
+ )
+
+ original_fh = open(module.params['original_file'], "r")
+ original_contents = original_fh.read()
+ original_fh.close()
+
+ original_contents = account_for_whitelist(original_contents, module.params['whitelist'])
+
+ new_fh = open(module.params['new_file'], "r")
+ new_contents = new_fh.read()
+ new_fh.close()
+
+ new_contents = account_for_whitelist(new_contents, module.params['whitelist'])
+
+ uni_diff = difflib.unified_diff(new_contents.splitlines(),
+ original_contents.splitlines(),
+ lineterm='')
+
+ return module.exit_json(changed=False, # noqa: F405
+ raw_patch="\n".join(uni_diff))
+
+
+def main():
+ """ main """
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
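
The whitelist behavior is easy to verify against the docstring's example; the function body below is copied from the module (with a small guard for an empty whitelist) so the snippet runs standalone:

import re


def account_for_whitelist(file_contents, white_list=None):
    """Drop any line mentioning a whitelisted key before diffing."""
    for line in (white_list or []):
        file_contents = re.sub(r".*%s:.*\n" % line, "", file_contents)
    return file_contents


contents = "index:\n  number_of_shards: 1\n  unassigned.node_left.delayed_timeout: 2m\n"
print(account_for_whitelist(contents, ["number_of_shards"]))
# index:
#   unassigned.node_left.delayed_timeout: 2m
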
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
index 98d0d1c4f..37ffb0204 100644
--- a/roles/openshift_logging/library/openshift_logging_facts.py
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -204,6 +204,14 @@ class OpenshiftLoggingFacts(OCBaseCommand):
if comp is not None:
self.add_facts_for(comp, "services", name, dict())
+ # pylint: disable=too-many-arguments
+ def facts_from_configmap(self, comp, kind, name, config_key, yaml_file=None):
+ '''Extracts facts in logging namespace from configmap'''
+ if yaml_file is not None:
+ config_facts = yaml.load(yaml_file)
+ self.facts[comp][kind][name][config_key] = config_facts
+ self.facts[comp][kind][name]["raw"] = yaml_file
+
def facts_for_configmaps(self, namespace):
''' Gathers facts for configmaps in logging namespace '''
self.default_keys_for("configmaps")
@@ -214,7 +222,10 @@ class OpenshiftLoggingFacts(OCBaseCommand):
name = item["metadata"]["name"]
comp = self.comp(name)
if comp is not None:
- self.add_facts_for(comp, "configmaps", name, item["data"])
+ self.add_facts_for(comp, "configmaps", name, dict(item["data"]))
+ if comp in ["elasticsearch", "elasticsearch_ops"]:
+ for config_key in item["data"]:
+ self.facts_from_configmap(comp, "configmaps", name, config_key, item["data"][config_key])
def facts_for_oauthclients(self, namespace):
''' Gathers facts for oauthclients used with logging '''
@@ -265,7 +276,7 @@ class OpenshiftLoggingFacts(OCBaseCommand):
return
for item in role["subjects"]:
comp = self.comp(item["name"])
- if comp is not None and namespace == item["namespace"]:
+ if comp is not None and namespace == item.get("namespace"):
self.add_facts_for(comp, "clusterrolebindings", "cluster-readers", dict())
# this needs to end up nested under the service account...
@@ -277,7 +288,7 @@ class OpenshiftLoggingFacts(OCBaseCommand):
return
for item in role["subjects"]:
comp = self.comp(item["name"])
- if comp is not None and namespace == item["namespace"]:
+ if comp is not None and namespace == item.get("namespace"):
self.add_facts_for(comp, "rolebindings", "logging-elasticsearch-view-role", dict())
# pylint: disable=no-self-use, too-many-return-statements
diff --git a/roles/openshift_logging/meta/main.yaml b/roles/openshift_logging/meta/main.yaml
index 9c480f73a..01ed4918f 100644
--- a/roles/openshift_logging/meta/main.yaml
+++ b/roles/openshift_logging/meta/main.yaml
@@ -14,3 +14,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_logging/tasks/annotate_ops_projects.yaml b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
index 59d6098d4..4a2ee64f0 100644
--- a/roles/openshift_logging/tasks/annotate_ops_projects.yaml
+++ b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
@@ -1,6 +1,6 @@
---
- command: >
- {{ openshift.common.client_binary }}
+ {{ openshift_client_binary }}
--config={{ openshift.common.config_base }}/master/admin.kubeconfig
get namespaces -o jsonpath={.items[*].metadata.name} {{ __default_logging_ops_projects | join(' ') }}
register: __logging_ops_projects
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index af36d67c6..fbc3e3fd1 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -109,14 +109,14 @@
# remove annotations added by logging
- command: >
- {{ openshift.common.client_binary }}
+ {{ openshift_client_binary }}
--config={{ openshift.common.config_base }}/master/admin.kubeconfig
get namespaces -o name {{ __default_logging_ops_projects | join(' ') }}
register: __logging_ops_projects
- name: Remove Annotation of Operations Projects
command: >
- {{ openshift.common.client_binary }}
+ {{ openshift_client_binary }}
--config={{ openshift.common.config_base }}/master/admin.kubeconfig
annotate {{ project }} openshift.io/logging.ui.hostname-
with_items: "{{ __logging_ops_projects.stdout_lines }}"
@@ -126,7 +126,18 @@
- __logging_ops_projects.stderr | length == 0
## EventRouter
-- include_role:
+- import_role:
name: openshift_logging_eventrouter
when:
not openshift_logging_install_eventrouter | default(false) | bool
+
+# Update asset config in openshift-web-console namespace
+- name: Remove Kibana route information from web console asset config
+ include_role:
+ name: openshift_web_console
+ tasks_from: update_asset_config.yml
+ vars:
+ asset_config_edits:
+ - key: loggingPublicURL
+ value: ""
+ when: openshift_web_console_install | default(true) | bool
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index 082c0128f..0d7f8c056 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -17,7 +17,7 @@
- name: Generate certificates
command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
+ {{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
--key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt
--serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test
check_mode: no
@@ -139,10 +139,10 @@
# TODO: make idempotent
- name: Generate proxy session
- set_fact: session_secret={{ 200 | oo_random_word}}
+ set_fact: session_secret={{ 200 | lib_utils_oo_random_word}}
check_mode: no
# TODO: make idempotent
- name: Generate oauth client secret
- set_fact: oauth_secret={{ 64 | oo_random_word}}
+ set_fact: oauth_secret={{ 64 | lib_utils_oo_random_word}}
check_mode: no
diff --git a/roles/openshift_logging/tasks/generate_jks.yaml b/roles/openshift_logging/tasks/generate_jks.yaml
index d6ac88dcc..6e3204589 100644
--- a/roles/openshift_logging/tasks/generate_jks.yaml
+++ b/roles/openshift_logging/tasks/generate_jks.yaml
@@ -24,25 +24,21 @@
local_action: file path="{{local_tmp.stdout}}/elasticsearch.jks" state=touch mode="u=rw,g=r,o=r"
when: elasticsearch_jks.stat.exists
changed_when: False
- become: no
- name: Create placeholder for previously created JKS certs to prevent recreating...
local_action: file path="{{local_tmp.stdout}}/logging-es.jks" state=touch mode="u=rw,g=r,o=r"
when: logging_es_jks.stat.exists
changed_when: False
- become: no
- name: Create placeholder for previously created JKS certs to prevent recreating...
local_action: file path="{{local_tmp.stdout}}/system.admin.jks" state=touch mode="u=rw,g=r,o=r"
when: system_admin_jks.stat.exists
changed_when: False
- become: no
- name: Create placeholder for previously created JKS certs to prevent recreating...
local_action: file path="{{local_tmp.stdout}}/truststore.jks" state=touch mode="u=rw,g=r,o=r"
when: truststore_jks.stat.exists
changed_when: False
- become: no
- name: pulling down signing items from host
fetch:
@@ -61,12 +57,10 @@
vars:
- top_dir: "{{local_tmp.stdout}}"
when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
- become: no
- name: Run JKS generation script
local_action: script generate-jks.sh {{local_tmp.stdout}} {{openshift_logging_namespace}}
check_mode: no
- become: no
when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
- name: Pushing locally generated JKS certs to remote host...
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index bb8ebec6b..f82e55b98 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -1,9 +1,12 @@
---
- name: Gather OpenShift Logging Facts
openshift_logging_facts:
- oc_bin: "{{openshift.common.client_binary}}"
+ oc_bin: "{{openshift_client_binary}}"
openshift_logging_namespace: "{{openshift_logging_namespace}}"
+## This is include_tasks rather than import_tasks because we need access to group/inventory variables
+- include_tasks: set_defaults_from_current.yml
+
- name: Set logging project
oc_project:
state: present
@@ -84,14 +87,14 @@
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
- openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}"
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}"
openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if outer_item.0.storageGroups | default([]) | length == 0 else outer_item.0.storageGroups }}"
_es_containers: "{{ outer_item.0.containers}}"
_es_configmap: "{{ openshift_logging_facts | walk('elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
with_together:
- - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() | list }}"
- "{{ openshift_logging_facts.elasticsearch.pvcs }}"
- "{{ es_indices }}"
loop_control:
@@ -111,7 +114,7 @@
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
- openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}"
with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
loop_control:
@@ -148,7 +151,7 @@
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
- openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name | default() }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
@@ -166,7 +169,7 @@
_es_configmap: "{{ openshift_logging_facts | walk('elasticsearch_ops#configmaps#logging-elasticsearch-ops#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
with_together:
- - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() | list }}"
- "{{ openshift_logging_facts.elasticsearch_ops.pvcs }}"
- "{{ es_ops_indices }}"
loop_control:
@@ -190,7 +193,7 @@
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
- openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name | default() }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
@@ -210,7 +213,7 @@
## Kibana
-- include_role:
+- import_role:
name: openshift_logging_kibana
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -223,7 +226,7 @@
openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
-- include_role:
+- import_role:
name: openshift_logging_kibana
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -253,7 +256,7 @@
- include_tasks: annotate_ops_projects.yaml
## Curator
-- include_role:
+- import_role:
name: openshift_logging_curator
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -263,7 +266,7 @@
openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}"
openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
-- include_role:
+- import_role:
name: openshift_logging_curator
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -281,7 +284,7 @@
- openshift_logging_use_ops | bool
## Mux
-- include_role:
+- import_role:
name: openshift_logging_mux
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -294,7 +297,7 @@
## Fluentd
-- include_role:
+- import_role:
name: openshift_logging_fluentd
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -305,10 +308,27 @@
## EventRouter
-- include_role:
+- import_role:
name: openshift_logging_eventrouter
when:
openshift_logging_install_eventrouter | default(false) | bool
+# TODO: Remove when asset config is removed from master-config.yaml
- include_tasks: update_master_config.yaml
+
+# Update asset config in openshift-web-console namespace
+- name: Add Kibana route information to web console asset config
+ include_role:
+ name: openshift_web_console
+ tasks_from: update_console_config.yml
+ vars:
+ console_config_edits:
+ - key: clusterInfo#loggingPublicURL
+ value: "https://{{ openshift_logging_kibana_hostname }}"
+ # Continue to set the old deprecated property until the
+ # origin-web-console image is updated for the new name.
+ # This will be removed in a future pull.
+ - key: loggingPublicURL
+ value: "https://{{ openshift_logging_kibana_hostname }}"
+ when: openshift_web_console_install | default(true) | bool
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index 9949bb95d..60cc399fa 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -17,7 +17,11 @@
register: local_tmp
changed_when: False
check_mode: no
- become: no
+
+- name: Chmod local temp directory for doing work in
+ local_action: command chmod 777 "{{ local_tmp.stdout }}"
+ changed_when: False
+ check_mode: no
- include_tasks: install_logging.yaml
when:
@@ -31,4 +35,3 @@
local_action: file path="{{local_tmp.stdout}}" state=absent
tags: logging_cleanup
changed_when: False
- become: no
diff --git a/roles/openshift_logging/tasks/patch_configmap_file.yaml b/roles/openshift_logging/tasks/patch_configmap_file.yaml
new file mode 100644
index 000000000..30087fe6a
--- /dev/null
+++ b/roles/openshift_logging/tasks/patch_configmap_file.yaml
@@ -0,0 +1,35 @@
+---
+## The purpose of this task file is to generate a patch based on the diff
+## between configmap_current_file and configmap_new_file. The module
+## logging_patch takes the paths of the two files to compare and also a list of
+## variables whose lines we exclude from the diff.
+## We then patch the new configmap file so that we can build a configmap
+## using that file later. Finally we use oc apply to idempotently modify any
+## existing configmap.
+
+## The following variables are expected to be provided when including this task:
+# __configmap_output -- This is provided to us from patch_configmap_files.yaml
+# it is a dict of the configmap where configmap_current_file exists
+# configmap_current_file -- The name of the data file in the __configmap_output
+# configmap_new_file -- The path to the file that we intend to oc apply later
+# we apply our generated patch to this file.
+# configmap_protected_lines -- The list of variables to exclude from the diff
+
+- copy:
+ content: "{{ __configmap_output.results.results[0]['data'][configmap_current_file] }}"
+ dest: "{{ tempdir }}/current.yml"
+
+- logging_patch:
+ original_file: "{{ tempdir }}/current.yml"
+ new_file: "{{ configmap_new_file }}"
+ whitelist: "{{ configmap_protected_lines | default([]) }}"
+ register: patch_output
+
+- copy:
+ content: "{{ patch_output.raw_patch }}\n"
+ dest: "{{ tempdir }}/patch.patch"
+ when: patch_output.raw_patch | length > 0
+
+- command: >
+ patch --force --quiet -u "{{ configmap_new_file }}" "{{ tempdir }}/patch.patch"
+ when: patch_output.raw_patch | length > 0
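To illustrate the flow (hypothetical curator keys and user edit): if a user had bumped a value in the deployed configmap that the role ships differently, logging_patch would emit a patch that, once applied to the new file, carries the user's value forward, along the lines of:

    --- curator.yml
    +++ current.yml
    @@ -1,3 +1,3 @@
     .defaults:
    -  timeout: 300
    +  timeout: 600

Lines matching configmap_protected_lines are excluded from the diff, so role-managed settings are not clobbered by stale values from the deployed copy.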
diff --git a/roles/openshift_logging/tasks/patch_configmap_files.yaml b/roles/openshift_logging/tasks/patch_configmap_files.yaml
new file mode 100644
index 000000000..74a9cc287
--- /dev/null
+++ b/roles/openshift_logging/tasks/patch_configmap_files.yaml
@@ -0,0 +1,31 @@
+---
+## The purpose of this task file is to take in a list of configmap files provided
+## in the variable configmap_file_names, which correspond to the data sections
+## within a configmap. We iterate over each of these files and create a patch
+## from the diff between current_file and new_file, preserving any custom
+## changes a user may have made to the currently deployed configmap while
+## idempotently updating it with the role-provided files.
+
+## The following variables are expected to be provided when including this task:
+# configmap_name -- This is the name of the configmap that the files exist in
+# configmap_namespace -- The namespace that the configmap lives in
+# configmap_file_names -- This is expected to be passed in as a dict
+# current_file -- The name of the data entry within the configmap
+# new_file -- The file path to the file we are comparing to current_file
+# protected_lines -- List of variables whose line will be excluded when creating a diff
+
+- oc_configmap:
+ name: "{{ configmap_name }}"
+ state: list
+ namespace: "{{ configmap_namespace }}"
+ register: __configmap_output
+
+- when: __configmap_output.results.stderr is undefined
+ include_tasks: patch_configmap_file.yaml
+ vars:
+ configmap_current_file: "{{ configmap_files.current_file }}"
+ configmap_new_file: "{{ configmap_files.new_file }}"
+ configmap_protected_lines: "{{ configmap_files.protected_lines | default([]) }}"
+ with_items: "{{ configmap_file_names }}"
+ loop_control:
+ loop_var: configmap_files
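For reference, the oc_configmap list result that the included task file indexes as __configmap_output.results.results[0]['data'][configmap_current_file] has roughly this shape (trimmed; names are illustrative):

    results:
      results:
        - apiVersion: v1
          kind: ConfigMap
          metadata:
            name: logging-curator
            namespace: logging
          data:
            config.yaml: |
              ... currently deployed contents ...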
diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml
index 00de0ca06..bc817075d 100644
--- a/roles/openshift_logging/tasks/procure_server_certs.yaml
+++ b/roles/openshift_logging/tasks/procure_server_certs.yaml
@@ -27,7 +27,7 @@
- name: Creating signed server cert and key for {{ cert_info.procure_component }}
command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
+ {{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
--key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
--hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key
--signer-serial={{generated_certs_dir}}/ca.serial.txt
diff --git a/roles/openshift_logging/tasks/set_defaults_from_current.yml b/roles/openshift_logging/tasks/set_defaults_from_current.yml
new file mode 100644
index 000000000..dde362abe
--- /dev/null
+++ b/roles/openshift_logging/tasks/set_defaults_from_current.yml
@@ -0,0 +1,34 @@
+---
+
+## We pull default values from configmaps if they already exist.
+## conditional_set_fact lets us set the value of a variable based on the
+## value of another one, if that one is defined. Otherwise we don't set the
+## left-hand side (it stays undefined as well).
+
+## conditional_set_fact also lets us specify a fact source, so first we try to
+## set variables from the logging-elasticsearch & logging-elasticsearch-ops
+## configmaps; afterwards we set each variable from the inventory, falling back
+## to the value from a configmap as a default. If neither is set, the variable
+## remains undefined and the role default will be used.
+
+- conditional_set_fact:
+ facts: "{{ openshift_logging_facts['elasticsearch']['configmaps']['logging-elasticsearch']['elasticsearch.yml'] | flatten_dict }}"
+ vars:
+ __openshift_logging_es_number_of_shards: index.number_of_shards
+ __openshift_logging_es_number_of_replicas: index.number_of_replicas
+ when: openshift_logging_facts['elasticsearch']['configmaps']['logging-elasticsearch'] is defined
+
+- conditional_set_fact:
+ facts: "{{ openshift_logging_facts['elasticsearch_ops']['configmaps']['logging-elasticsearch-ops']['elasticsearch.yml'] | flatten_dict }}"
+ vars:
+ __openshift_logging_es_ops_number_of_shards: index.number_of_shards
+ __openshift_logging_es_ops_number_of_replicas: index.number_of_replicas
+ when: openshift_logging_facts['elasticsearch_ops']['configmaps']['logging-elasticsearch-ops'] is defined
+
+- conditional_set_fact:
+ facts: "{{ hostvars[inventory_hostname] }}"
+ vars:
+ openshift_logging_es_number_of_shards: openshift_logging_es_number_of_shards | __openshift_logging_es_number_of_shards
+ openshift_logging_es_number_of_replicas: openshift_logging_es_number_of_replicas | __openshift_logging_es_number_of_replicas
+ openshift_logging_es_ops_number_of_shards: openshift_logging_es_ops_number_of_shards | __openshift_logging_es_ops_number_of_shards
+ openshift_logging_es_ops_number_of_replicas: openshift_logging_es_ops_number_of_replicas | __openshift_logging_es_ops_number_of_replicas
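Concretely (hypothetical values): if the deployed configmap contained index.number_of_shards: 3 and the inventory leaves openshift_logging_es_number_of_shards unset, the first task sets __openshift_logging_es_number_of_shards to "3", and the final task walks each "a | b" candidate list left to right, taking the first defined key, yielding:

    openshift_logging_es_number_of_shards: "3"   # inventory unset, configmap fallback wins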
diff --git a/roles/openshift_logging/tasks/update_master_config.yaml b/roles/openshift_logging/tasks/update_master_config.yaml
index b96b8e29d..c0f42ba97 100644
--- a/roles/openshift_logging/tasks/update_master_config.yaml
+++ b/roles/openshift_logging/tasks/update_master_config.yaml
@@ -1,4 +1,5 @@
---
+# TODO: Remove when asset config is removed from master-config.yaml
- name: Adding Kibana route information to loggingPublicURL
modify_yaml:
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
diff --git a/roles/openshift_logging_curator/meta/main.yaml b/roles/openshift_logging_curator/meta/main.yaml
index d4635aab0..9f7c6341c 100644
--- a/roles/openshift_logging_curator/meta/main.yaml
+++ b/roles/openshift_logging_curator/meta/main.yaml
@@ -14,3 +14,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml
index e7ef5ff22..cc68998f5 100644
--- a/roles/openshift_logging_curator/tasks/main.yaml
+++ b/roles/openshift_logging_curator/tasks/main.yaml
@@ -2,7 +2,7 @@
- name: Set default image variables based on deployment_type
include_vars: "{{ var_file_name }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "{{ openshift_deployment_type }}.yml"
- "default_images.yml"
loop_control:
loop_var: var_file_name
@@ -54,14 +54,17 @@
- copy:
src: curator.yml
dest: "{{ tempdir }}/curator.yml"
- when: curator_config_contents is undefined
changed_when: no
-- copy:
- content: "{{ curator_config_contents }}"
- dest: "{{ tempdir }}/curator.yml"
- when: curator_config_contents is defined
- changed_when: no
+- import_role:
+ name: openshift_logging
+ tasks_from: patch_configmap_files.yaml
+ vars:
+ configmap_name: "logging-curator"
+ configmap_namespace: "logging"
+ configmap_file_names:
+ - current_file: "config.yaml"
+ new_file: "{{ tempdir }}/curator.yml"
- name: Set Curator configmap
oc_configmap:
diff --git a/roles/openshift_logging_curator/vars/main.yml b/roles/openshift_logging_curator/vars/main.yml
index 95bf462d1..df5299a83 100644
--- a/roles/openshift_logging_curator/vars/main.yml
+++ b/roles/openshift_logging_curator/vars/main.yml
@@ -1,3 +1,3 @@
---
-__latest_curator_version: "3_6"
-__allowed_curator_versions: ["3_5", "3_6", "3_7"]
+__latest_curator_version: "3_9"
+__allowed_curator_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"]
diff --git a/roles/openshift_logging_elasticsearch/meta/main.yaml b/roles/openshift_logging_elasticsearch/meta/main.yaml
index 6a9a6539c..e93d6b73e 100644
--- a/roles/openshift_logging_elasticsearch/meta/main.yaml
+++ b/roles/openshift_logging_elasticsearch/meta/main.yaml
@@ -14,3 +14,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml
index c53a06019..c55e7c5ea 100644
--- a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml
@@ -15,3 +15,5 @@
- fail:
msg: Invalid version specified for Elasticsearch
when: es_version not in __allowed_es_versions
+
+- include_tasks: get_es_version.yml
diff --git a/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml b/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml
new file mode 100644
index 000000000..16de6f252
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml
@@ -0,0 +1,42 @@
+---
+- command: >
+ oc get pod -l component=es,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
+ register: _cluster_pods
+
+- name: "Getting ES version for logging-es cluster"
+ command: >
+ oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XGET 'https://localhost:9200/'
+ register: _curl_output
+ when: _cluster_pods.stdout_lines | count > 0
+
+- command: >
+ oc get pod -l component=es-ops,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
+ register: _ops_cluster_pods
+
+- name: "Getting ES version for logging-es-ops cluster"
+ command: >
+ oc exec {{ _ops_cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XGET 'https://localhost:9200/'
+ register: _ops_curl_output
+ when: _ops_cluster_pods.stdout_lines | count > 0
+
+- set_fact:
+ _es_output: "{{ _curl_output.stdout | from_json }}"
+ when: _curl_output.stdout is defined
+
+- set_fact:
+ _es_ops_output: "{{ _ops_curl_output.stdout | from_json }}"
+ when: _ops_curl_output.stdout is defined
+
+- set_fact:
+ _es_installed_version: "{{ _es_output.version.number }}"
+ when:
+ - _es_output is defined
+ - _es_output.version is defined
+ - _es_output.version.number is defined
+
+- set_fact:
+ _es_ops_installed_version: "{{ _es_ops_output.version.number }}"
+ when:
+ - _es_ops_output is defined
+ - _es_ops_output.version is defined
+ - _es_ops_output.version.number is defined
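For a sense of what these commands return (pod names are hypothetical), the Running-phase jsonpath filter yields a space-separated list, and the exec tasks pick the first entry via stdout.split(' ')[0]:

    $ oc get pod -l component=es,provider=openshift -n logging \
        -o 'jsonpath={.items[?(@.status.phase=="Running")].metadata.name}'
    logging-es-data-master-4vyn6dub-1-abcde logging-es-data-master-9tnvlm4g-1-fghij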
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 5fe683ae5..ff5ad1045 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -15,10 +15,10 @@
elasticsearch_name: "{{ 'logging-elasticsearch' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}"
es_component: "{{ 'es' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}"
-- name: Set default image variables based on deployment_type
+- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ var_file_name }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "{{ openshift_deployment_type }}.yml"
- "default_images.yml"
loop_control:
loop_var: var_file_name
@@ -32,6 +32,18 @@
- include_tasks: determine_version.yaml
+- set_fact:
+ full_restart_cluster: True
+ when:
+ - _es_installed_version is defined
+ - _es_installed_version.split('.')[0] | int < __es_version.split('.')[0] | int
+
+- set_fact:
+ full_restart_cluster: True
+ when:
+ - _es_ops_installed_version is defined
+ - _es_ops_installed_version.split('.')[0] | int < __es_version.split('.')[0] | int
+
# allow passing in a tempdir
- name: Create temp directory for doing work in
command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
@@ -111,7 +123,7 @@
- name: Create logging-metrics-reader-role
command: >
- {{ openshift.common.client_binary }}
+ {{ openshift_client_binary }}
--config={{ openshift.common.config_base }}/master/admin.kubeconfig
-n "{{ openshift_logging_elasticsearch_namespace }}"
create -f "{{mktemp.stdout}}/templates/logging-metrics-role.yml"
@@ -168,33 +180,33 @@
when: es_logging_contents is undefined
changed_when: no
-- set_fact:
- __es_num_of_shards: "{{ _es_configmap | default({}) | walk('index.number_of_shards', '1') }}"
- __es_num_of_replicas: "{{ _es_configmap | default({}) | walk('index.number_of_replicas', '0') }}"
-
- template:
src: elasticsearch.yml.j2
dest: "{{ tempdir }}/elasticsearch.yml"
vars:
allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower | default('false') }}"
- es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(None) or __es_num_of_shards }}"
- es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(None) or __es_num_of_replicas }}"
+ es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
+    es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}"
es_kibana_index_mode: "{{ openshift_logging_elasticsearch_kibana_index_mode | default('unique') }}"
when: es_config_contents is undefined
changed_when: no
-- copy:
- content: "{{ es_logging_contents }}"
- dest: "{{ tempdir }}/elasticsearch-logging.yml"
- when: es_logging_contents is defined
- changed_when: no
-
-- copy:
- content: "{{ es_config_contents }}"
- dest: "{{ tempdir }}/elasticsearch.yml"
- when: es_config_contents is defined
- changed_when: no
+# create a diff between the currently deployed configmap files and our generated files
+# NOTE: include_role must be used instead of import_role because
+# this task file is looped over from another role.
+- include_role:
+ name: openshift_logging
+ tasks_from: patch_configmap_files.yaml
+ vars:
+ configmap_name: "logging-elasticsearch"
+ configmap_namespace: "logging"
+ configmap_file_names:
+ - current_file: "elasticsearch.yml"
+ new_file: "{{ tempdir }}/elasticsearch.yml"
+ protected_lines: ["number_of_shards", "number_of_replicas"]
+ - current_file: "logging.yml"
+ new_file: "{{ tempdir }}/elasticsearch-logging.yml"
- name: Set ES configmap
oc_configmap:
@@ -352,7 +364,7 @@
delete_after: true
- set_fact:
- es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}"
+ es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | lib_utils_oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}"
when: openshift_logging_elasticsearch_deployment_name == ""
- set_fact:
diff --git a/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
index 4a32453e3..6bce13d1d 100644
--- a/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
+++ b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
@@ -1,7 +1,25 @@
---
+# Disable external communication for {{ _cluster_component }}
+- name: Disable external communication for logging-{{ _cluster_component }}
+ oc_service:
+ state: present
+ name: "logging-{{ _cluster_component }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ selector:
+ component: "{{ _cluster_component }}"
+ provider: openshift
+ connection: blocked
+ labels:
+ logging-infra: 'support'
+ ports:
+ - port: 9200
+ targetPort: "restapi"
+ when:
+ - full_restart_cluster | bool
+
## get all pods for the cluster
- command: >
- oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+ oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
register: _cluster_pods
- name: "Disable shard balancing for logging-{{ _cluster_component }} cluster"
@@ -11,21 +29,42 @@
changed_when: "'\"acknowledged\":true' in _disable_output.stdout"
when: _cluster_pods.stdout_lines | count > 0
+# Flush ES
+- name: "Flushing for logging-{{ _cluster_component }} cluster"
+ command: >
+ oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_flush/synced'
+ register: _flush_output
+ changed_when: "'\"acknowledged\":true' in _flush_output.stdout"
+ when:
+ - _cluster_pods.stdout_lines | count > 0
+ - full_restart_cluster | bool
+
- command: >
oc get dc -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
register: _cluster_dcs
+## restart all dcs for full restart
+- name: "Restart ES node {{ _es_node }}"
+ include_tasks: restart_es_node.yml
+  with_items: "{{ _cluster_dcs.stdout.split(' ') }}"
+ loop_control:
+ loop_var: _es_node
+ when:
+ - full_restart_cluster | bool
+
## restart the node if its dc is in the list of nodes to restart?
- name: "Restart ES node {{ _es_node }}"
include_tasks: restart_es_node.yml
with_items: "{{ _restart_logging_nodes }}"
loop_control:
loop_var: _es_node
- when: _es_node in _cluster_dcs.stdout
+ when:
+ - not full_restart_cluster | bool
+ - _es_node in _cluster_dcs.stdout
## we may need a new first pod to run against -- fetch them all again
- command: >
- oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+ oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
register: _cluster_pods
- name: "Enable shard balancing for logging-{{ _cluster_component }} cluster"
@@ -33,3 +72,20 @@
oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "all" } }'
register: _enable_output
changed_when: "'\"acknowledged\":true' in _enable_output.stdout"
+
+# Re-enable external communication for {{ _cluster_component }}
+- name: Re-enable external communication for logging-{{ _cluster_component }}
+ oc_service:
+ state: present
+ name: "logging-{{ _cluster_component }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ selector:
+ component: "{{ _cluster_component }}"
+ provider: openshift
+ labels:
+ logging-infra: 'support'
+ ports:
+ - port: 9200
+ targetPort: "restapi"
+ when:
+ - full_restart_cluster | bool
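The disable/re-enable pair above works because no Elasticsearch pod carries a connection=blocked label: adding that key to the selector makes the service match zero pods, draining its endpoints for the duration of the full-cluster restart, and the closing task restores the original selector. While blocked, the service effectively carries:

    selector:
      component: es          # or es-ops
      provider: openshift
      connection: blocked    # matches no pods, so no external traffic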
diff --git a/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml b/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml
index b07b232ce..6d0df40c8 100644
--- a/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml
+++ b/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml
@@ -14,6 +14,8 @@
- _dc_output.results.results[0].status is defined
- _dc_output.results.results[0].status.readyReplicas is defined
- _dc_output.results.results[0].status.readyReplicas > 0
+ - _dc_output.results.results[0].status.updatedReplicas is defined
+ - _dc_output.results.results[0].status.updatedReplicas > 0
retries: 60
delay: 30
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index cf6ee36bb..4b189f255 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -50,7 +50,7 @@ spec:
- -provider=openshift
- -client-id={{openshift_logging_elasticsearch_prometheus_sa}}
- -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
- - -cookie-secret={{ 16 | oo_random_word | b64encode }}
+ - -cookie-secret={{ 16 | lib_utils_oo_random_word | b64encode }}
- -upstream=https://localhost:9200
- '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}'
- '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}'
diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml
index c8e995146..122231031 100644
--- a/roles/openshift_logging_elasticsearch/vars/main.yml
+++ b/roles/openshift_logging_elasticsearch/vars/main.yml
@@ -1,9 +1,10 @@
---
-__latest_es_version: "3_6"
-__allowed_es_versions: ["3_5", "3_6", "3_7"]
+__latest_es_version: "3_9"
+__allowed_es_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"]
__allowed_es_types: ["data-master", "data-client", "master", "client"]
__es_log_appenders: ['file', 'console']
__kibana_index_modes: ["unique", "shared_ops"]
+__es_version: "2.4.4"
__es_local_curl: "curl -s --cacert /etc/elasticsearch/secret/admin-ca --cert /etc/elasticsearch/secret/admin-cert --key /etc/elasticsearch/secret/admin-key"
@@ -14,3 +15,4 @@ es_min_masters_default: "{{ (openshift_logging_elasticsearch_replica_count | int
es_min_masters: "{{ (openshift_logging_elasticsearch_replica_count == 1) | ternary(1, es_min_masters_default) }}"
es_recover_after_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}"
es_recover_expected_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}"
+full_restart_cluster: False
diff --git a/roles/openshift_node_facts/meta/main.yml b/roles/openshift_logging_eventrouter/meta/main.yaml
index 59bf680ce..711bb8f22 100644
--- a/roles/openshift_node_facts/meta/main.yml
+++ b/roles/openshift_logging_eventrouter/meta/main.yaml
@@ -1,10 +1,10 @@
---
galaxy_info:
- author: Andrew Butcher
- description: OpenShift Node Facts
+ author: OpenShift Red Hat
+ description: OpenShift Aggregated Logging Eventrouter
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 1.9
+ min_ansible_version: 2.2
platforms:
- name: EL
versions:
@@ -12,4 +12,6 @@ galaxy_info:
categories:
- cloud
dependencies:
+- role: lib_openshift
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_logging_eventrouter/tasks/main.yaml b/roles/openshift_logging_eventrouter/tasks/main.yaml
index 96b181d61..31780a343 100644
--- a/roles/openshift_logging_eventrouter/tasks/main.yaml
+++ b/roles/openshift_logging_eventrouter/tasks/main.yaml
@@ -1,8 +1,8 @@
---
-- name: Set default image variables based on deployment_type
+- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ var_file_name }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "{{ openshift_deployment_type }}.yml"
- "default_images.yml"
loop_control:
loop_var: var_file_name
diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml
index 9b58e4456..87b4204b5 100644
--- a/roles/openshift_logging_fluentd/defaults/main.yml
+++ b/roles/openshift_logging_fluentd/defaults/main.yml
@@ -5,6 +5,7 @@ openshift_logging_fluentd_master_url: "https://kubernetes.default.svc.{{ openshi
openshift_logging_fluentd_namespace: logging
### Common settings
+# map_from_pairs is a custom filter plugin in role lib_utils
openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
openshift_logging_fluentd_cpu_limit: null
openshift_logging_fluentd_cpu_request: 100m
diff --git a/roles/openshift_logging_fluentd/meta/main.yaml b/roles/openshift_logging_fluentd/meta/main.yaml
index 89c98204f..62f076780 100644
--- a/roles/openshift_logging_fluentd/meta/main.yaml
+++ b/roles/openshift_logging_fluentd/meta/main.yaml
@@ -14,3 +14,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml
index 12b4f5bfd..2721438f0 100644
--- a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml
+++ b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml
@@ -4,8 +4,7 @@
name: "{{ node }}"
kind: node
state: add
- labels: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
+ labels: "{{ openshift_logging_fluentd_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"
# wait half a second between labels
- local_action: command sleep {{ openshift_logging_fluentd_label_delay | default('.5') }}
- become: no
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index 87eedfb4b..79ebbca08 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -34,10 +34,10 @@
msg: WARNING Use of openshift_logging_mux_client_mode=minimal is not recommended due to current scaling issues
when: openshift_logging_mux_client_mode is defined and openshift_logging_mux_client_mode == 'minimal'
-- name: Set default image variables based on deployment_type
+- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ var_file_name }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "{{ openshift_deployment_type }}.yml"
- "default_images.yml"
loop_control:
loop_var: var_file_name
@@ -108,38 +108,28 @@
dest: "{{ tempdir }}/fluent.conf"
vars:
deploy_type: "{{ openshift_logging_fluentd_deployment_type }}"
- when: fluentd_config_contents is undefined
- changed_when: no
- copy:
src: fluentd-throttle-config.yaml
dest: "{{ tempdir }}/fluentd-throttle-config.yaml"
- when: fluentd_throttle_contents is undefined
- changed_when: no
- copy:
src: secure-forward.conf
dest: "{{ tempdir }}/secure-forward.conf"
- when: fluentd_secureforward_contents is undefined
- changed_when: no
-
-- copy:
- content: "{{ fluentd_config_contents }}"
- dest: "{{ tempdir }}/fluent.conf"
- when: fluentd_config_contents is defined
- changed_when: no
-- copy:
- content: "{{ fluentd_throttle_contents }}"
- dest: "{{ tempdir }}/fluentd-throttle-config.yaml"
- when: fluentd_throttle_contents is defined
- changed_when: no
-
-- copy:
- content: "{{ fluentd_secureforward_contents }}"
- dest: "{{ tempdir }}/secure-forward.conf"
- when: fluentd_secureforward_contents is defined
- changed_when: no
+- import_role:
+ name: openshift_logging
+ tasks_from: patch_configmap_files.yaml
+ vars:
+ configmap_name: "logging-fluentd"
+ configmap_namespace: "logging"
+ configmap_file_names:
+ - current_file: "fluent.conf"
+ new_file: "{{ tempdir }}/fluent.conf"
+ - current_file: "throttle-config.yaml"
+ new_file: "{{ tempdir }}/fluentd-throttle-config.yaml"
+ - current_file: "secure-forward.conf"
+ new_file: "{{ tempdir }}/secure-forward.conf"
- name: Set Fluentd configmap
oc_configmap:
@@ -182,8 +172,8 @@
app_port: "{{ openshift_logging_fluentd_app_port }}"
ops_host: "{{ openshift_logging_fluentd_ops_host }}"
ops_port: "{{ openshift_logging_fluentd_ops_port }}"
- fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}"
- fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}"
+ fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys() | first }}"
+ fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values() | first }}"
fluentd_cpu_limit: "{{ openshift_logging_fluentd_cpu_limit }}"
fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request | min_cpu(openshift_logging_fluentd_cpu_limit | default(none)) }}"
fluentd_memory_limit: "{{ openshift_logging_fluentd_memory_limit }}"
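(The nodeselector change above is a Python 3 fix: dict.keys() and dict.values() return views that cannot be indexed with [0], so the `first` filter is used instead; it works on both Python 2 lists and Python 3 views.)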
diff --git a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml
index 92a426952..b60da814f 100644
--- a/roles/openshift_logging_fluentd/vars/main.yml
+++ b/roles/openshift_logging_fluentd/vars/main.yml
@@ -1,5 +1,5 @@
---
-__latest_fluentd_version: "3_6"
-__allowed_fluentd_versions: ["3_5", "3_6", "3_7"]
+__latest_fluentd_version: "3_9"
+__allowed_fluentd_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"]
__allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"]
__allowed_mux_client_modes: ["minimal", "maximal"]
diff --git a/roles/openshift_logging_kibana/meta/main.yaml b/roles/openshift_logging_kibana/meta/main.yaml
index d97586a37..d9d76dfe0 100644
--- a/roles/openshift_logging_kibana/meta/main.yaml
+++ b/roles/openshift_logging_kibana/meta/main.yaml
@@ -14,3 +14,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
index 77bf8042a..3c3bd902e 100644
--- a/roles/openshift_logging_kibana/tasks/main.yaml
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -1,9 +1,9 @@
---
# fail if we don't have an endpoint for ES to connect to?
-- name: Set default image variables based on deployment_type
+- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ var_file_name }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "{{ openshift_deployment_type }}.yml"
- "default_images.yml"
loop_control:
loop_var: var_file_name
@@ -69,7 +69,7 @@
# gen session_secret if necessary
- name: Generate session secret
copy:
- content: "{{ 200 | oo_random_word }}"
+ content: "{{ 200 | lib_utils_oo_random_word }}"
dest: "{{ generated_certs_dir }}/session_secret"
when:
- not session_secret_file.stat.exists
@@ -77,7 +77,7 @@
# gen oauth_secret if necessary
- name: Generate oauth secret
copy:
- content: "{{ 64 | oo_random_word }}"
+ content: "{{ 64 | lib_utils_oo_random_word }}"
dest: "{{ generated_certs_dir }}/oauth_secret"
when:
- not oauth_secret_file.stat.exists
diff --git a/roles/openshift_logging_kibana/vars/main.yml b/roles/openshift_logging_kibana/vars/main.yml
index 241877a02..fed926a3b 100644
--- a/roles/openshift_logging_kibana/vars/main.yml
+++ b/roles/openshift_logging_kibana/vars/main.yml
@@ -1,3 +1,3 @@
---
-__latest_kibana_version: "3_6"
-__allowed_kibana_versions: ["3_5", "3_6", "3_7"]
+__latest_kibana_version: "3_9"
+__allowed_kibana_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"]
diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml
index db6f23126..e87c8d33e 100644
--- a/roles/openshift_logging_mux/defaults/main.yml
+++ b/roles/openshift_logging_mux/defaults/main.yml
@@ -6,6 +6,7 @@ openshift_logging_mux_master_public_url: "{{ openshift_hosted_logging_master_pub
openshift_logging_mux_namespace: logging
### Common settings
+# map_from_pairs is a custom filter plugin in role lib_utils
openshift_logging_mux_nodeselector: "{{ openshift_hosted_logging_mux_nodeselector_label | default('') | map_from_pairs }}"
openshift_logging_mux_cpu_limit: null
openshift_logging_mux_cpu_request: 100m
@@ -30,6 +31,7 @@ openshift_logging_mux_allow_external: False
openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
openshift_logging_mux_hostname: "{{ 'mux.' ~ openshift_master_default_subdomain }}"
openshift_logging_mux_port: 24284
+openshift_logging_mux_external_address: "{{ ansible_default_ipv4.address }}"
# the namespace to use for undefined projects should come first, followed by any
# additional namespaces to create by default - users will typically not need to set this
openshift_logging_mux_default_namespaces: ["mux-undefined"]
diff --git a/roles/openshift_logging_mux/meta/main.yaml b/roles/openshift_logging_mux/meta/main.yaml
index f271d8d7d..969752f15 100644
--- a/roles/openshift_logging_mux/meta/main.yaml
+++ b/roles/openshift_logging_mux/meta/main.yaml
@@ -14,3 +14,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
index 68948bce2..7eba3cda4 100644
--- a/roles/openshift_logging_mux/tasks/main.yaml
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -7,10 +7,10 @@
msg: Operations logs destination is required
when: not openshift_logging_mux_ops_host or openshift_logging_mux_ops_host == ''
-- name: Set default image variables based on deployment_type
+- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ var_file_name }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "{{ openshift_deployment_type }}.yml"
- "default_images.yml"
loop_control:
loop_var: var_file_name
@@ -88,26 +88,24 @@
- copy:
src: fluent.conf
dest: "{{mktemp.stdout}}/fluent-mux.conf"
- when: fluentd_mux_config_contents is undefined
changed_when: no
- copy:
src: secure-forward.conf
dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
- when: fluentd_mux_securefoward_contents is undefined
changed_when: no
-- copy:
- content: "{{fluentd_mux_config_contents}}"
- dest: "{{mktemp.stdout}}/fluent-mux.conf"
- when: fluentd_mux_config_contents is defined
- changed_when: no
-
-- copy:
- content: "{{fluentd_mux_secureforward_contents}}"
- dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
- when: fluentd_mux_secureforward_contents is defined
- changed_when: no
+- import_role:
+ name: openshift_logging
+ tasks_from: patch_configmap_files.yaml
+ vars:
+ configmap_name: "logging-mux"
+ configmap_namespace: "{{ openshift_logging_mux_namespace }}"
+ configmap_file_names:
+ - current_file: "fluent.conf"
+      new_file: "{{ mktemp.stdout }}/fluent-mux.conf"
+    - current_file: "secure-forward.conf"
+      new_file: "{{ mktemp.stdout }}/secure-forward-mux.conf"
- name: Set Mux configmap
oc_configmap:
@@ -150,7 +148,7 @@
port: "{{ openshift_logging_mux_port }}"
targetPort: "mux-forward"
external_ips:
- - "{{ ansible_eth0.ipv4.address }}"
+ - "{{ openshift_logging_mux_external_address }}"
when: openshift_logging_mux_allow_external | bool
- name: Set logging-mux service for internal communication
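The external_ips change avoids the hard-coded ansible_eth0 fact, which is undefined on hosts whose primary interface is not named eth0; the new default, ansible_default_ipv4.address, follows the default route, and inventories can still pin the advertised address explicitly (illustrative value):

    openshift_logging_mux_external_address: 192.0.2.10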
diff --git a/roles/openshift_logging_mux/vars/main.yml b/roles/openshift_logging_mux/vars/main.yml
index e7b57f4b5..e87205bad 100644
--- a/roles/openshift_logging_mux/vars/main.yml
+++ b/roles/openshift_logging_mux/vars/main.yml
@@ -1,3 +1,3 @@
---
-__latest_mux_version: "3_6"
-__allowed_mux_versions: ["3_5", "3_6", "3_7"]
+__latest_mux_version: "3_9"
+__allowed_mux_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"]
diff --git a/roles/openshift_manage_node/meta/main.yml b/roles/openshift_manage_node/meta/main.yml
index d90cd28cf..a09808a39 100644
--- a/roles/openshift_manage_node/meta/main.yml
+++ b/roles/openshift_manage_node/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: lib_utils
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index a15f336e4..9251d380b 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -18,7 +18,7 @@
retries: 120
delay: 1
changed_when: false
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
delegate_to: "{{ openshift_master_host }}"
run_once: true
@@ -50,10 +50,9 @@
name: "{{ openshift.node.nodename }}"
kind: node
state: add
- labels: "{{ openshift.node.labels | oo_dict_to_list_of_dict }}"
+ labels: "{{ openshift_node_labels | lib_utils_oo_dict_to_list_of_dict }}"
namespace: default
when:
- "'nodename' in openshift.node"
- - "'labels' in openshift.node"
- - openshift.node.labels != {}
+ - openshift_node_labels | default({}) != {}
delegate_to: "{{ openshift_master_host }}"
diff --git a/roles/openshift_manageiq/meta/main.yml b/roles/openshift_manageiq/meta/main.yml
index 6c96a91bf..5c9481430 100644
--- a/roles/openshift_manageiq/meta/main.yml
+++ b/roles/openshift_manageiq/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: lib_utils
diff --git a/roles/openshift_management/tasks/add_container_provider.yml b/roles/openshift_management/tasks/add_container_provider.yml
index 24b2ce6ac..357e6a710 100644
--- a/roles/openshift_management/tasks/add_container_provider.yml
+++ b/roles/openshift_management/tasks/add_container_provider.yml
@@ -1,6 +1,6 @@
---
- name: Ensure OpenShift facts module is available
- include_role:
+ import_role:
role: openshift_facts
- name: Ensure OpenShift facts are loaded
@@ -27,7 +27,7 @@
- name: Ensure the management SA bearer token is identified
set_fact:
- management_token: "{{ sa.results | oo_filter_sa_secrets }}"
+ management_token: "{{ sa.results | lib_utils_oo_filter_sa_secrets }}"
- name: Ensure the SA bearer token value is read
oc_secret:
diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml
index f212dba7c..c4b204b98 100644
--- a/roles/openshift_management/tasks/main.yml
+++ b/roles/openshift_management/tasks/main.yml
@@ -8,7 +8,7 @@
# This creates a service account allowing Container Provider
# integration (managing OCP/Origin via MIQ/Management)
- name: Enable Container Provider Integration
- include_role:
+ import_role:
role: openshift_manageiq
- name: "Ensure the Management '{{ openshift_management_project }}' namespace exists"
diff --git a/roles/openshift_management/tasks/storage/nfs.yml b/roles/openshift_management/tasks/storage/nfs.yml
index 94e11137c..9e3a4d43a 100644
--- a/roles/openshift_management/tasks/storage/nfs.yml
+++ b/roles/openshift_management/tasks/storage/nfs.yml
@@ -5,14 +5,14 @@
- name: Setting up NFS storage
block:
- name: Include the NFS Setup role tasks
- include_role:
+ import_role:
role: openshift_nfs
tasks_from: setup
vars:
l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}"
- name: Create the App export
- include_role:
+ import_role:
role: openshift_nfs
tasks_from: create_export
vars:
@@ -22,7 +22,7 @@
l_nfs_options: "*(rw,no_root_squash,no_wdelay)"
- name: Create the DB export
- include_role:
+ import_role:
role: openshift_nfs
tasks_from: create_export
vars:
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index efd119299..7d96a467e 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -7,6 +7,12 @@ openshift_master_debug_level: "{{ debug_level | default(2) }}"
r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+osm_image_default_dict:
+ origin: 'openshift/origin'
+ openshift-enterprise: 'openshift3/ose'
+osm_image_default: "{{ osm_image_default_dict[openshift_deployment_type] }}"
+osm_image: "{{ osm_image_default }}"
+
system_images_registry_dict:
openshift-enterprise: "registry.access.redhat.com"
origin: "docker.io"
@@ -47,12 +53,12 @@ oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_ur
oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
oreg_auth_credentials_replace: False
l_bind_docker_reg_auth: False
-openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False)) or (openshift_use_crio_only | default(False)) }}"
+openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False) | bool) or (openshift_use_crio_only | default(False)) }}"
containerized_svc_dir: "/usr/lib/systemd/system"
ha_svc_template_path: "native-cluster"
-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig"
loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
@@ -76,6 +82,15 @@ openshift_master_valid_grant_methods:
openshift_master_is_scaleup_host: False
+# openshift_master_oauth_template is deprecated. It should be added to the
+# deprecation checks and removed.
+openshift_master_oauth_template: False
+openshift_master_oauth_templates_default:
+ login: "{{ openshift_master_oauth_template }}"
+openshift_master_oauth_templates: "{{ openshift_master_oauth_template | ternary(openshift_master_oauth_templates_default, False) }}"
+# Here we combine openshift_master_oauth_template into the 'login' key of openshift_master_oauth_templates, if the latter is not already set.
+l_openshift_master_oauth_templates: "{{ openshift_master_oauth_templates | default(openshift_master_oauth_templates_default) }}"
+
# These defaults assume forcing journald persistence, fsync to disk once
# a second, rate-limiting to 10,000 logs a second, no forwarding to
# syslog or wall, using 8GB of disk space maximum, using 10MB journal
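Tracing the oauth template chain above with the default openshift_master_oauth_template: False, the ternary returns False and the templates block in master.yaml.v1.j2 is skipped. With a login template set in the inventory (hypothetical path), the chain resolves to:

    # inventory
    openshift_master_oauth_template: /etc/origin/master/login-template.html
    # resulting value rendered under oauthConfig.templates
    l_openshift_master_oauth_templates:
      login: /etc/origin/master/login-template.html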
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index bf0cbbf18..3460efec9 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -14,5 +14,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_utils
-- role: lib_os_firewall
- role: openshift_facts
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 7bfc870d5..b12a6b346 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -16,10 +16,10 @@
- name: Install Master package
package:
- name: "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when:
- - not openshift.common.is_containerized | bool
+ - not openshift_is_containerized | bool
register: result
until: result is succeeded
@@ -31,12 +31,12 @@
owner: root
group: root
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- name: Reload systemd units
command: systemctl daemon-reload
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- name: Re-gather package dependent master facts
openshift_facts:
@@ -48,7 +48,7 @@
- name: Create the policy file if it does not already exist
command: >
- {{ openshift.common.client_binary }} adm create-bootstrap-policy-file
+ {{ openshift_client_binary }} adm create-bootstrap-policy-file
--filename={{ openshift_master_policy }}
args:
creates: "{{ openshift_master_policy }}"
@@ -69,7 +69,7 @@
package: name=httpd-tools state=present
when:
- item.kind == 'HTPasswdPasswordIdentityProvider'
- - not openshift.common.is_atomic | bool
+ - not openshift_is_atomic | bool
with_items: "{{ openshift.master.identity_providers }}"
register: result
until: result is succeeded
@@ -164,7 +164,7 @@
- name: Install Master system container
include_tasks: system_container.yml
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- l_is_master_system_container | bool
- name: Create session secrets file
@@ -181,6 +181,7 @@
- restart master api
- set_fact:
+ # translate_idps is a custom filter in role lib_utils
translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1') }}"
# TODO: add the validate parameter when there is a validation command to run
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
index 8b342a5b4..911a9bd3d 100644
--- a/roles/openshift_master/tasks/registry_auth.yml
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -43,7 +43,7 @@
set_fact:
l_bind_docker_reg_auth: True
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- oreg_auth_user is defined
- >
(master_oreg_auth_credentials_stat.stat.exists
diff --git a/roles/openshift_master/tasks/set_loopback_context.yml b/roles/openshift_master/tasks/set_loopback_context.yml
index 487fefb63..7e013a699 100644
--- a/roles/openshift_master/tasks/set_loopback_context.yml
+++ b/roles/openshift_master/tasks/set_loopback_context.yml
@@ -1,13 +1,13 @@
---
- name: Test local loopback context
command: >
- {{ openshift.common.client_binary }} config view
+ {{ openshift_client_binary }} config view
--config={{ openshift_master_loopback_config }}
changed_when: false
register: l_loopback_config
- command: >
- {{ openshift.common.client_binary }} config set-cluster
+ {{ openshift_client_binary }} config set-cluster
--certificate-authority={{ openshift_master_config_dir }}/ca.crt
--embed-certs=true --server={{ openshift.master.loopback_api_url }}
{{ openshift.master.loopback_cluster_name }}
@@ -17,7 +17,7 @@
register: set_loopback_cluster
- command: >
- {{ openshift.common.client_binary }} config set-context
+ {{ openshift_client_binary }} config set-context
--cluster={{ openshift.master.loopback_cluster_name }}
--namespace=default --user={{ openshift.master.loopback_user }}
{{ openshift.master.loopback_context_name }}
@@ -27,7 +27,7 @@
register: l_set_loopback_context
- command: >
- {{ openshift.common.client_binary }} config use-context {{ openshift.master.loopback_context_name }}
+ {{ openshift_client_binary }} config use-context {{ openshift.master.loopback_context_name }}
--config={{ openshift_master_loopback_config }}
when:
- l_set_loopback_context is changed
diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml
index f6c5ce0dd..dcbf7fd9f 100644
--- a/roles/openshift_master/tasks/system_container.yml
+++ b/roles/openshift_master/tasks/system_container.yml
@@ -2,7 +2,7 @@
- name: Pre-pull master system container image
command: >
- atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}
+ atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osm_image }}:{{ openshift_image_tag }}
register: l_pull_result
changed_when: "'Pulling layer' in l_pull_result.stdout"
@@ -14,7 +14,7 @@
- name: Install or Update HA api master system container
oc_atomic_container:
name: "{{ openshift_service_type }}-master-api"
- image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
+ image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osm_image }}:{{ openshift_image_tag }}"
state: latest
values:
- COMMAND=api
@@ -22,7 +22,7 @@
- name: Install or Update HA controller master system container
oc_atomic_container:
name: "{{ openshift_service_type }}-master-controllers"
- image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
+ image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osm_image }}:{{ openshift_image_tag }}"
state: latest
values:
- COMMAND=controllers
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 1c9ecafaa..870ab7c57 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -7,7 +7,7 @@
containerized_svc_dir: "/etc/systemd/system"
ha_svc_template_path: "docker-cluster"
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- include_tasks: registry_auth.yml
@@ -30,11 +30,11 @@
# This is the image used for both HA and non-HA clusters:
- name: Pre-pull master image
command: >
- docker pull {{ openshift.master.master_image }}:{{ openshift_image_tag }}
+ docker pull {{ osm_image }}:{{ openshift_image_tag }}
register: l_pull_result
changed_when: "'Downloaded newer image' in l_pull_result.stdout"
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- not l_is_master_system_container | bool
- name: Create the ha systemd unit files
diff --git a/roles/openshift_master/tasks/upgrade.yml b/roles/openshift_master/tasks/upgrade.yml
index f84cf2f6e..f143673cf 100644
--- a/roles/openshift_master/tasks/upgrade.yml
+++ b/roles/openshift_master/tasks/upgrade.yml
@@ -1,6 +1,6 @@
---
- include_tasks: upgrade/rpm_upgrade.yml
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
- include_tasks: upgrade/upgrade_scheduler.yml
diff --git a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
index f72710832..4564f33dd 100644
--- a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
@@ -8,8 +8,25 @@
# TODO: If the sdn package isn't already installed this will install it, we
# should fix that
-- name: Upgrade master packages
- package: name={{ master_pkgs | join(',') }} state=present
+- name: Upgrade master packages - yum
+ command:
+ yum install -y {{ master_pkgs | join(' ') }} \
+ {{ ' --exclude *' ~ openshift_service_type ~ '*3.9*' if openshift_release | version_compare('3.9','<') else '' }}
+ vars:
+ master_pkgs:
+ - "{{ openshift_service_type }}{{ openshift_pkg_version | default('') }}"
+ - "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') }}"
+ - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}"
+ - "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version | default('') }}"
+ - "{{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }}"
+ register: result
+ until: result is succeeded
+ when: ansible_pkg_mgr == 'yum'
+
+- name: Upgrade master packages - dnf
+ dnf:
+ name: "{{ master_pkgs | join(',') }}"
+ state: present
vars:
master_pkgs:
- "{{ openshift_service_type }}{{ openshift_pkg_version }}"
@@ -17,6 +34,6 @@
- "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
- "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- "{{ openshift_service_type }}-clients{{ openshift_pkg_version }}"
- - "tuned-profiles-{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
register: result
until: result is succeeded
+ when: ansible_pkg_mgr == 'dnf'
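For example, with openshift_service_type=atomic-openshift, openshift_pkg_version='-3.7.23', and openshift_release below 3.9, the yum branch renders roughly (illustrative values):

    yum install -y atomic-openshift-3.7.23 atomic-openshift-master-3.7.23 \
      atomic-openshift-node-3.7.23 atomic-openshift-sdn-ovs-3.7.23 \
      atomic-openshift-clients-3.7.23 --exclude *atomic-openshift*3.9*

The dnf branch installs the same package list without the exclude glob.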
diff --git a/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml b/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml
index 8558bf3e9..995a5ab70 100644
--- a/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml
+++ b/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml
@@ -1,6 +1,8 @@
---
# Upgrade predicates
- vars:
+ # openshift_master_facts_default_predicates is a custom lookup plugin in
+ # role lib_utils
prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}"
default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}"
diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2
index 3f7a528a9..4c68155ea 100644
--- a/roles/openshift_master/templates/atomic-openshift-master.j2
+++ b/roles/openshift_master/templates/atomic-openshift-master.j2
@@ -6,7 +6,7 @@ CONFIG_FILE={{ openshift_master_config_file }}
{% elif openshift_push_via_dns | default(false) %}
OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
{% endif %}
-{% if openshift.common.is_containerized | bool %}
+{% if openshift_is_containerized | bool %}
IMAGE_VERSION={{ openshift_image_tag }}
{% endif %}
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index 5e46d9121..a56c0340c 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -21,7 +21,7 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \
{% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
-v /etc/pki:/etc/pki:ro \
{% if l_bind_docker_reg_auth | default(False) %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
- {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api \
+ {{ osm_image }}:${IMAGE_VERSION} start master api \
--config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-master-api
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index 899575f1a..79171d511 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -20,7 +20,7 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \
{% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
-v /etc/pki:/etc/pki:ro \
{% if l_bind_docker_reg_auth | default(False) %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
- {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers \
+ {{ osm_image }}:${IMAGE_VERSION} start master controllers \
--config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-master-controllers
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index f1a76e5f5..14023ea73 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -1,6 +1,6 @@
admissionConfig:
{% if 'admission_plugin_config' in openshift.master %}
- pluginConfig:{{ openshift.master.admission_plugin_config | to_padded_yaml(level=2) }}
+ pluginConfig:{{ openshift.master.admission_plugin_config | lib_utils_to_padded_yaml(level=2) }}
{% endif %}
apiLevels:
- v1
@@ -16,13 +16,13 @@ assetConfig:
metricsPublicURL: {{ openshift_hosted_metrics_deploy_url }}
{% endif %}
{% if 'extension_scripts' in openshift.master %}
- extensionScripts: {{ openshift.master.extension_scripts | to_padded_yaml(1, 2) }}
+ extensionScripts: {{ openshift.master.extension_scripts | lib_utils_to_padded_yaml(1, 2) }}
{% endif %}
{% if 'extension_stylesheets' in openshift.master %}
- extensionStylesheets: {{ openshift.master.extension_stylesheets | to_padded_yaml(1, 2) }}
+ extensionStylesheets: {{ openshift.master.extension_stylesheets | lib_utils_to_padded_yaml(1, 2) }}
{% endif %}
{% if 'extensions' in openshift.master %}
- extensions: {{ openshift.master.extensions | to_padded_yaml(1, 2) }}
+ extensions: {{ openshift.master.extensions | lib_utils_to_padded_yaml(1, 2) }}
{% endif %}
servingInfo:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.console_port }}
@@ -42,7 +42,7 @@ assetConfig:
{% endfor %}
{% endif %}
{% if openshift.master.audit_config | default(none) is not none %}
-auditConfig:{{ openshift.master.audit_config | to_padded_yaml(level=1) }}
+auditConfig:{{ openshift.master.audit_config | lib_utils_to_padded_yaml(level=1) }}
{% endif %}
controllerConfig:
election:
@@ -85,7 +85,7 @@ imageConfig:
format: {{ openshift.master.registry_url }}
latest: {{ openshift_master_image_config_latest }}
{% if 'image_policy_config' in openshift.master %}
-imagePolicyConfig:{{ openshift.master.image_policy_config | to_padded_yaml(level=1) }}
+imagePolicyConfig:{{ openshift.master.image_policy_config | lib_utils_to_padded_yaml(level=1) }}
{% endif %}
kind: MasterConfig
kubeletClientInfo:
@@ -96,21 +96,21 @@ kubeletClientInfo:
port: 10250
{% if openshift.master.embedded_kube | bool %}
kubernetesMasterConfig:
- apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_padded_yaml( level=2 ) }}
+ apiServerArguments: {{ openshift.master.api_server_args | default(None) | lib_utils_to_padded_yaml( level=2 ) }}
{% if r_openshift_master_etcd3_storage or ( r_openshift_master_clean_install and openshift.common.version_gte_3_6 ) %}
storage-backend:
- etcd3
storage-media-type:
- application/vnd.kubernetes.protobuf
{% endif %}
- controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }}
+ controllerArguments: {{ openshift.master.controller_args | default(None) | lib_utils_to_padded_yaml( level=2 ) }}
masterCount: {{ openshift.master.master_count }}
masterIP: {{ openshift.common.ip }}
podEvictionTimeout: {{ openshift.master.pod_eviction_timeout | default("") }}
proxyClientInfo:
certFile: master.proxy-client.crt
keyFile: master.proxy-client.key
- schedulerArguments: {{ openshift_master_scheduler_args | default(None) | to_padded_yaml( level=3 ) }}
+ schedulerArguments: {{ openshift_master_scheduler_args | default(None) | lib_utils_to_padded_yaml( level=3 ) }}
schedulerConfigFile: {{ openshift_master_scheduler_conf }}
servicesNodePortRange: "{{ openshift_node_port_range | default("") }}"
servicesSubnet: {{ openshift.common.portal_net }}
@@ -144,7 +144,7 @@ networkConfig:
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
serviceNetworkCIDR: {{ openshift.common.portal_net }}
- externalIPNetworkCIDRs: {{ openshift_master_external_ip_network_cidrs | default(["0.0.0.0/0"]) | to_padded_yaml(1,2) }}
+ externalIPNetworkCIDRs: {{ openshift_master_external_ip_network_cidrs | default(["0.0.0.0/0"]) | lib_utils_to_padded_yaml(1,2) }}
{% if openshift_master_ingress_ip_network_cidr is defined %}
ingressIPNetworkCIDR: {{ openshift_master_ingress_ip_network_cidr }}
{% endif %}
@@ -152,8 +152,8 @@ oauthConfig:
{% if 'oauth_always_show_provider_selection' in openshift.master %}
alwaysShowProviderSelection: {{ openshift.master.oauth_always_show_provider_selection }}
{% endif %}
-{% if 'oauth_templates' in openshift.master %}
- templates:{{ openshift.master.oauth_templates | to_padded_yaml(level=2) }}
+{% if l_openshift_master_oauth_templates %}
+ templates:{{ l_openshift_master_oauth_templates | lib_utils_to_padded_yaml(level=2) }}
{% endif %}
assetPublicURL: {{ openshift.master.public_console_url }}/
grantConfig:
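
Annotation, not part of the patch: the padded-YAML filter used throughout this template serializes a dict or list to YAML and indents it so it can be appended directly after a key. A minimal sketch, assuming the lib_utils role from this repository is resolvable so its filter plugins load:

    # Sketch only. Assumes the lib_utils role ships lib_utils_to_padded_yaml.
    - hosts: localhost
      gather_facts: false
      roles:
      - lib_utils
      tasks:
      - name: Render an admissionConfig fragment the way master.yaml.v1.j2 does
        copy:
          dest: /tmp/admission-fragment.yaml
          content: "pluginConfig:{{ plugin_cfg | lib_utils_to_padded_yaml(level=2) }}"
        vars:
          plugin_cfg:
            openshift.io/ImagePolicy:
              configuration:
                kind: ImagePolicyConfig
                apiVersion: v1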
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
index cc21b37af..bff32b2e3 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
@@ -6,7 +6,7 @@ CONFIG_FILE={{ openshift_master_config_file }}
{% elif openshift_push_via_dns | default(false) %}
OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
{% endif %}
-{% if openshift.common.is_containerized | bool %}
+{% if openshift_is_containerized | bool %}
IMAGE_VERSION={{ openshift_image_tag }}
{% endif %}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
index 493fc510e..b8a519baa 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
@@ -6,7 +6,7 @@ CONFIG_FILE={{ openshift_master_config_file }}
{% elif openshift_push_via_dns | default(false) %}
OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
{% endif %}
-{% if openshift.common.is_containerized | bool %}
+{% if openshift_is_containerized | bool %}
IMAGE_VERSION={{ openshift_image_tag }}
{% endif %}
diff --git a/roles/openshift_master_certificates/meta/main.yml b/roles/openshift_master_certificates/meta/main.yml
index 300b2cbff..e7d9f5bba 100644
--- a/roles/openshift_master_certificates/meta/main.yml
+++ b/roles/openshift_master_certificates/meta/main.yml
@@ -12,4 +12,5 @@ galaxy_info:
categories:
- cloud
- system
-dependencies: []
+dependencies:
+- role: lib_utils
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index ec1fbb1ee..ce27e238f 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -27,7 +27,7 @@
master_certs_missing: "{{ true if openshift_certificates_redeploy | default(false) | bool
else (False in (g_master_cert_stat_result.results
| default({})
- | oo_collect(attribute='stat.exists')
+ | lib_utils_oo_collect(attribute='stat.exists')
| list)) }}"
- name: Ensure the generated_configs directory is present
@@ -47,11 +47,11 @@
- name: Create the master server certificate
command: >
- {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert
- {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
+ {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-server-cert
+ {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
- {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | oo_collect('path') %}
+ {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | lib_utils_oo_collect('path') %}
--certificate-authority {{ legacy_ca_certificate }}
{% endfor %}
--hostnames={{ hostvars[item].openshift.common.all_hostnames | join(',') }}
@@ -64,16 +64,16 @@
--overwrite=false
when: item != openshift_ca_host
with_items: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}"
+ | lib_utils_oo_select_keys(groups['oo_masters_to_config'])
+ | lib_utils_oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}"
delegate_to: "{{ openshift_ca_host }}"
run_once: true
- name: Generate the loopback master client config
command: >
- {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+ {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm create-api-client-config
--certificate-authority={{ openshift_ca_cert }}
- {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
+ {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
--client-dir={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}
@@ -89,8 +89,8 @@
args:
creates: "{{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/openshift-master.kubeconfig"
with_items: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}"
+ | lib_utils_oo_select_keys(groups['oo_masters_to_config'])
+ | lib_utils_oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}"
when: item != openshift_ca_host
delegate_to: "{{ openshift_ca_host }}"
run_once: true
@@ -101,6 +101,7 @@
state: hard
force: true
with_items:
+ # certificates_to_synchronize is a custom filter in lib_utils
- "{{ hostvars[inventory_hostname] | certificates_to_synchronize }}"
when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
delegate_to: "{{ openshift_ca_host }}"
@@ -120,7 +121,11 @@
register: g_master_certs_mktemp
changed_when: False
when: master_certs_missing | bool
- become: no
+
+- name: Chmod local temp directory for syncing certs
+ local_action: command chmod 777 "{{ g_master_certs_mktemp.stdout }}"
+ changed_when: False
+ when: master_certs_missing | bool
- name: Create a tarball of the master certs
command: >
@@ -157,7 +162,6 @@
local_action: file path="{{ g_master_certs_mktemp.stdout }}" state=absent
changed_when: False
when: master_certs_missing | bool
- become: no
- name: Lookup default group for ansible_ssh_user
command: "/usr/bin/id -g {{ ansible_ssh_user | quote }}"
diff --git a/roles/openshift_master_facts/filter_plugins/oo_filters.py b/roles/openshift_master_facts/filter_plugins/oo_filters.py
deleted file mode 120000
index 6f9bc47c1..000000000
--- a/roles/openshift_master_facts/filter_plugins/oo_filters.py
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins/oo_filters.py \ No newline at end of file
diff --git a/roles/openshift_master_facts/meta/main.yml b/roles/openshift_master_facts/meta/main.yml
index 9dbf719f8..0ab2311d3 100644
--- a/roles/openshift_master_facts/meta/main.yml
+++ b/roles/openshift_master_facts/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 418dcba67..f450c916a 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -15,7 +15,7 @@
set_fact:
g_metrics_hostname: "{{ openshift_hosted_metrics_public_url
| default('hawkular-metrics.' ~ openshift_master_default_subdomain)
- | oo_hostname_from_url }}"
+ | lib_utils_oo_hostname_from_url }}"
- set_fact:
openshift_hosted_metrics_deploy_url: "https://{{ g_metrics_hostname }}/hawkular/metrics"
@@ -57,6 +57,7 @@
access_token_max_seconds: "{{ openshift_master_access_token_max_seconds | default(None) }}"
auth_token_max_seconds: "{{ openshift_master_auth_token_max_seconds | default(None) }}"
identity_providers: "{{ openshift_master_identity_providers | default(None) }}"
+ # oo_htpasswd_users_from_file is a custom filter in role lib_utils
htpasswd_users: "{{ openshift_master_htpasswd_users | default(lookup('file', openshift_master_htpasswd_file) | oo_htpasswd_users_from_file if openshift_master_htpasswd_file is defined else None) }}"
manage_htpasswd: "{{ openshift_master_manage_htpasswd | default(true) }}"
ldap_ca: "{{ openshift_master_ldap_ca | default(lookup('file', openshift_master_ldap_ca_file) if openshift_master_ldap_ca_file is defined else None) }}"
@@ -72,11 +73,8 @@
controller_args: "{{ osm_controller_args | default(None) }}"
disabled_features: "{{ osm_disabled_features | default(None) }}"
master_count: "{{ openshift_master_count | default(None) }}"
- master_image: "{{ osm_image | default(None) }}"
admission_plugin_config: "{{openshift_master_admission_plugin_config }}"
kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" # deprecated, merged with admission_plugin_config
- oauth_template: "{{ openshift_master_oauth_template | default(None) }}" # deprecated in origin 1.2 / OSE 3.2
- oauth_templates: "{{ openshift_master_oauth_templates | default(None) }}"
oauth_always_show_provider_selection: "{{ openshift_master_oauth_always_show_provider_selection | default(None) }}"
image_policy_config: "{{ openshift_master_image_policy_config | default(None) }}"
dynamic_provisioning_enabled: "{{ openshift_master_dynamic_provisioning_enabled | default(None) }}"
@@ -93,6 +91,8 @@
- name: Set Default scheduler predicates and priorities
set_fact:
+ # openshift_master_facts_default_predicates is a custom lookup plugin in
+ # role lib_utils
openshift_master_scheduler_default_predicates: "{{ lookup('openshift_master_facts_default_predicates') }}"
openshift_master_scheduler_default_priorities: "{{ lookup('openshift_master_facts_default_priorities') }}"
diff --git a/roles/openshift_metrics/meta/main.yaml b/roles/openshift_metrics/meta/main.yaml
index 50214135c..675ec112f 100644
--- a/roles/openshift_metrics/meta/main.yaml
+++ b/roles/openshift_metrics/meta/main.yaml
@@ -15,5 +15,6 @@ galaxy_info:
categories:
- openshift
dependencies:
-- { role: lib_openshift }
-- { role: openshift_facts }
+- role: lib_openshift
+- role: lib_utils
+- role: openshift_facts
diff --git a/roles/openshift_metrics/tasks/generate_certificates.yaml b/roles/openshift_metrics/tasks/generate_certificates.yaml
index bb842d710..b71e35263 100644
--- a/roles/openshift_metrics/tasks/generate_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_certificates.yaml
@@ -1,7 +1,7 @@
---
- name: generate ca certificate chain
command: >
- {{ openshift.common.client_binary }} adm ca create-signer-cert
+ {{ openshift_client_binary }} adm ca create-signer-cert
--config={{ mktemp.stdout }}/admin.kubeconfig
--key='{{ mktemp.stdout }}/ca.key'
--cert='{{ mktemp.stdout }}/ca.crt'
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
index 0fd19c9f8..9395fceca 100644
--- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
@@ -14,7 +14,7 @@
changed_when: no
- name: generate password for hawkular metrics
- local_action: copy dest="{{ local_tmp.stdout }}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}"
+ local_action: copy dest="{{ local_tmp.stdout }}/{{ item }}.pwd" content="{{ 15 | lib_utils_oo_random_word }}"
with_items:
- hawkular-metrics
become: false
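
Annotation: the password generation above relies on the random-word filter, where the left-hand integer is the requested length. A hedged one-task illustration:

    # Sketch; assumes the lib_utils role provides lib_utils_oo_random_word.
    - hosts: localhost
      gather_facts: false
      roles:
      - lib_utils
      tasks:
      - name: Generate a 15-character random password
        set_fact:
          hawkular_example_pwd: "{{ 15 | lib_utils_oo_random_word }}"  # hypothetical var name
      - debug:
          var: hawkular_example_pwd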
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index 48584bd64..9026cc897 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -1,6 +1,6 @@
---
- shell: >
- {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }}
+ {{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }}
--config={{ mktemp.stdout }}/admin.kubeconfig
get rc hawkular-cassandra-{{node}} -o jsonpath='{.spec.replicas}' || echo 0
vars:
diff --git a/roles/openshift_metrics/tasks/install_hawkular.yaml b/roles/openshift_metrics/tasks/install_hawkular.yaml
index a4ffa1890..f45e7a042 100644
--- a/roles/openshift_metrics/tasks/install_hawkular.yaml
+++ b/roles/openshift_metrics/tasks/install_hawkular.yaml
@@ -1,6 +1,6 @@
---
- command: >
- {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }}
+ {{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }}
--config={{ mktemp.stdout }}/admin.kubeconfig
get rc hawkular-metrics -o jsonpath='{.spec.replicas}'
register: hawkular_metrics_replica_count
diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml
index a33b28ba7..73e7454f0 100644
--- a/roles/openshift_metrics/tasks/install_heapster.yaml
+++ b/roles/openshift_metrics/tasks/install_heapster.yaml
@@ -1,6 +1,6 @@
---
- command: >
- {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }}
+ {{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }}
--config={{ mktemp.stdout }}/admin.kubeconfig
get rc heapster -o jsonpath='{.spec.replicas}'
register: heapster_replica_count
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index 49d1d8cf1..4a63d081e 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -67,10 +67,27 @@
with_items: "{{ hawkular_agent_object_defs.results }}"
when: openshift_metrics_install_hawkular_agent | bool
+# TODO: Remove when asset config is removed from master-config.yaml
- include_tasks: update_master_config.yaml
+# Update asset config in openshift-web-console namespace
+- name: Add metrics route information to web console asset config
+ include_role:
+ name: openshift_web_console
+ tasks_from: update_console_config.yml
+ vars:
+ console_config_edits:
+ - key: clusterInfo#metricsPublicURL
+ value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics"
+ # Continue to set the old deprecated property until the
+ # origin-web-console image is updated for the new name.
+ # This will be removed in a future pull.
+ - key: metricsPublicURL
+ value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics"
+ when: openshift_web_console_install | default(true) | bool
+
- command: >
- {{openshift.common.client_binary}}
+ {{openshift_client_binary}}
--config={{mktemp.stdout}}/admin.kubeconfig
get rc
-l metrics-infra
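
Annotation: include_role with tasks_from runs a single task file from another role, with vars scoped to that include; that is the mechanism used above to push the metrics route into the web console config. A task-file sketch (the hostname is a placeholder, and the '#' in the key appears to separate nested config keys):

    # Sketch of the include_role/tasks_from pattern; relies on the
    # openshift_web_console role from this repository.
    - name: Add metrics route information to web console asset config
      include_role:
        name: openshift_web_console
        tasks_from: update_console_config.yml
      vars:
        console_config_edits:
        - key: clusterInfo#metricsPublicURL  # '#' apparently nests keys
          value: "https://hawkular-metrics.apps.example.com/hawkular/metrics"
      when: openshift_web_console_install | default(true) | bool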
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index 9dfe360bb..b67077bca 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -9,10 +9,10 @@
- "'not installed' not in passlib_result.stdout"
msg: "python-passlib rpm must be installed on control host"
-- name: Set default image variables based on deployment_type
+- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ item }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "{{ openshift_deployment_type }}.yml"
- "default_images.yml"
- name: Set metrics image facts
diff --git a/roles/openshift_metrics/tasks/oc_apply.yaml b/roles/openshift_metrics/tasks/oc_apply.yaml
index 1e1af40e8..057963c1a 100644
--- a/roles/openshift_metrics/tasks/oc_apply.yaml
+++ b/roles/openshift_metrics/tasks/oc_apply.yaml
@@ -1,7 +1,7 @@
---
- name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
command: >
- {{ openshift.common.client_binary }}
+ {{ openshift_client_binary }}
--config={{ kubeconfig }}
get {{file_content.kind}} {{file_content.metadata.name}}
-o jsonpath='{.metadata.resourceVersion}'
@@ -12,21 +12,25 @@
- name: Applying {{file_name}}
command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ {{ openshift_client_binary }} --config={{ kubeconfig }}
apply -f {{ file_name }}
-n {{namespace}}
register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
+ failed_when:
+ - "'error' in generation_apply.stderr"
+ - "generation_apply.rc != 0"
changed_when: no
- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ {{ openshift_client_binary }} --config={{ kubeconfig }}
get {{file_content.kind}} {{file_content.metadata.name}}
-o jsonpath='{.metadata.resourceVersion}'
-n {{namespace}}
register: version_changed
vars:
init_version: "{{ (generation_init is defined) | ternary(generation_init.stdout, '0') }}"
- failed_when: "'error' in version_changed.stderr"
+ failed_when:
+ - "'error' in version_changed.stderr"
+ - "version_changed.rc != 0"
changed_when: version_changed.stdout | int > init_version | int
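
Annotation: one detail worth knowing about the failed_when form used above is that when failed_when is a list, the entries are combined with AND, so the task is only marked failed when every condition holds. A self-contained sketch:

    # Both conditions must be true for failure: stderr mentions 'error'
    # AND the command actually exited non-zero. This example succeeds.
    - hosts: localhost
      gather_facts: false
      tasks:
      - name: Tolerate benign stderr noise from a zero-exit command
        shell: 'echo "error: deprecated flag" >&2; exit 0'
        register: generation_apply
        failed_when:
        - "'error' in generation_apply.stderr"
        - generation_apply.rc != 0
        changed_when: false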
diff --git a/roles/openshift_metrics/tasks/pre_install.yaml b/roles/openshift_metrics/tasks/pre_install.yaml
index d6756f9b9..976763236 100644
--- a/roles/openshift_metrics/tasks/pre_install.yaml
+++ b/roles/openshift_metrics/tasks/pre_install.yaml
@@ -14,7 +14,7 @@
- name: list existing secrets
command: >
- {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }}
+ {{ openshift_client_binary }} -n {{ openshift_metrics_project }}
--config={{ mktemp.stdout }}/admin.kubeconfig
get secrets -o name
register: metrics_secrets
diff --git a/roles/openshift_metrics/tasks/setup_certificate.yaml b/roles/openshift_metrics/tasks/setup_certificate.yaml
index 2d880f4d6..223bd975e 100644
--- a/roles/openshift_metrics/tasks/setup_certificate.yaml
+++ b/roles/openshift_metrics/tasks/setup_certificate.yaml
@@ -1,7 +1,7 @@
---
- name: generate {{ component }} keys
command: >
- {{ openshift.common.client_binary }} adm ca create-server-cert
+ {{ openshift_client_binary }} adm ca create-server-cert
--config={{ mktemp.stdout }}/admin.kubeconfig
--key='{{ mktemp.stdout }}/{{ component }}.key'
--cert='{{ mktemp.stdout }}/{{ component }}.crt'
@@ -23,7 +23,7 @@
- name: generate random password for the {{ component }} keystore
copy:
- content: "{{ 15 | oo_random_word }}"
+ content: "{{ 15 | lib_utils_oo_random_word }}"
dest: '{{ mktemp.stdout }}/{{ component }}-keystore.pwd'
- slurp: src={{ mktemp.stdout | quote }}/{{ component|quote }}-keystore.pwd
@@ -39,5 +39,5 @@
- name: generate random password for the {{ component }} truststore
copy:
- content: "{{ 15 | oo_random_word }}"
+ content: "{{ 15 | lib_utils_oo_random_word }}"
dest: '{{ mktemp.stdout | quote }}/{{ component|quote }}-truststore.pwd'
diff --git a/roles/openshift_metrics/tasks/start_metrics.yaml b/roles/openshift_metrics/tasks/start_metrics.yaml
index 2037e8dc3..899251727 100644
--- a/roles/openshift_metrics/tasks/start_metrics.yaml
+++ b/roles/openshift_metrics/tasks/start_metrics.yaml
@@ -1,6 +1,6 @@
---
- command: >
- {{openshift.common.client_binary}}
+ {{openshift_client_binary}}
--config={{mktemp.stdout}}/admin.kubeconfig
get rc
-l metrics-infra=hawkular-cassandra
@@ -23,7 +23,7 @@
changed_when: metrics_cassandra_rc | length > 0
- command: >
- {{openshift.common.client_binary}}
+ {{openshift_client_binary}}
--config={{mktemp.stdout}}/admin.kubeconfig
get rc
-l metrics-infra=hawkular-metrics
@@ -45,7 +45,7 @@
changed_when: metrics_metrics_rc | length > 0
- command: >
- {{openshift.common.client_binary}}
+ {{openshift_client_binary}}
--config={{mktemp.stdout}}/admin.kubeconfig
get rc
-l metrics-infra=heapster
diff --git a/roles/openshift_metrics/tasks/stop_metrics.yaml b/roles/openshift_metrics/tasks/stop_metrics.yaml
index 9a2ce9267..4b1d7119d 100644
--- a/roles/openshift_metrics/tasks/stop_metrics.yaml
+++ b/roles/openshift_metrics/tasks/stop_metrics.yaml
@@ -1,6 +1,6 @@
---
- command: >
- {{openshift.common.client_binary}}
+ {{openshift_client_binary}}
--config={{mktemp.stdout}}/admin.kubeconfig
get rc
-l metrics-infra=heapster
@@ -22,7 +22,7 @@
loop_var: object
- command: >
- {{openshift.common.client_binary}}
+ {{openshift_client_binary}}
--config={{mktemp.stdout}}/admin.kubeconfig
get rc
-l metrics-infra=hawkular-metrics
@@ -44,7 +44,7 @@
changed_when: metrics_hawkular_rc | length > 0
- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig
+ {{openshift_client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig
get rc
-o name
-l metrics-infra=hawkular-cassandra
diff --git a/roles/openshift_metrics/tasks/uninstall_hosa.yaml b/roles/openshift_metrics/tasks/uninstall_hosa.yaml
index 42ed02460..ae3306496 100644
--- a/roles/openshift_metrics/tasks/uninstall_hosa.yaml
+++ b/roles/openshift_metrics/tasks/uninstall_hosa.yaml
@@ -1,7 +1,7 @@
---
- name: remove Hawkular Agent (HOSA) components
command: >
- {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ {{ openshift_client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
delete --ignore-not-found --selector=metrics-infra=agent
all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings
register: delete_metrics
@@ -9,7 +9,7 @@
- name: remove rolebindings
command: >
- {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ {{ openshift_client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
delete --ignore-not-found
clusterrolebinding/hawkular-openshift-agent-rb
changed_when: delete_metrics.stdout != 'No resources found'
diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
index 1265c7bfd..610c7b4e5 100644
--- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml
+++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
@@ -4,7 +4,7 @@
- name: remove metrics components
command: >
- {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ {{ openshift_client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
delete --ignore-not-found --selector=metrics-infra
all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings,clusterrole
register: delete_metrics
@@ -12,9 +12,20 @@
- name: remove rolebindings
command: >
- {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ {{ openshift_client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
delete --ignore-not-found
rolebinding/hawkular-view
clusterrolebinding/heapster-cluster-reader
clusterrolebinding/hawkular-metrics
changed_when: delete_metrics.stdout != 'No resources found'
+
+# Update asset config in openshift-web-console namespace
+- name: Remove metrics route information from web console asset config
+ include_role:
+ name: openshift_web_console
+ tasks_from: update_asset_config.yml
+ vars:
+ asset_config_edits:
+ - key: metricsPublicURL
+ value: ""
+ when: openshift_web_console_install | default(true) | bool
diff --git a/roles/openshift_metrics/tasks/update_master_config.yaml b/roles/openshift_metrics/tasks/update_master_config.yaml
index 5059d8d94..6567fcb4f 100644
--- a/roles/openshift_metrics/tasks/update_master_config.yaml
+++ b/roles/openshift_metrics/tasks/update_master_config.yaml
@@ -1,4 +1,5 @@
---
+# TODO: Remove when asset config is removed from master-config.yaml
- name: Adding metrics route information to metricsPublicURL
modify_yaml:
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
index e976bc222..7c75b2f97 100644
--- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
@@ -64,7 +64,7 @@ spec:
- name: MASTER_URL
value: "{{ openshift_metrics_master_url }}"
- name: JGROUPS_PASSWORD
- value: "{{ 17 | oo_random_word }}"
+ value: "{{ 17 | lib_utils_oo_random_word }}"
- name: TRUSTSTORE_AUTHORITIES
value: "/hawkular-metrics-certs/tls.truststore.crt"
- name: ENABLE_PROMETHEUS_ENDPOINT
diff --git a/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py b/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py
deleted file mode 100644
index 6ed6d404c..000000000
--- a/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-'''
-Custom filters for use with openshift named certificates
-'''
-
-
-class FilterModule(object):
- ''' Custom ansible filters for use with openshift named certificates'''
-
- @staticmethod
- def oo_named_certificates_list(named_certificates):
- ''' Returns named certificates list with correct fields for the master
- config file.'''
- return [{'certFile': named_certificate['certfile'],
- 'keyFile': named_certificate['keyfile'],
- 'names': named_certificate['names']} for named_certificate in named_certificates]
-
- def filters(self):
- ''' returns a mapping of filters to methods '''
- return {"oo_named_certificates_list": self.oo_named_certificates_list}
diff --git a/roles/openshift_named_certificates/meta/main.yml b/roles/openshift_named_certificates/meta/main.yml
index 2c6e12494..e7d81df53 100644
--- a/roles/openshift_named_certificates/meta/main.yml
+++ b/roles/openshift_named_certificates/meta/main.yml
@@ -14,3 +14,4 @@ galaxy_info:
- system
dependencies:
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_named_certificates/tasks/main.yml b/roles/openshift_named_certificates/tasks/main.yml
index 1bcf9ef67..021fa8385 100644
--- a/roles/openshift_named_certificates/tasks/main.yml
+++ b/roles/openshift_named_certificates/tasks/main.yml
@@ -1,9 +1,8 @@
---
- set_fact:
- parsed_named_certificates: "{{ named_certificates | oo_parse_named_certificates(named_certs_dir, internal_hostnames) }}"
+ parsed_named_certificates: "{{ named_certificates | lib_utils_oo_parse_named_certificates(named_certs_dir, internal_hostnames) }}"
when: named_certificates | length > 0
delegate_to: localhost
- become: no
run_once: true
- openshift_facts:
@@ -43,4 +42,4 @@
src: "{{ item }}"
dest: "{{ named_certs_dir }}/{{ item | basename }}"
mode: 0600
- with_items: "{{ named_certificates | oo_collect('cafile') }}"
+ with_items: "{{ named_certificates | lib_utils_oo_collect('cafile') }}"
diff --git a/roles/openshift_nfs/meta/main.yml b/roles/openshift_nfs/meta/main.yml
index d7b5910f2..17c0cf33f 100644
--- a/roles/openshift_nfs/meta/main.yml
+++ b/roles/openshift_nfs/meta/main.yml
@@ -13,4 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_utils
-- role: lib_os_firewall
+- role: lib_utils
diff --git a/roles/openshift_nfs/tasks/create_export.yml b/roles/openshift_nfs/tasks/create_export.yml
index 5fcdbf76e..331685289 100644
--- a/roles/openshift_nfs/tasks/create_export.yml
+++ b/roles/openshift_nfs/tasks/create_export.yml
@@ -3,7 +3,7 @@
#
# Include signature
#
-# include_role:
+# import_role:
# role: openshift_nfs
# tasks_from: create_export
# vars:
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index fff927944..0b10413c5 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -1,6 +1,70 @@
---
openshift_node_debug_level: "{{ debug_level | default(2) }}"
-
+openshift_node_iptables_sync_period: '30s'
+osn_storage_plugin_deps:
+- ceph
+- glusterfs
+- iscsi
+openshift_node_local_quota_per_fsgroup: ""
+openshift_node_proxy_mode: iptables
+openshift_set_node_ip: False
+openshift_config_base: '/etc/origin'
+
+openshift_oreg_url_default_dict:
+ origin: "openshift/origin-${component}:${version}"
+ openshift-enterprise: "openshift3/ose-${component}:${version}"
+openshift_oreg_url_default: "{{ openshift_oreg_url_default_dict[openshift_deployment_type] }}"
+oreg_url_node: "{{ oreg_url | default(openshift_oreg_url_default) }}"
+
+osn_ovs_image_default_dict:
+ origin: "openshift/openvswitch"
+ openshift-enterprise: "openshift3/openvswitch"
+osn_ovs_image_default: "{{ osn_ovs_image_default_dict[openshift_deployment_type] }}"
+osn_ovs_image: "{{ osn_ovs_image_default }}"
+
+openshift_dns_ip: "{{ ansible_default_ipv4['address'] }}"
+
+openshift_node_env_vars: {}
+
+# Create list of 'k=v' pairs.
+l_node_kubelet_node_labels: "{{ openshift_node_labels | default({}) | lib_utils_oo_dict_to_keqv_list }}"
+
+openshift_node_kubelet_args_dict:
+ aws:
+ cloud-provider:
+ - aws
+ cloud-config:
+ - "{{ openshift_config_base ~ '/cloudprovider/aws.conf' }}"
+ node-labels: "{{ l_node_kubelet_node_labels }}"
+ openstack:
+ cloud-provider:
+ - openstack
+ cloud-config:
+ - "{{ openshift_config_base ~ '/cloudprovider/openstack.conf' }}"
+ node-labels: "{{ l_node_kubelet_node_labels }}"
+ gce:
+ cloud-provider:
+ - gce
+ cloud-config:
+ - "{{ openshift_config_base ~ '/cloudprovider/gce.conf' }}"
+ node-labels: "{{ l_node_kubelet_node_labels }}"
+ azure:
+ cloud-provider:
+ - azure
+ cloud-config:
+ - "{{ openshift_config_base ~ '/cloudprovider/azure.conf' }}"
+ node-labels: "{{ l_node_kubelet_node_labels }}"
+ undefined:
+ node-labels: "{{ l_node_kubelet_node_labels }}"
+
+l_node_kubelet_args_default: "{{ openshift_node_kubelet_args_dict[openshift_cloudprovider_kind | default('undefined')] }}"
+
+l_openshift_node_kubelet_args: "{{ openshift_node_kubelet_args | default({}) }}"
+# Combine the default kubelet_args dictionary (based on cloud provider, if provided)
+# with user-supplied openshift_node_kubelet_args.
+# openshift_node_kubelet_args will override the defaults, if keys and/or subkeys
+# are present in both.
+l2_openshift_node_kubelet_args: "{{ l_node_kubelet_args_default | combine(l_openshift_node_kubelet_args, recursive=True) }}"
openshift_node_dnsmasq_install_network_manager_hook: true
# lo must always be present in this list or dnsmasq will conflict with
@@ -14,10 +78,15 @@ r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }
l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"
+
+openshift_node_image_dict:
+ origin: 'openshift/node'
+ openshift-enterprise: 'openshift3/node'
+osn_image: "{{ openshift_node_image_dict[openshift_deployment_type] }}"
+
openshift_service_type_dict:
origin: origin
openshift-enterprise: atomic-openshift
-
openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
system_images_registry_dict:
@@ -106,9 +175,9 @@ oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
oreg_auth_credentials_replace: False
l_bind_docker_reg_auth: False
openshift_use_crio: False
-openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False)) or (openshift_use_crio_only | default(False)) }}"
+openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False) | bool) or (openshift_use_crio_only | default(False) | bool) }}"
-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
# NOTE
# r_openshift_node_*_default may be defined external to this role.
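
Annotation: the kubelet-args defaults above lean on Jinja's combine filter; with recursive=True, nested dictionaries are merged key-by-key, the right-hand operand wins on conflicts, and lists are replaced rather than concatenated. A small demonstration:

    # Demonstrates the merge semantics behind l2_openshift_node_kubelet_args.
    - hosts: localhost
      gather_facts: false
      vars:
        kubelet_args_default:
          cloud-provider: [aws]
          cloud-config: [/etc/origin/cloudprovider/aws.conf]
          node-labels: ['region=infra']
        kubelet_args_user:
          node-labels: ['region=primary']  # overrides the default list
      tasks:
      - name: Show the merged kubelet arguments
        debug:
          msg: "{{ kubelet_args_default | combine(kubelet_args_user, recursive=True) }}"
        # Result keeps cloud-provider and cloud-config from the defaults and
        # replaces node-labels with the user-supplied value.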
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 62e0e1341..779916335 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -34,7 +34,7 @@
pause: seconds=15
when:
- (not skip_node_svc_handlers | default(False) | bool)
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- name: restart node
systemd:
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 70057c7f3..59e743dce 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -12,12 +12,5 @@ galaxy_info:
categories:
- cloud
dependencies:
-- role: openshift_node_facts
- when: not (openshift_node_upgrade_in_progress | default(False))
- role: lib_openshift
-- role: lib_os_firewall
- when: not (openshift_node_upgrade_in_progress | default(False))
-- role: openshift_cloud_provider
- when: not (openshift_node_upgrade_in_progress | default(False))
- role: lib_utils
- when: openshift_node_upgrade_in_progress | default(False)
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index 8a55cd428..1103fe4c9 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -4,7 +4,7 @@
- name: Pull container images
include_tasks: container_images.yml
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
- name: Start and enable openvswitch service
systemd:
@@ -13,7 +13,7 @@
state: started
daemon_reload: yes
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- openshift_node_use_openshift_sdn | default(true) | bool
register: ovs_start_result
until: not (ovs_start_result is failed)
@@ -24,9 +24,9 @@
ovs_service_status_changed: "{{ ovs_start_result is changed }}"
- file:
- dest: "{{ (openshift_node_kubelet_args|default({'config':None})).config}}"
+ dest: "{{ l2_openshift_node_kubelet_args['config'] }}"
state: directory
- when: openshift_node_kubelet_args is defined and 'config' in openshift_node_kubelet_args
+ when: ('config' in l2_openshift_node_kubelet_args) | bool
# TODO: add the validate parameter when there is a validation command to run
- name: Create the Node config
@@ -46,7 +46,7 @@
regexp: "^{{ item.key }}="
line: "{{ item.key }}={{ item.value }}"
create: true
- with_dict: "{{ openshift.node.env_vars | default({}) }}"
+ with_dict: "{{ openshift_node_env_vars }}"
notify:
- restart node
@@ -58,7 +58,7 @@
# restarted after the node restarts docker and it will take up to 60 seconds for
# systemd to start the master again
- when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- not openshift_node_bootstrap
block:
- name: Wait for master API to become available before proceeding
diff --git a/roles/openshift_node/tasks/container_images.yml b/roles/openshift_node/tasks/container_images.yml
index 0b8c806ae..bb788e2f1 100644
--- a/roles/openshift_node/tasks/container_images.yml
+++ b/roles/openshift_node/tasks/container_images.yml
@@ -12,7 +12,7 @@
- name: Pre-pull openvswitch image
command: >
- docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
+ docker pull {{ osn_ovs_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
when:
diff --git a/roles/openshift_node/tasks/dnsmasq_install.yml b/roles/openshift_node/tasks/dnsmasq_install.yml
index 0c8857b11..5e06ba032 100644
--- a/roles/openshift_node/tasks/dnsmasq_install.yml
+++ b/roles/openshift_node/tasks/dnsmasq_install.yml
@@ -12,7 +12,7 @@
- name: Install dnsmasq
package: name=dnsmasq state=installed
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index b1fcf4068..a4a9c1237 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -1,35 +1,25 @@
---
-- when: not openshift.common.is_containerized | bool
- block:
- - name: Install Node package
- package:
- name: "{{ openshift_service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
- state: present
- register: result
- until: result is succeeded
-
- - name: Install sdn-ovs package
- package:
- name: "{{ openshift_service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
- state: present
- when:
- - openshift_node_use_openshift_sdn | bool
- register: result
- until: result is succeeded
-
- - name: Install conntrack-tools package
- package:
- name: "conntrack-tools"
- state: present
- register: result
- until: result is succeeded
+- name: Install Node package, sdn-ovs, conntrack packages
+ package:
+ name: "{{ item.name }}"
+ state: present
+ register: result
+ until: result is succeeded
+ with_items:
+ - name: "{{ openshift_service_type }}-node{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
+ - name: "{{ openshift_service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
+ install: "{{ openshift_node_use_openshift_sdn | bool }}"
+ - name: "conntrack-tools"
+ when:
+ - not openshift_is_containerized | bool
+ - item['install'] | default(True) | bool
- when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- not l_is_node_system_container | bool
block:
- name: Pre-pull node image when containerized
command: >
- docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
+ docker pull {{ osn_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 8bd8f2536..754ecacaf 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -3,8 +3,8 @@
msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
when:
- (not ansible_selinux or ansible_selinux.status != 'enabled')
- - deployment_type == 'openshift-enterprise'
- - not openshift_use_crio
+ - openshift_deployment_type == 'openshift-enterprise'
+ - not openshift_use_crio | bool
- include_tasks: dnsmasq_install.yml
- include_tasks: dnsmasq.yml
@@ -50,7 +50,7 @@
name: cri-o
enabled: yes
state: restarted
- when: openshift_use_crio
+ when: openshift_use_crio | bool
register: task_result
failed_when:
- task_result is failed
@@ -85,21 +85,17 @@
- name: GlusterFS storage plugin configuration
include_tasks: storage_plugins/glusterfs.yml
- when: "'glusterfs' in openshift.node.storage_plugin_deps"
+ when: "'glusterfs' in osn_storage_plugin_deps"
- name: Ceph storage plugin configuration
include_tasks: storage_plugins/ceph.yml
- when: "'ceph' in openshift.node.storage_plugin_deps"
+ when: "'ceph' in osn_storage_plugin_deps"
- name: iSCSI storage plugin configuration
include_tasks: storage_plugins/iscsi.yml
- when: "'iscsi' in openshift.node.storage_plugin_deps"
+ when: "'iscsi' in osn_storage_plugin_deps"
##### END Storage #####
- include_tasks: config/workaround-bz1331590-ovs-oom-fix.yml
when: openshift_node_use_openshift_sdn | default(true) | bool
-
-- name: include bootstrap node config
- include_tasks: bootstrap.yml
- when: openshift_node_bootstrap
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index 98978ec6f..06b879050 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -2,14 +2,14 @@
- name: Pre-pull node system container image
command: >
- atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}
+ atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osn_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
- name: Install or Update node system container
oc_atomic_container:
name: "{{ openshift_service_type }}-node"
- image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}"
+ image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osn_image }}:{{ openshift_image_tag }}"
values:
- "DNS_DOMAIN={{ openshift.common.dns_domain }}"
- "DOCKER_SERVICE={{ openshift_docker_service_name }}.service"
diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml
index b61bc84c1..d7dce6969 100644
--- a/roles/openshift_node/tasks/openvswitch_system_container.yml
+++ b/roles/openshift_node/tasks/openvswitch_system_container.yml
@@ -1,22 +1,22 @@
---
- set_fact:
l_service_name: "cri-o"
- when: openshift_use_crio
+ when: openshift_use_crio | bool
- set_fact:
l_service_name: "{{ openshift_docker_service_name }}"
- when: not openshift_use_crio
+ when: not openshift_use_crio | bool
- name: Pre-pull OpenVSwitch system container image
command: >
- atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}
+ atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osn_ovs_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
- name: Install or Update OpenVSwitch system container
oc_atomic_container:
name: openvswitch
- image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}"
+ image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osn_ovs_image }}:{{ openshift_image_tag }}"
state: latest
values:
- "DOCKER_SERVICE={{ l_service_name }}"
diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml
index ab43ec049..92650e6b7 100644
--- a/roles/openshift_node/tasks/registry_auth.yml
+++ b/roles/openshift_node/tasks/registry_auth.yml
@@ -41,7 +41,7 @@
set_fact:
l_bind_docker_reg_auth: True
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- oreg_auth_user is defined
- >
(node_oreg_auth_credentials_stat.stat.exists
diff --git a/roles/openshift_node/tasks/storage_plugins/ceph.yml b/roles/openshift_node/tasks/storage_plugins/ceph.yml
index 52d80357e..e30f58a9a 100644
--- a/roles/openshift_node/tasks/storage_plugins/ceph.yml
+++ b/roles/openshift_node/tasks/storage_plugins/ceph.yml
@@ -1,6 +1,6 @@
---
- name: Install Ceph storage plugin dependencies
package: name=ceph-common state=present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
index e60f57ae7..c04a6922a 100644
--- a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
@@ -1,7 +1,7 @@
---
- name: Install GlusterFS storage plugin dependencies
package: name=glusterfs-fuse state=present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
diff --git a/roles/openshift_node/tasks/storage_plugins/iscsi.yml b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
index d3a3668d5..a8048c42f 100644
--- a/roles/openshift_node/tasks/storage_plugins/iscsi.yml
+++ b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
@@ -1,6 +1,6 @@
---
- name: Install iSCSI storage plugin dependencies
package: name=iscsi-initiator-utils state=present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml
index 1484aa076..c2922644f 100644
--- a/roles/openshift_node/tasks/storage_plugins/nfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml
@@ -1,7 +1,7 @@
---
- name: Install NFS storage plugin dependencies
package: name=nfs-utils state=present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 262ee698b..e33a4999f 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -2,13 +2,13 @@
- name: Install Node service file
template:
dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
- src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}"
+ src: "{{ openshift_is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}"
when: not l_is_node_system_container | bool
notify:
- reload systemd units
- restart node
-- when: openshift.common.is_containerized | bool
+- when: openshift_is_containerized | bool
block:
- name: include node deps docker service file
include_tasks: config/install-node-deps-docker-service-file.yml
diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml
index f0a013e45..02e417937 100644
--- a/roles/openshift_node/tasks/upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade.yml
@@ -1,11 +1,10 @@
---
# input variables:
# - l_docker_upgrade
-# - openshift.common.is_atomic
+# - openshift_is_atomic
# - node_config_hook
# - openshift_pkg_version
-# - openshift.common.is_containerized
-# - deployment_type
+# - openshift_is_containerized
# - openshift_release
# tasks file for openshift_node_upgrade
@@ -26,7 +25,7 @@
include_tasks: upgrade/rpm_upgrade_install.yml
vars:
openshift_version: "{{ openshift_pkg_version | default('') }}"
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
- include_tasks: "{{ node_config_hook }}"
diff --git a/roles/openshift_node/tasks/upgrade/config_changes.yml b/roles/openshift_node/tasks/upgrade/config_changes.yml
index 439700df6..dd9183382 100644
--- a/roles/openshift_node/tasks/upgrade/config_changes.yml
+++ b/roles/openshift_node/tasks/upgrade/config_changes.yml
@@ -1,7 +1,7 @@
---
- name: Update systemd units
include_tasks: ../systemd_units.yml
- when: openshift.common.is_containerized
+ when: openshift_is_containerized | bool
- name: Update oreg value
yedit:
@@ -21,6 +21,12 @@
path: "/var/lib/dockershim/sandbox/"
state: absent
+# https://bugzilla.redhat.com/show_bug.cgi?id=1518912
+- name: Clean up IPAM data
+ file:
+ path: "/var/lib/cni/networks/openshift-sdn/"
+ state: absent
+
# Disable Swap Block (pre)
- block:
- name: Remove swap entries from /etc/fstab
@@ -60,6 +66,7 @@
dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
src: "node.service.j2"
register: l_node_unit
+ when: not openshift_is_containerized | bool
- name: Reset selinux context
command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes
@@ -74,4 +81,3 @@
# require a service to be part of the call.
- name: Reload systemd units
command: systemctl daemon-reload
- when: l_node_unit is changed
diff --git a/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml b/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml
index 71f00dcd2..e5477f389 100644
--- a/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml
+++ b/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml
@@ -1,15 +1,15 @@
---
- name: Pre-pull node image
command: >
- docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
+ docker pull {{ osn_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
- name: Pre-pull openvswitch image
command: >
- docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
+ docker pull {{ osn_ovs_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift_use_openshift_sdn | bool
+ when: openshift_node_use_openshift_sdn | bool
- include_tasks: ../container_images.yml
diff --git a/roles/openshift_node/tasks/upgrade/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml
index 45b0be0a0..bd6f42182 100644
--- a/roles/openshift_node/tasks/upgrade/restart.yml
+++ b/roles/openshift_node/tasks/upgrade/restart.yml
@@ -1,7 +1,7 @@
---
# input variables:
# - openshift_service_type
-# - openshift.common.is_containerized
+# - openshift_is_containerized
# - openshift.common.hostname
# - openshift.master.api_port
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
index cc9a8f2d9..d4b47bb9e 100644
--- a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
@@ -3,7 +3,7 @@
# - openshift_service_type
# - component
# - openshift_pkg_version
-# - openshift.common.is_atomic
+# - openshift_is_atomic
# Pre-pull new node rpm, but don't install
- name: download new node packages
@@ -12,7 +12,7 @@
until: result is succeeded
vars:
openshift_node_upgrade_rpm_list:
- - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}"
- "PyYAML"
- "dnsmasq"
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
index 32eeb76c6..ef5d8d662 100644
--- a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
@@ -3,7 +3,7 @@
# - openshift_service_type
# - component
# - openshift_pkg_version
-# - openshift.common.is_atomic
+# - openshift_is_atomic
# Install the pre-pulled RPM
# Note: dnsmasq is covered in its own play. openvswitch is included here
@@ -14,6 +14,6 @@
until: result is succeeded
vars:
openshift_node_upgrade_rpm_list:
- - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}"
- "PyYAML"
- "openvswitch"
diff --git a/roles/openshift_node/tasks/upgrade/stop_services.yml b/roles/openshift_node/tasks/upgrade/stop_services.yml
index 2fff556e5..6d92516c3 100644
--- a/roles/openshift_node/tasks/upgrade/stop_services.yml
+++ b/roles/openshift_node/tasks/upgrade/stop_services.yml
@@ -19,7 +19,7 @@
- "{{ openshift_service_type }}-master-controllers"
- "{{ openshift_service_type }}-node"
failed_when: false
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
- service:
name: docker
@@ -40,4 +40,4 @@
- "{{ openshift_service_type }}-node"
- openvswitch
failed_when: false
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
diff --git a/roles/openshift_node/tasks/upgrade_pre.yml b/roles/openshift_node/tasks/upgrade_pre.yml
index 7f591996c..3ae7dc6b6 100644
--- a/roles/openshift_node/tasks/upgrade_pre.yml
+++ b/roles/openshift_node/tasks/upgrade_pre.yml
@@ -11,7 +11,7 @@
command: "{{ ansible_pkg_mgr }} makecache"
register: result
until: result is succeeded
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
- name: Check Docker image count
shell: "docker images -aq | wc -l"
@@ -26,7 +26,7 @@
- l_docker_upgrade | bool
- include_tasks: upgrade/containerized_upgrade_pull.yml
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
# Prepull the rpms for docker upgrade, but don't install
- name: download docker upgrade rpm
@@ -40,7 +40,7 @@
- include_tasks: upgrade/rpm_upgrade.yml
vars:
openshift_version: "{{ openshift_pkg_version | default('') }}"
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
- name: Check for swap usage
diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2
index da751bd65..777f4a449 100644
--- a/roles/openshift_node/templates/node.service.j2
+++ b/roles/openshift_node/templates/node.service.j2
@@ -8,7 +8,7 @@ Wants={{ openshift_docker_service_name }}.service
Documentation=https://github.com/openshift/origin
Requires=dnsmasq.service
After=dnsmasq.service
-{% if openshift_use_crio %}Wants=cri-o.service{% endif %}
+{% if openshift_use_crio | bool %}Wants=cri-o.service{% endif %}
[Service]
Type=notify
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 261cac6f1..5f2a94ea2 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -5,18 +5,16 @@ dnsBindAddress: 127.0.0.1:53
dnsRecursiveResolvConf: /etc/origin/node/resolv.conf
{% endif %}
dnsDomain: {{ openshift.common.dns_domain }}
-{% if 'dns_ip' in openshift.node %}
-dnsIP: {{ openshift.node.dns_ip }}
-{% endif %}
+dnsIP: {{ openshift_dns_ip }}
dockerConfig:
execHandlerName: ""
-iptablesSyncPeriod: "{{ openshift.node.iptables_sync_period }}"
+iptablesSyncPeriod: "{{ openshift_node_iptables_sync_period }}"
imageConfig:
- format: {{ openshift.node.registry_url }}
+ format: {{ oreg_url_node }}
latest: {{ openshift_node_image_config_latest }}
kind: NodeConfig
-kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }}
-{% if openshift_use_crio %}
+kubeletArguments: {{ l2_openshift_node_kubelet_args | default(None) | lib_utils_to_padded_yaml(level=1) }}
+{% if openshift_use_crio | bool %}
container-runtime:
- remote
container-runtime-endpoint:
@@ -45,7 +43,7 @@ networkConfig:
{% if openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_use_kuryr | bool or openshift_node_sdn_network_plugin_name == 'cni' %}
networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
{% endif %}
-{% if openshift.node.set_node_ip | bool %}
+{% if openshift_set_node_ip | bool %}
nodeIP: {{ openshift.common.ip }}
{% endif %}
nodeName: {{ openshift.node.nodename }}
@@ -68,8 +66,8 @@ volumeDirectory: {{ openshift_node_data_dir }}/openshift.local.volumes
{% if not (openshift_node_use_kuryr | default(False)) | bool %}
proxyArguments:
proxy-mode:
- - {{ openshift.node.proxy_mode }}
+ - {{ openshift_node_proxy_mode }}
{% endif %}
volumeConfig:
localQuota:
- perFSGroup: {{ openshift.node.local_quota_per_fsgroup }}
+ perFSGroup: {{ openshift_node_local_quota_per_fsgroup }}
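
lib_utils_to_padded_yaml is the renamed to_padded_yaml filter now carried by lib_utils; it dumps a value as YAML indented by the given level. For a hypothetical l2_openshift_node_kubelet_args of {'max-pods': ['250']}, the kubeletArguments line above renders roughly as:

    kubeletArguments:
      max-pods:
      - '250'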
diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service
index 8b43beb07..9fe779057 100644
--- a/roles/openshift_node/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node/templates/openshift.docker.node.dep.service
@@ -3,9 +3,15 @@ Requires={{ openshift_docker_service_name }}.service
After={{ openshift_docker_service_name }}.service
PartOf={{ openshift_service_type }}-node.service
Before={{ openshift_service_type }}-node.service
-{% if openshift_use_crio %}Wants=cri-o.service{% endif %}
+{% if openshift_use_crio | bool %}Wants=cri-o.service{% endif %}
[Service]
-ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; fi"
+ExecStart=/bin/bash -c 'if [[ -f /usr/bin/docker-current ]]; \
+ then echo DOCKER_ADDTL_BIND_MOUNTS=\"--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro \
+ --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro \
+ --volume=/etc/containers/registries:/etc/containers/registries:ro \
+ {% if l_bind_docker_reg_auth %} --volume={{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\" > \
+ /etc/sysconfig/{{ openshift_service_type }}-node-dep; \
+ else echo "#DOCKER_ADDTL_BIND_MOUNTS=" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; fi'
ExecStop=
SyslogIdentifier={{ openshift_service_type }}-node-dep
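
The reformatted ExecStart writes the same single-line sysconfig entry as before, now including the optional registry-auth bind mount. The rendered result would look roughly like this (assuming openshift_service_type=origin, docker-current present, and registry auth disabled):

    # /etc/sysconfig/origin-node-dep (sketch)
    DOCKER_ADDTL_BIND_MOUNTS="--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro"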
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index b174c7023..ae7b147a6 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -38,7 +38,7 @@ ExecStart=/usr/bin/docker run --name {{ openshift_service_type }}-node \
{% if openshift_use_nuage | default(false) -%} $NUAGE_ADDTL_BIND_MOUNTS {% endif -%} \
-v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \
{% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
- {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ {{ osn_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-node
ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
diff --git a/roles/openshift_node/templates/openvswitch.docker.service b/roles/openshift_node/templates/openvswitch.docker.service
index 37f091c76..1fc9b6e72 100644
--- a/roles/openshift_node/templates/openvswitch.docker.service
+++ b/roles/openshift_node/templates/openvswitch.docker.service
@@ -6,7 +6,7 @@ PartOf={{ openshift_docker_service_name }}.service
[Service]
EnvironmentFile=/etc/sysconfig/openvswitch
ExecStartPre=-/usr/bin/docker rm -f openvswitch
-ExecStart=/usr/bin/docker run --name openvswitch --rm --privileged --net=host --pid=host -v /lib/modules:/lib/modules -v /run:/run -v /sys:/sys:ro -v /etc/origin/openvswitch:/etc/openvswitch {{ openshift.node.ovs_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name openvswitch --rm --privileged --net=host --pid=host -v /lib/modules:/lib/modules -v /run:/run -v /sys:/sys:ro -v /etc/origin/openvswitch:/etc/openvswitch {{ osn_ovs_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 5
ExecStop=/usr/bin/docker stop openvswitch
SyslogIdentifier=openvswitch
diff --git a/roles/openshift_node_certificates/defaults/main.yml b/roles/openshift_node_certificates/defaults/main.yml
index b42b75be9..da1570528 100644
--- a/roles/openshift_node_certificates/defaults/main.yml
+++ b/roles/openshift_node_certificates/defaults/main.yml
@@ -2,4 +2,4 @@
openshift_node_cert_expire_days: 730
openshift_ca_host: ''
-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
diff --git a/roles/openshift_node_certificates/meta/main.yml b/roles/openshift_node_certificates/meta/main.yml
index 0440bf11a..4362c644a 100644
--- a/roles/openshift_node_certificates/meta/main.yml
+++ b/roles/openshift_node_certificates/meta/main.yml
@@ -12,4 +12,5 @@ galaxy_info:
categories:
- cloud
- system
-dependencies: []
+dependencies:
+- role: lib_utils
diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml
index 97f1fbbdd..5f73f3bdc 100644
--- a/roles/openshift_node_certificates/tasks/main.yml
+++ b/roles/openshift_node_certificates/tasks/main.yml
@@ -31,7 +31,7 @@
node_certs_missing: "{{ true if openshift_certificates_redeploy | default(false) | bool
else (False in (g_node_cert_stat_result.results
| default({})
- | oo_collect(attribute='stat.exists')
+ | lib_utils_oo_collect(attribute='stat.exists')
| list)) }}"
- name: Create openshift_generated_configs_dir if it does not exist
@@ -51,11 +51,11 @@
- name: Generate the node client config
command: >
- {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
- {% for named_ca_certificate in hostvars[openshift_ca_host].openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
+ {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm create-api-client-config
+ {% for named_ca_certificate in hostvars[openshift_ca_host].openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
- {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | oo_collect('path') %}
+ {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | lib_utils_oo_collect('path') %}
--certificate-authority {{ legacy_ca_certificate }}
{% endfor %}
--certificate-authority={{ openshift_ca_cert }}
@@ -70,14 +70,14 @@
args:
creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}"
with_items: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"
+ | lib_utils_oo_select_keys(groups['oo_nodes_to_config'])
+ | lib_utils_oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"
delegate_to: "{{ openshift_ca_host }}"
run_once: true
- name: Generate the node server certificate
command: >
- {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert
+ {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-server-cert
--cert={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt
--key={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.key
--expire-days={{ openshift_node_cert_expire_days }}
@@ -89,18 +89,11 @@
args:
creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt"
with_items: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"
+ | lib_utils_oo_select_keys(groups['oo_nodes_to_config'])
+ | lib_utils_oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"
delegate_to: "{{ openshift_ca_host }}"
run_once: true
-- name: Create local temp directory for syncing certs
- local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: node_cert_mktemp
- changed_when: False
- when: node_certs_missing | bool
- become: no
-
- name: Create a tarball of the node config directories
command: >
tar -czvf {{ openshift_node_generated_config_dir }}.tgz
@@ -117,8 +110,7 @@
- name: Retrieve the node config tarballs from the master
fetch:
src: "{{ openshift_node_generated_config_dir }}.tgz"
- dest: "{{ node_cert_mktemp.stdout }}/"
- flat: yes
+ dest: "/tmp"
fail_on_missing: yes
validate_checksum: yes
when: node_certs_missing | bool
@@ -132,15 +124,14 @@
- name: Unarchive the tarball on the node
unarchive:
- src: "{{ node_cert_mktemp.stdout }}/{{ openshift_node_cert_subdir }}.tgz"
+ src: "/tmp/{{ inventory_hostname }}/{{ openshift_node_generated_config_dir }}.tgz"
dest: "{{ openshift_node_cert_dir }}"
when: node_certs_missing | bool
- name: Delete local temp directory
- local_action: file path="{{ node_cert_mktemp.stdout }}" state=absent
+ local_action: file path="/tmp/{{ inventory_hostname }}" state=absent
changed_when: False
when: node_certs_missing | bool
- become: no
- name: Copy OpenShift CA to system CA trust
copy:
diff --git a/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py b/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py
deleted file mode 100644
index 69069f2dc..000000000
--- a/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-'''
-Custom filters for use in openshift-node
-'''
-from ansible import errors
-
-
-class FilterModule(object):
- ''' Custom ansible filters for use by openshift_node_facts role'''
-
- @staticmethod
- def node_get_dns_ip(openshift_dns_ip, hostvars):
- ''' Navigates the complicated logic of when to set dnsIP
-
- In all situations if they've set openshift_dns_ip use that
- For 1.0/3.0 installs we use the openshift_master_cluster_vip, openshift_node_first_master_ip, else None
- For 1.1/3.1 installs we use openshift_master_cluster_vip, else None (product will use kube svc ip)
- For 1.2/3.2+ installs we set to the node's default interface ip
- '''
-
- if not issubclass(type(hostvars), dict):
- raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
-
- # We always use what they've specified if they've specified a value
- if openshift_dns_ip is not None:
- return openshift_dns_ip
- return hostvars['ansible_default_ipv4']['address']
-
- def filters(self):
- ''' returns a mapping of filters to methods '''
- return {'node_get_dns_ip': self.node_get_dns_ip}
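
With node_get_dns_ip removed, node.yaml.v1.j2 (above) consumes openshift_dns_ip directly, so the fallback to the node's default interface address presumably moves into role defaults. A sketch of the equivalent default (an assumption; the defaults file is not shown in this diff):

    # e.g. roles/openshift_node/defaults/main.yml (sketch)
    openshift_dns_ip: "{{ ansible_default_ipv4['address'] }}"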
diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml
deleted file mode 100644
index c234a3000..000000000
--- a/roles/openshift_node_facts/tasks/main.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: Set node facts
- openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- # Reset node labels to an empty dictionary.
- - role: node
- local_facts:
- labels: {}
- - role: node
- local_facts:
- annotations: "{{ openshift_node_annotations | default(none) }}"
- iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
- kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
- labels: "{{ openshift_node_labels | default(None) }}"
- registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}"
- storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
- set_node_ip: "{{ openshift_set_node_ip | default(None) }}"
- node_image: "{{ osn_image | default(None) }}"
- ovs_image: "{{ osn_ovs_image | default(None) }}"
- proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}"
- local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}"
- dns_ip: "{{ openshift_dns_ip | default(none) | node_get_dns_ip(hostvars[inventory_hostname])}}"
- env_vars: "{{ openshift_node_env_vars | default(None) }}"
diff --git a/roles/openshift_node_group/defaults/main.yml b/roles/openshift_node_group/defaults/main.yml
index 7c81409a5..cccdea66f 100644
--- a/roles/openshift_node_group/defaults/main.yml
+++ b/roles/openshift_node_group/defaults/main.yml
@@ -17,7 +17,13 @@ openshift_node_group_edits: []
openshift_node_group_namespace: openshift-node
openshift_node_group_labels: []
-openshift_imageconfig_format: "{{ oreg_url if oreg_url is defined else openshift.node.registry_url }}"
+openshift_oreg_url_default_dict:
+ origin: "openshift/origin-${component}:${version}"
+ openshift-enterprise: openshift3/ose-${component}:${version}
+openshift_oreg_url_default: "{{ openshift_oreg_url_default_dict[openshift_deployment_type] }}"
+oreg_url_node: "{{ oreg_url | default(openshift_oreg_url_default) }}"
+
+openshift_imageconfig_format: "{{ oreg_url_node }}"
openshift_node_group_cloud_provider: "{{ openshift_cloudprovider_kind | default('aws') }}"
openshift_node_group_network_plugin_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}"
openshift_node_group_network_plugin: "{{ openshift_node_group_network_plugin_default }}"
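
The new defaults resolve the node image format in one place. With oreg_url unset, the dict lookup above yields:

    # openshift_deployment_type=origin:
    #   oreg_url_node -> "openshift/origin-${component}:${version}"
    # openshift_deployment_type=openshift-enterprise:
    #   oreg_url_node -> "openshift3/ose-${component}:${version}"
    # an inventory-defined oreg_url overrides both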
diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml
index 65a647b8f..77be1f2b1 100644
--- a/roles/openshift_openstack/defaults/main.yml
+++ b/roles/openshift_openstack/defaults/main.yml
@@ -8,6 +8,7 @@ openshift_openstack_num_etcd: 0
openshift_openstack_num_masters: 1
openshift_openstack_num_nodes: 1
openshift_openstack_num_infra: 1
+openshift_openstack_num_cns: 0
openshift_openstack_dns_nameservers: []
openshift_openstack_nodes_to_remove: []
@@ -57,6 +58,7 @@ openshift_openstack_stack_name: "{{ openshift_openstack_clusterid }}.{{ openshif
openshift_openstack_subnet_prefix: "192.168.99"
openshift_openstack_master_hostname: master
openshift_openstack_infra_hostname: infra-node
+openshift_openstack_cns_hostname: cns
openshift_openstack_node_hostname: app-node
openshift_openstack_lb_hostname: lb
openshift_openstack_etcd_hostname: etcd
@@ -66,8 +68,10 @@ openshift_openstack_etcd_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_master_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_node_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_infra_flavor: "{{ openshift_openstack_default_flavor }}"
+openshift_openstack_cns_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_master_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_infra_image: "{{ openshift_openstack_default_image_name }}"
+openshift_openstack_cns_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_node_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_lb_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_etcd_image: "{{ openshift_openstack_default_image_name }}"
@@ -84,6 +88,7 @@ openshift_openstack_infra_server_group_policies: []
openshift_openstack_docker_volume_size: 15
openshift_openstack_master_volume_size: "{{ openshift_openstack_docker_volume_size }}"
openshift_openstack_infra_volume_size: "{{ openshift_openstack_docker_volume_size }}"
+openshift_openstack_cns_volume_size: "{{ openshift_openstack_docker_volume_size }}"
openshift_openstack_node_volume_size: "{{ openshift_openstack_docker_volume_size }}"
openshift_openstack_etcd_volume_size: 2
openshift_openstack_lb_volume_size: 5
diff --git a/roles/openshift_openstack/tasks/check-prerequisites.yml b/roles/openshift_openstack/tasks/check-prerequisites.yml
index 30996cc47..1e487d434 100644
--- a/roles/openshift_openstack/tasks/check-prerequisites.yml
+++ b/roles/openshift_openstack/tasks/check-prerequisites.yml
@@ -91,6 +91,7 @@
with_items:
- "{{ openshift_openstack_master_image }}"
- "{{ openshift_openstack_infra_image }}"
+ - "{{ openshift_openstack_cns_image }}"
- "{{ openshift_openstack_node_image }}"
- "{{ openshift_openstack_lb_image }}"
- "{{ openshift_openstack_etcd_image }}"
@@ -100,6 +101,7 @@
with_items:
- "{{ openshift_openstack_master_flavor }}"
- "{{ openshift_openstack_infra_flavor }}"
+ - "{{ openshift_openstack_cns_flavor }}"
- "{{ openshift_openstack_node_flavor }}"
- "{{ openshift_openstack_lb_flavor }}"
- "{{ openshift_openstack_etcd_flavor }}"
diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2
index 8d13eb81e..1be5d3a62 100644
--- a/roles/openshift_openstack/templates/heat_stack.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2
@@ -419,6 +419,46 @@ resources:
port_range_min: 443
port_range_max: 443
+ cns-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-cns-secgrp
+ params:
+ cluster_id: {{ openshift_openstack_stack_name }}
+ description:
+ str_replace:
+ template: Security group for cluster_id OpenShift cns cluster nodes
+ params:
+ cluster_id: {{ openshift_openstack_stack_name }}
+ rules:
+ # glusterfs_sshd
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 2222
+ port_range_max: 2222
+ # heketi dialing backends
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 10250
+ port_range_max: 10250
+ # glusterfs_management
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 24007
+ port_range_max: 24007
+ # glusterfs_rdma
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 24008
+ port_range_max: 24008
+ # glusterfs_bricks
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 49152
+ port_range_max: 49251
+
{% if openshift_openstack_num_masters|int > 1 %}
lb-secgrp:
type: OS::Neutron::SecurityGroup
@@ -764,3 +804,58 @@ resources:
depends_on:
- interface
{% endif %}
+
+ cns:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: {{ openshift_openstack_num_cns }}
+ resource_def:
+ type: server.yaml
+ properties:
+ name:
+ str_replace:
+ template: sub_type_k8s_type-%index%.cluster_id
+ params:
+ cluster_id: {{ openshift_openstack_stack_name }}
+ sub_type_k8s_type: {{ openshift_openstack_cns_hostname }}
+ cluster_env: {{ openshift_openstack_public_dns_domain }}
+ cluster_id: {{ openshift_openstack_stack_name }}
+ group:
+ str_replace:
+ template: k8s_type.cluster_id
+ params:
+ k8s_type: cns
+ cluster_id: {{ openshift_openstack_stack_name }}
+ type: cns
+ image: {{ openshift_openstack_cns_image }}
+ flavor: {{ openshift_openstack_cns_flavor }}
+ key_name: {{ openshift_openstack_keypair_name }}
+{% if openshift_openstack_provider_network_name %}
+ net: {{ openshift_openstack_provider_network_name }}
+ net_name: {{ openshift_openstack_provider_network_name }}
+{% else %}
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
+ net_name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: {{ openshift_openstack_stack_name }}
+{% if openshift_use_flannel|default(False)|bool %}
+ attach_data_net: true
+ data_net: { get_resource: data_net }
+ data_subnet: { get_resource: data_subnet }
+{% endif %}
+{% endif %}
+ secgrp:
+{% if openshift_openstack_flat_secgrp|default(False)|bool %}
+ - { get_resource: flat-secgrp }
+{% else %}
+ - { get_resource: node-secgrp }
+{% endif %}
+ - { get_resource: cns-secgrp }
+ - { get_resource: common-secgrp }
+{% if not openshift_openstack_provider_network_name %}
+ floating_network: {{ openshift_openstack_external_network_name }}
+{% endif %}
+ volume_size: {{ openshift_openstack_cns_volume_size }}
diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
index a829da34f..1e73c9e1c 100644
--- a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
@@ -212,6 +212,9 @@ resources:
host-type: { get_param: type }
sub-host-type: { get_param: subtype }
node_labels: { get_param: node_labels }
+{% if openshift_openstack_dns_nameservers %}
+ openshift_hostname: { get_param: name }
+{% endif %}
scheduler_hints: { get_param: scheduler_hints }
{% if use_trunk_ports|default(false)|bool %}
diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml
index 48b0699ab..aea7616bf 100644
--- a/roles/openshift_persistent_volumes/meta/main.yml
+++ b/roles/openshift_persistent_volumes/meta/main.yml
@@ -11,3 +11,4 @@ galaxy_info:
- 7
dependencies:
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_persistent_volumes/tasks/main.yml b/roles/openshift_persistent_volumes/tasks/main.yml
index 0b4dd7d1f..b1d9c8cca 100644
--- a/roles/openshift_persistent_volumes/tasks/main.yml
+++ b/roles/openshift_persistent_volumes/tasks/main.yml
@@ -26,7 +26,8 @@
when: openshift_hosted_registry_storage_glusterfs_swap | default(False)
- name: create standard pv and pvc lists
- # generate_pv_pvcs_list is a custom action module defined in ../action_plugins
+ # generate_pv_pvcs_list is a custom action module defined in
+ # roles/lib_utils/action_plugins/generate_pv_pvcs_list.py
generate_pv_pvcs_list: {}
register: l_pv_pvcs_list
diff --git a/roles/openshift_persistent_volumes/tasks/pv.yml b/roles/openshift_persistent_volumes/tasks/pv.yml
index 346605ff7..865269b7a 100644
--- a/roles/openshift_persistent_volumes/tasks/pv.yml
+++ b/roles/openshift_persistent_volumes/tasks/pv.yml
@@ -8,10 +8,10 @@
- name: Create PersistentVolumes
command: >
- {{ openshift.common.client_binary }} create
+ {{ openshift_client_binary }} create
-f {{ mktemp.stdout }}/persistent-volumes.yml
--config={{ mktemp.stdout }}/admin.kubeconfig
register: pv_create_output
when: persistent_volumes | length > 0
- failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout)
+ failed_when: "('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout) and pv_create_output.rc != 0"
changed_when: ('created' in pv_create_output.stdout)
diff --git a/roles/openshift_persistent_volumes/tasks/pvc.yml b/roles/openshift_persistent_volumes/tasks/pvc.yml
index e44f9b18f..6c12d128c 100644
--- a/roles/openshift_persistent_volumes/tasks/pvc.yml
+++ b/roles/openshift_persistent_volumes/tasks/pvc.yml
@@ -8,10 +8,10 @@
- name: Create PersistentVolumeClaims
command: >
- {{ openshift.common.client_binary }} create
+ {{ openshift_client_binary }} create
-f {{ mktemp.stdout }}/persistent-volume-claims.yml
--config={{ mktemp.stdout }}/admin.kubeconfig
register: pvc_create_output
when: persistent_volume_claims | length > 0
- failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout)
+ failed_when: "('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout) and pvc_create_output.rc != 0"
changed_when: ('created' in pvc_create_output.stdout)
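
The tightened failed_when in both pv.yml and pvc.yml makes the create tasks safely re-runnable: a task now fails only when oc returned non-zero and its output signals neither a creation nor a pre-existing object. As a truth-table sketch:

    # rc == 0, stdout has 'created'        -> ok, reported changed
    # rc != 0, stderr has 'already exists' -> ok, idempotent re-run
    # rc != 0, neither marker present      -> failed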
diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2
index d40417a9a..fac589a92 100644
--- a/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2
+++ b/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2
@@ -8,7 +8,7 @@ items:
metadata:
name: "{{ claim.name }}"
spec:
- accessModes: {{ claim.access_modes | to_padded_yaml(2, 2) }}
+ accessModes: {{ claim.access_modes | lib_utils_to_padded_yaml(2, 2) }}
resources:
requests:
storage: "{{ claim.capacity }}"
diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
index 9ec14208b..354561432 100644
--- a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
+++ b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
@@ -16,6 +16,6 @@ items:
spec:
capacity:
storage: "{{ volume.capacity }}"
- accessModes: {{ volume.access_modes | to_padded_yaml(2, 2) }}
- {{ (volume.storage.keys() | list)[0] }}: {{ volume.storage[(volume.storage.keys() | list)[0]] | to_padded_yaml(3, 2) }}
+ accessModes: {{ volume.access_modes | lib_utils_to_padded_yaml(2, 2) }}
+ {{ (volume.storage.keys() | list)[0] }}: {{ volume.storage[(volume.storage.keys() | list)[0]] | lib_utils_to_padded_yaml(3, 2) }}
{% endfor %}
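
The storage stanza emits the single key of volume.storage and pads its value. For a hypothetical NFS volume definition ({'nfs': {'server': 'nfs.example.com', 'path': '/exports/pv1'}}) the loop body renders roughly as:

    spec:
      capacity:
        storage: "5Gi"
      accessModes:
      - ReadWriteOnce
      nfs:
        server: nfs.example.com
        path: /exports/pv1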
diff --git a/roles/openshift_project_request_template/tasks/main.yml b/roles/openshift_project_request_template/tasks/main.yml
index c31ee5795..3403840fb 100644
--- a/roles/openshift_project_request_template/tasks/main.yml
+++ b/roles/openshift_project_request_template/tasks/main.yml
@@ -6,7 +6,7 @@
- name: Generate default project template
command: |
- {{ openshift.common.client_binary | quote }} \
+ {{ openshift_client_binary | quote }} \
--config {{ openshift.common.config_base | quote }}/master/admin.kubeconfig \
--output yaml \
adm create-bootstrap-project-template \
@@ -28,7 +28,7 @@
- name: Create or update project request template
command: |
- {{ openshift.common.client_binary }} \
+ {{ openshift_client_binary }} \
--config {{ openshift.common.config_base }}/master/admin.kubeconfig \
--namespace {{ openshift_project_request_template_namespace | quote }} \
apply --filename {{ mktemp.stdout }}
diff --git a/roles/openshift_prometheus/meta/main.yaml b/roles/openshift_prometheus/meta/main.yaml
index 33188bb7e..69c5e0ee2 100644
--- a/roles/openshift_prometheus/meta/main.yaml
+++ b/roles/openshift_prometheus/meta/main.yaml
@@ -15,5 +15,6 @@ galaxy_info:
categories:
- openshift
dependencies:
-- { role: lib_openshift }
-- { role: openshift_facts }
+- role: lib_openshift
+- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index abc5dd476..749df5152 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -5,7 +5,7 @@
oc_project:
state: present
name: "{{ openshift_prometheus_namespace }}"
- node_selector: "{{ openshift_prometheus_node_selector | oo_selector_to_string_list() }}"
+ node_selector: "{{ openshift_prometheus_node_selector | lib_utils_oo_selector_to_string_list() }}"
description: Prometheus
# secrets
@@ -16,7 +16,7 @@
namespace: "{{ openshift_prometheus_namespace }}"
contents:
- path: session_secret
- data: "{{ 43 | oo_random_word }}="
+ data: "{{ 43 | lib_utils_oo_random_word }}="
with_items:
- prometheus
- alerts
@@ -39,7 +39,7 @@
# TODO remove this when annotations are supported by oc_serviceaccount
- name: annotate serviceaccount
command: >
- {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+ {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
serviceaccount prometheus
serviceaccounts.openshift.io/oauth-redirectreference.prom='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}'
serviceaccounts.openshift.io/oauth-redirectreference.alerts='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}'
@@ -97,7 +97,7 @@
# TODO remove this when annotations are supported by oc_service
- name: annotate prometheus service
command: >
- {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+ {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
service prometheus
prometheus.io/scrape='true'
prometheus.io/scheme=https
@@ -105,7 +105,7 @@
- name: annotate alerts service
command: >
- {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+ {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
service alerts 'service.alpha.openshift.io/serving-cert-secret-name=prometheus-alerts-tls'
# create prometheus and alerts routes
diff --git a/roles/openshift_prometheus/tasks/main.yaml b/roles/openshift_prometheus/tasks/main.yaml
index 38798e1f5..b859eb111 100644
--- a/roles/openshift_prometheus/tasks/main.yaml
+++ b/roles/openshift_prometheus/tasks/main.yaml
@@ -1,5 +1,5 @@
---
-- name: Set default image variables based on deployment_type
+- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ item }}"
with_first_found:
- "{{ openshift_deployment_type }}.yml"
diff --git a/roles/openshift_provisioners/meta/main.yaml b/roles/openshift_provisioners/meta/main.yaml
index cb9278eb7..5ef352bcd 100644
--- a/roles/openshift_provisioners/meta/main.yaml
+++ b/roles/openshift_provisioners/meta/main.yaml
@@ -14,3 +14,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml
index e543d753c..de763f6cf 100644
--- a/roles/openshift_provisioners/tasks/install_efs.yaml
+++ b/roles/openshift_provisioners/tasks/install_efs.yaml
@@ -1,7 +1,7 @@
---
- name: Check efs current replica count
command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc provisioners-efs
+ {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc provisioners-efs
-o jsonpath='{.spec.replicas}' -n {{openshift_provisioners_project}}
register: efs_replica_count
when: not ansible_check_mode
@@ -58,7 +58,7 @@
# anyuid in order to run as root & chgrp shares with allocated gids
- name: "Check efs anyuid permissions"
command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
get scc/anyuid -o jsonpath='{.users}'
register: efs_anyuid
check_mode: no
@@ -66,7 +66,7 @@
- name: "Set anyuid permissions for efs"
command: >
- {{ openshift.common.client_binary}} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ {{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs
register: efs_output
failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr
diff --git a/roles/openshift_provisioners/tasks/oc_apply.yaml b/roles/openshift_provisioners/tasks/oc_apply.yaml
index 49d03f203..239e1f1cc 100644
--- a/roles/openshift_provisioners/tasks/oc_apply.yaml
+++ b/roles/openshift_provisioners/tasks/oc_apply.yaml
@@ -1,7 +1,7 @@
---
- name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ {{ openshift_client_binary }} --config={{ kubeconfig }}
get {{file_content.kind}} {{file_content.metadata.name}}
-o jsonpath='{.metadata.resourceVersion}'
-n {{namespace}}
@@ -11,16 +11,18 @@
- name: Applying {{file_name}}
command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ {{ openshift_client_binary }} --config={{ kubeconfig }}
apply -f {{ file_name }}
-n {{ namespace }}
register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
+ failed_when:
+ - "'error' in generation_apply.stderr"
+ - "generation_apply.rc != 0"
changed_when: no
- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ {{ openshift_client_binary }} --config={{ kubeconfig }}
get {{file_content.kind}} {{file_content.metadata.name}}
-o jsonpath='{.metadata.resourceVersion}'
-n {{namespace}}
@@ -32,20 +34,24 @@
- name: Removing previous {{file_name}}
command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ {{ openshift_client_binary }} --config={{ kubeconfig }}
delete -f {{ file_name }}
-n {{ namespace }}
register: generation_delete
- failed_when: "'error' in generation_delete.stderr"
+ failed_when:
+ - "'error' in generation_delete.stderr"
+ - "generation_delete.rc != 0"
changed_when: generation_delete.rc == 0
when: generation_apply.rc != 0
- name: Recreating {{file_name}}
command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ {{ openshift_client_binary }} --config={{ kubeconfig }}
apply -f {{ file_name }}
-n {{ namespace }}
register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
+ failed_when:
+ - "'error' in generation_apply.stderr"
+ - "generation_apply.rc != 0"
changed_when: generation_apply.rc == 0
when: generation_apply.rc != 0
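
A list under failed_when is ANDed, so each of these tasks now fails only when stderr mentions an error and the return code is non-zero; a warning on stderr with rc 0 no longer aborts the play. The list form is equivalent to:

    failed_when: "'error' in generation_apply.stderr and generation_apply.rc != 0"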
diff --git a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml
index 602dee773..ac12087ec 100644
--- a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml
+++ b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml
@@ -5,7 +5,7 @@
# delete the deployment objects that we had created
- name: delete provisioner api objects
command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
delete {{ item }} --selector provisioners-infra -n {{ openshift_provisioners_project }} --ignore-not-found=true
with_items:
- dc
@@ -15,7 +15,7 @@
# delete our old secrets
- name: delete provisioner secrets
command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
delete secret {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true
with_items:
- provisioners-efs
@@ -26,7 +26,7 @@
# delete cluster role bindings
- name: delete cluster role bindings
command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
delete clusterrolebindings {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true
with_items:
- run-provisioners-efs
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 2ada20767..911005bb6 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -40,9 +40,9 @@
- include_tasks: rhel_repos.yml
when:
- ansible_distribution == 'RedHat'
- - deployment_type == 'openshift-enterprise'
+ - openshift_deployment_type == 'openshift-enterprise'
- rhsub_user is defined
- - rhsub_password is defined
+ - rhsub_pass is defined
- include_tasks: centos_repos.yml
when:
diff --git a/roles/openshift_repos/tasks/rhel_repos.yml b/roles/openshift_repos/tasks/rhel_repos.yml
index c384cbe9a..8d16629cc 100644
--- a/roles/openshift_repos/tasks/rhel_repos.yml
+++ b/roles/openshift_repos/tasks/rhel_repos.yml
@@ -6,11 +6,10 @@
failed_when: repo_rhui.rc == 11
- name: Disable RHEL rhui repositories
- command: bash -c "yum-config-manager \
+ command: yum-config-manager \
--disable 'rhui-REGION-client-config-server-7' \
--disable 'rhui-REGION-rhel-server-rh-common' \
- --disable 'rhui-REGION-rhel-server-releases' \
- --disable 'rhui-REGION-client-config-server-7'"
+ --disable 'rhui-REGION-rhel-server-releases'
when: repo_rhui.changed
- name: Ensure RHEL repositories are enabled
diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin37.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin37.repo.j2
new file mode 100644
index 000000000..db214af2c
--- /dev/null
+++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin37.repo.j2
@@ -0,0 +1,27 @@
+[centos-openshift-origin37]
+name=CentOS OpenShift Origin
+baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin37/
+enabled=1
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin37-testing]
+name=CentOS OpenShift Origin Testing
+baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin37/
+enabled={{ 1 if openshift_repos_enable_testing else 0 }}
+gpgcheck=0
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin37-debuginfo]
+name=CentOS OpenShift Origin DebugInfo
+baseurl=http://debuginfo.centos.org/centos/7/paas/x86_64/
+enabled=0
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin37-source]
+name=CentOS OpenShift Origin Source
+baseurl=http://vault.centos.org/centos/7/paas/Source/openshift-origin37/
+enabled=0
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
diff --git a/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py
index 72c47b8ee..14f1f72c2 100644
--- a/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py
+++ b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py
@@ -6,15 +6,6 @@
import re
-# This should be removed after map_from_pairs is no longer used in __deprecations_logging.yml
-def map_from_pairs(source, delim="="):
- ''' Returns a dict given the source and delim delimited '''
- if source == '':
- return dict()
-
- return dict(item.split(delim) for item in source.split(","))
-
-
def vars_with_pattern(source, pattern=""):
''' Returns a list of variables whose name matches the given pattern '''
if source == '':
@@ -39,6 +30,5 @@ class FilterModule(object):
def filters(self):
''' Returns the names of the filters provided by this class '''
return {
- 'map_from_pairs': map_from_pairs,
'vars_with_pattern': vars_with_pattern
}
diff --git a/roles/openshift_sanitize_inventory/meta/main.yml b/roles/openshift_sanitize_inventory/meta/main.yml
index f5b37186e..cde3eccb6 100644
--- a/roles/openshift_sanitize_inventory/meta/main.yml
+++ b/roles/openshift_sanitize_inventory/meta/main.yml
@@ -12,4 +12,6 @@ galaxy_info:
categories:
- cloud
- system
-dependencies: []
+dependencies:
+- role: lib_utils
+- role: lib_openshift
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index 651d896cf..62d460272 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -3,37 +3,11 @@
# the user would also be aware of any deprecated variables they should note to adjust
- include_tasks: deprecations.yml
-- name: Abort when conflicting deployment type variables are set
- when:
- - deployment_type is defined
- - openshift_deployment_type is defined
- - openshift_deployment_type != deployment_type
- fail:
- msg: |-
- openshift_deployment_type is set to "{{ openshift_deployment_type }}".
- deployment_type is set to "{{ deployment_type }}".
- To avoid unexpected results, this conflict is not allowed.
- deployment_type is deprecated in favor of openshift_deployment_type.
- Please specify only openshift_deployment_type, or make both the same.
-
- name: Standardize on latest variable names
set_fact:
- # goal is to deprecate deployment_type in favor of openshift_deployment_type.
- # both will be accepted for now, but code should refer to the new name.
- # TODO: once this is well-documented, add deprecation notice if using old name.
- deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}"
- openshift_deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}"
deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}"
openshift_deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}"
-- name: Abort when deployment type is invalid
- # this variable is required; complain early and clearly if it is invalid.
- when: openshift_deployment_type not in known_openshift_deployment_types
- fail:
- msg: |-
- Please set openshift_deployment_type to one of:
- {{ known_openshift_deployment_types | join(', ') }}
-
- name: Normalize openshift_release
set_fact:
# Normalize release if provided, e.g. "v3.5" => "3.5"
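
The removed guards relied on the known_openshift_deployment_types list deleted from vars/main.yml below; validation of openshift_deployment_type presumably happens earlier in the init plays now. The normalization mentioned in the comment amounts to stripping a leading "v", roughly (the actual expression is not shown in this diff):

    openshift_release: "{{ openshift_release | string | regex_replace('^v', '') }}"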
diff --git a/roles/openshift_sanitize_inventory/vars/main.yml b/roles/openshift_sanitize_inventory/vars/main.yml
index 0fc2372d2..df15948d2 100644
--- a/roles/openshift_sanitize_inventory/vars/main.yml
+++ b/roles/openshift_sanitize_inventory/vars/main.yml
@@ -1,7 +1,4 @@
---
-# origin uses community packages named 'origin'
-# openshift-enterprise uses Red Hat packages named 'atomic-openshift'
-known_openshift_deployment_types: ['origin', 'openshift-enterprise']
__deprecation_header: "[DEPRECATION WARNING]:"
diff --git a/roles/openshift_service_catalog/defaults/main.yml b/roles/openshift_service_catalog/defaults/main.yml
index 7c848cb12..15ca9838c 100644
--- a/roles/openshift_service_catalog/defaults/main.yml
+++ b/roles/openshift_service_catalog/defaults/main.yml
@@ -1,6 +1,7 @@
---
openshift_service_catalog_remove: false
openshift_service_catalog_nodeselector: {"openshift-infra": "apiserver"}
+openshift_service_catalog_async_bindings_enabled: false
openshift_use_openshift_sdn: True
# os_sdn_network_plugin_name: "{% if openshift_use_openshift_sdn %}redhat/openshift-ovs-subnet{% else %}{% endif %}"
diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml
index cd7bda2c6..72110b18c 100644
--- a/roles/openshift_service_catalog/tasks/generate_certs.yml
+++ b/roles/openshift_service_catalog/tasks/generate_certs.yml
@@ -12,7 +12,7 @@
- name: Generate signing cert
command: >
- {{ openshift.common.client_binary }} adm --config=/etc/origin/master/admin.kubeconfig ca create-signer-cert
+ {{ openshift_client_binary }} adm --config=/etc/origin/master/admin.kubeconfig ca create-signer-cert
--key={{ generated_certs_dir }}/ca.key --cert={{ generated_certs_dir }}/ca.crt
--serial={{ generated_certs_dir }}/apiserver.serial.txt --name=service-catalog-signer
@@ -59,11 +59,6 @@
src: "{{ generated_certs_dir }}/ca.crt"
register: apiserver_ca
-- shell: >
- {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
- register: get_apiservices
- changed_when: no
-
- name: Create api service
oc_obj:
state: present
@@ -86,4 +81,3 @@
caBundle: "{{ apiserver_ca.content }}"
groupPriorityMinimum: 20
versionPriority: 10
- when: "'not found' in get_apiservices.stdout"
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index 41a6691c9..9b38a85c4 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -6,10 +6,10 @@
register: mktemp
changed_when: False
-- name: Set default image variables based on deployment_type
+- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ item }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "{{ openshift_deployment_type }}.yml"
- "default_images.yml"
- name: Set service_catalog image facts
@@ -38,7 +38,7 @@
- name: Make kube-service-catalog project network global
command: >
- {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog
+ {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog
- include_tasks: generate_certs.yml
@@ -88,14 +88,14 @@
vars:
original_content: "{{ edit_yaml.results.results[0] | to_yaml }}"
when:
- - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
# only do this if we don't already have the updated role info
- name: update edit role for service catalog and pod preset access
command: >
- {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/edit_sc_patch.yml
+ {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/edit_sc_patch.yml
when:
- - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
- oc_obj:
name: admin
@@ -111,14 +111,14 @@
vars:
original_content: "{{ admin_yaml.results.results[0] | to_yaml }}"
when:
- - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
# only do this if we don't already have the updated role info
- name: update admin role for service catalog and pod preset access
command: >
- {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/admin_sc_patch.yml
+ {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/admin_sc_patch.yml
when:
- - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
- oc_obj:
name: view
@@ -134,14 +134,14 @@
vars:
original_content: "{{ view_yaml.results.results[0] | to_yaml }}"
when:
- - not view_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch'])
+ - not view_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch'])
# only do this if we don't already have the updated role info
- name: update view role for service catalog access
command: >
- {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/view_sc_patch.yml
+ {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/view_sc_patch.yml
when:
- - not view_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch'])
+ - not view_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch'])
- oc_adm_policy_user:
namespace: kube-service-catalog
@@ -179,6 +179,8 @@
etcd_servers: "{{ openshift.master.etcd_urls | join(',') }}"
etcd_cafile: "{{ '/etc/origin/master/master.etcd-ca.crt' if etcd_ca_crt.stat.exists else '/etc/origin/master/ca-bundle.crt' }}"
node_selector: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) }}"
+ # apiserver_ca is defined in generate_certs.yml
+ ca_hash: "{{ apiserver_ca.content|hash('sha1') }}"
- name: Set Service Catalog API Server daemonset
oc_obj:
diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml
index a832e1f85..aa32d0513 100644
--- a/roles/openshift_service_catalog/tasks/remove.yml
+++ b/roles/openshift_service_catalog/tasks/remove.yml
@@ -1,7 +1,7 @@
---
- name: Remove Service Catalog APIServer
command: >
- {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
+ {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
# TODO: this module doesn't currently remove this
#- name: Remove service catalog api service
@@ -48,7 +48,7 @@
- name: Remove Service Catalog kube-system Role Bindings
shell: >
- {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process kube-system-service-catalog-role-bindings -n kube-system | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f -
+ {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig process kube-system-service-catalog-role-bindings -n kube-system | {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f -
- oc_obj:
kind: template
@@ -58,7 +58,7 @@
- name: Remove Service Catalog kube-service-catalog Role Bindings
shell: >
- {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process service-catalog-role-bindings -n kube-service-catalog | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f -
+ {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig process service-catalog-role-bindings -n kube-service-catalog | {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f -
- oc_obj:
kind: template
diff --git a/roles/openshift_service_catalog/tasks/start_api_server.yml b/roles/openshift_service_catalog/tasks/start_api_server.yml
index b143292b6..84e542eaf 100644
--- a/roles/openshift_service_catalog/tasks/start_api_server.yml
+++ b/roles/openshift_service_catalog/tasks/start_api_server.yml
@@ -5,7 +5,7 @@
name: "{{ openshift.node.nodename }}"
kind: node
state: add
- labels: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) | oo_dict_to_list_of_dict }}"
+ labels: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) | lib_utils_oo_dict_to_list_of_dict }}"
# wait to see that the apiserver is available
- name: wait for api server to be ready
diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2
index 4f51b8c3c..e345df32c 100644
--- a/roles/openshift_service_catalog/templates/api_server.j2
+++ b/roles/openshift_service_catalog/templates/api_server.j2
@@ -14,6 +14,8 @@ spec:
type: RollingUpdate
template:
metadata:
+ annotations:
+ ca_hash: {{ ca_hash }}
labels:
app: apiserver
spec:
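
Stamping the CA hash into the pod template is the standard checksum-annotation trick: when ca.crt changes, the annotation changes, the pod template differs, and the API server pods get rolled; identical certs leave the template, and therefore the pods, untouched. Combining this hunk with the ca_hash var set in install.yml above, the effective annotation is:

    annotations:
      ca_hash: "{{ apiserver_ca.content | hash('sha1') }}"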
diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2
index 137222f04..c61e05f73 100644
--- a/roles/openshift_service_catalog/templates/controller_manager.j2
+++ b/roles/openshift_service_catalog/templates/controller_manager.j2
@@ -8,7 +8,7 @@ spec:
selector:
matchLabels:
app: controller-manager
- strategy:
+ updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
@@ -38,6 +38,10 @@ spec:
- "5m"
- --feature-gates
- OriginatingIdentity=true
+{% if openshift_service_catalog_async_bindings_enabled | bool %}
+ - --feature-gates
+ - AsyncBindingOperations=true
+{% endif %}
image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
command: ["/usr/bin/service-catalog"]
imagePullPolicy: Always
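
strategy is the Deployment field; the object this template renders is presumably a DaemonSet (matching the api_server daemonset above), where the rolling-update settings live under updateStrategy, so the old key was silently ignored. For reference:

    # Deployment: .spec.strategy.rollingUpdate
    # DaemonSet:  .spec.updateStrategy.rollingUpdate   <- this template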
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index da34fab2a..4cbe262d2 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -6,16 +6,16 @@ openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_gluste
openshift_storage_glusterfs_use_default_selector: False
openshift_storage_glusterfs_storageclass: True
openshift_storage_glusterfs_storageclass_default: False
-openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
+openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
openshift_storage_glusterfs_version: 'latest'
openshift_storage_glusterfs_block_deploy: True
-openshift_storage_glusterfs_block_image: "{{ 'rhgs3/rhgs-gluster-block-prov-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/glusterblock-provisioner' | quote }}"
+openshift_storage_glusterfs_block_image: "{{ 'rhgs3/rhgs-gluster-block-prov-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/glusterblock-provisioner' | quote }}"
openshift_storage_glusterfs_block_version: 'latest'
openshift_storage_glusterfs_block_host_vol_create: True
openshift_storage_glusterfs_block_host_vol_size: 100
openshift_storage_glusterfs_block_host_vol_max: 15
openshift_storage_glusterfs_s3_deploy: True
-openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}"
+openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}"
openshift_storage_glusterfs_s3_version: 'latest'
openshift_storage_glusterfs_s3_account: "{{ omit }}"
openshift_storage_glusterfs_s3_user: "{{ omit }}"
@@ -29,7 +29,7 @@ openshift_storage_glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_is
openshift_storage_glusterfs_heketi_is_missing: True
openshift_storage_glusterfs_heketi_deploy_is_missing: True
openshift_storage_glusterfs_heketi_cli: 'heketi-cli'
-openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
+openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
openshift_storage_glusterfs_heketi_version: 'latest'
openshift_storage_glusterfs_heketi_admin_key: "{{ omit }}"
openshift_storage_glusterfs_heketi_user_key: "{{ omit }}"
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/deploy-heketi-template.yml
new file mode 100644
index 000000000..34af652c2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/deploy-heketi-template.yml
@@ -0,0 +1,133 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: heketi-template
+ deploy-heketi: support
+ annotations:
+ description: Bootstrap Heketi installation
+ tags: glusterfs,heketi,installation
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-service
+ deploy-heketi: support
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: deploy-heketi-${CLUSTER_NAME}
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-route
+ deploy-heketi: support
+ spec:
+ to:
+ kind: Service
+ name: deploy-heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
+ deploy-heketi: support
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ deploy-heketi: support
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: ${HEKETI_FSTAB}
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_FSTAB
+ displayName: heketi fstab path
+ description: Set the fstab path, the file that is populated with the bricks that heketi creates
+ value: /var/lib/heketi/fstab
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-pvcs-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-pvcs-template.yml
new file mode 100644
index 000000000..064b51473
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-pvcs-template.yml
@@ -0,0 +1,67 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: gluster-s3-pvcs
+ labels:
+ glusterfs: s3-pvcs-template
+ gluster-s3: pvcs-template
+ annotations:
+ description: Gluster S3 service template
+ tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+ name: "${PVC}"
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pvc
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: "${PVC_SIZE}"
+- kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+ name: "${META_PVC}"
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-meta-pvc
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: "${META_PVC_SIZE}"
+parameters:
+- name: S3_ACCOUNT
+ displayName: S3 Account Name
+ description: S3 storage account which will provide storage on GlusterFS volumes
+ required: true
+- name: PVC
+ displayName: Primary GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage
+ required: true
+- name: PVC_SIZE
+ displayName: Primary GlusterFS-backed PVC capacity
+ description: Capacity for GlusterFS-backed PVC for object storage
+ value: 2Gi
+- name: META_PVC
+ displayName: Metadata GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage metadata
+ required: true
+- name: META_PVC_SIZE
+ displayName: Metadata GlusterFS-backed PVC capacity
+ description: Capacity for GlusterFS-backed PVC for object storage metadata
+ value: 1Gi
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-template.yml
new file mode 100644
index 000000000..896a1b226
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-template.yml
@@ -0,0 +1,140 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: gluster-s3
+ labels:
+ glusterfs: s3-template
+ gluster-s3: template
+ annotations:
+ description: Gluster S3 service template
+ tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ type: ClusterIP
+ sessionAffinity: None
+ status:
+ loadBalancer: {}
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ spec:
+ to:
+ kind: Service
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ annotations:
+ openshift.io/scc: privileged
+ description: Defines how to deploy gluster s3 object storage
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ template:
+ metadata:
+ name: gluster-${CLUSTER_NAME}-${S3_ACCOUNT}-s3
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ spec:
+ containers:
+ - name: gluster-s3
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: gluster
+ containerPort: 8080
+ protocol: TCP
+ env:
+ - name: S3_ACCOUNT
+ value: "${S3_ACCOUNT}"
+ - name: S3_USER
+ value: "${S3_USER}"
+ - name: S3_PASSWORD
+ value: "${S3_PASSWORD}"
+ resources: {}
+ volumeMounts:
+ - name: gluster-vol1
+ mountPath: "/mnt/gluster-object/${S3_ACCOUNT}"
+ - name: gluster-vol2
+ mountPath: "/mnt/gluster-object/gsmetadata"
+ - name: glusterfs-cgroup
+ readOnly: true
+ mountPath: "/sys/fs/cgroup"
+ terminationMessagePath: "/dev/termination-log"
+ securityContext:
+ privileged: true
+ volumes:
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: gluster-vol1
+ persistentVolumeClaim:
+ claimName: ${PVC}
+ - name: gluster-vol2
+ persistentVolumeClaim:
+ claimName: ${META_PVC}
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ serviceAccountName: default
+ serviceAccount: default
+ securityContext: {}
+parameters:
+- name: IMAGE_NAME
+ displayName: gluster-s3 container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: gluster-s3 container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
+- name: S3_ACCOUNT
+ displayName: S3 Account Name
+ description: S3 storage account which will provide storage on GlusterFS volumes
+ required: true
+- name: S3_USER
+ displayName: S3 User
+ description: S3 user who can access the S3 storage account
+ required: true
+- name: S3_PASSWORD
+ displayName: S3 User Password
+ description: Password for the S3 user
+ required: true
+- name: PVC
+ displayName: Primary GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage
+ value: gluster-s3-claim
+- name: META_PVC
+ displayName: Metadata GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage metadata
+ value: gluster-s3-meta-claim
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/glusterblock-provisioner.yml b/roles/openshift_storage_glusterfs/files/v3.9/glusterblock-provisioner.yml
new file mode 100644
index 000000000..63dd5cce6
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/glusterblock-provisioner.yml
@@ -0,0 +1,104 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterblock-provisioner
+ labels:
+ glusterfs: block-template
+ glusterblock: template
+ annotations:
+ description: glusterblock provisioner template
+ tags: glusterfs
+objects:
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: glusterblock-provisioner-runner
+ labels:
+ glusterfs: block-provisioner-runner-clusterrole
+ glusterblock: provisioner-runner-clusterrole
+ rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["list", "watch", "create", "update", "patch"]
+ - apiGroups: [""]
+ resources: ["services"]
+ verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["routes"]
+ verbs: ["get", "list"]
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-sa
+ glusterblock: ${CLUSTER_NAME}-provisioner-sa
+- apiVersion: v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ roleRef:
+ name: glusterblock-provisioner-runner
+ subjects:
+ - kind: ServiceAccount
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ namespace: ${NAMESPACE}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner-dc
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-dc
+ glusterblock: ${CLUSTER_NAME}-provisioner-dc
+ annotations:
+ description: Defines how to deploy the glusterblock provisioner pod.
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: glusterblock-provisioner
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+ spec:
+ serviceAccountName: glusterblock-${CLUSTER_NAME}-provisioner
+ containers:
+ - name: glusterblock-provisioner
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: PROVISIONER_NAME
+ value: gluster.org/glusterblock
+parameters:
+- name: IMAGE_NAME
+ displayName: glusterblock provisioner container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: glusterblock provisioner container image version
+ required: True
+- name: NAMESPACE
+ displayName: glusterblock provisioner namespace
+ description: The namespace in which these resources are being created
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/glusterfs-template.yml
new file mode 100644
index 000000000..09850a2c2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/glusterfs-template.yml
@@ -0,0 +1,154 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterfs
+ labels:
+ glusterfs: template
+ annotations:
+ description: GlusterFS DaemonSet template
+ tags: glusterfs
+objects:
+- kind: DaemonSet
+ apiVersion: extensions/v1beta1
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-daemonset
+ annotations:
+ description: GlusterFS DaemonSet
+ tags: glusterfs
+ spec:
+ selector:
+ matchLabels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ template:
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ glusterfs-node: pod
+ spec:
+ nodeSelector: "${{NODE_LABELS}}"
+ hostNetwork: true
+ containers:
+ - name: glusterfs
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: GB_GLFS_LRU_COUNT
+ value: "${GB_GLFS_LRU_COUNT}"
+ - name: TCMU_LOGDIR
+ value: "${TCMU_LOGDIR}"
+ resources:
+ requests:
+ memory: 100Mi
+ cpu: 100m
+ volumeMounts:
+ - name: glusterfs-heketi
+ mountPath: "/var/lib/heketi"
+ - name: glusterfs-run
+ mountPath: "/run"
+ - name: glusterfs-lvm
+ mountPath: "/run/lvm"
+ - name: glusterfs-etc
+ mountPath: "/etc/glusterfs"
+ - name: glusterfs-logs
+ mountPath: "/var/log/glusterfs"
+ - name: glusterfs-config
+ mountPath: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ mountPath: "/dev"
+ - name: glusterfs-misc
+ mountPath: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ mountPath: "/sys/fs/cgroup"
+ readOnly: true
+ - name: glusterfs-ssl
+ mountPath: "/etc/ssl"
+ readOnly: true
+ securityContext:
+ capabilities: {}
+ privileged: true
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ terminationMessagePath: "/dev/termination-log"
+ volumes:
+ - name: glusterfs-heketi
+ hostPath:
+ path: "/var/lib/heketi"
+ - name: glusterfs-run
+ emptyDir: {}
+ - name: glusterfs-lvm
+ hostPath:
+ path: "/run/lvm"
+ - name: glusterfs-etc
+ hostPath:
+ path: "/etc/glusterfs"
+ - name: glusterfs-logs
+ hostPath:
+ path: "/var/log/glusterfs"
+ - name: glusterfs-config
+ hostPath:
+ path: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ hostPath:
+ path: "/dev"
+ - name: glusterfs-misc
+ hostPath:
+ path: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: glusterfs-ssl
+ hostPath:
+ path: "/etc/ssl"
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+parameters:
+- name: NODE_LABELS
+ displayName: Daemonset Node Labels
+ description: Labels which define the daemonset node selector. Must contain at least one label of the format 'glusterfs=<CLUSTER_NAME>-host'
+ value: '{ "glusterfs": "storage-host" }'
+- name: IMAGE_NAME
+ displayName: GlusterFS container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: GlusterFS container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
+- name: GB_GLFS_LRU_COUNT
+ displayName: Maximum number of block hosting volumes
+ description: Sets the maximum number of block hosting volumes.
+ value: "15"
+ required: true
+- name: TCMU_LOGDIR
+ displayName: Tcmu runner log directory
+ description: Sets the tcmu-runner log directory.
+ value: "/var/log/glusterfs/gluster-block"
+ required: true
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/heketi-template.yml
new file mode 100644
index 000000000..28cdb2982
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/heketi-template.yml
@@ -0,0 +1,136 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-template
+ annotations:
+ description: Heketi service deployment template
+ tags: glusterfs,heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-service
+ heketi: ${CLUSTER_NAME}-service
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-route
+ heketi: ${CLUSTER_NAME}-route
+ spec:
+ to:
+ kind: Service
+ name: heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-dc
+ heketi: ${CLUSTER_NAME}-dc
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ heketi: ${CLUSTER_NAME}-pod
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: ${HEKETI_FSTAB}
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ glusterfs:
+ endpoints: heketi-db-${CLUSTER_NAME}-endpoints
+ path: heketidbstorage
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_FSTAB
+ displayName: heketi fstab path
+ description: Set the fstab path, the file that is populated with the bricks that heketi creates
+ value: /var/lib/heketi/fstab
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
deleted file mode 100644
index a86c96df7..000000000
--- a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
+++ /dev/null
@@ -1,23 +0,0 @@
-'''
- Openshift Storage GlusterFS class that provides useful filters used in GlusterFS
-'''
-
-
-def map_from_pairs(source, delim="="):
- ''' Returns a dict given the source and delim delimited '''
- if source == '':
- return dict()
-
- return dict(item.split(delim) for item in source.split(","))
-
-
-# pylint: disable=too-few-public-methods
-class FilterModule(object):
- ''' OpenShift Storage GlusterFS Filters '''
-
- # pylint: disable=no-self-use, too-few-public-methods
- def filters(self):
- ''' Returns the names of the filters provided by this class '''
- return {
- 'map_from_pairs': map_from_pairs
- }
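The filter deleted here moved into the `lib_utils` role rather than disappearing; the task files further down gain comments pointing there. For reference, a minimal sketch of its behavior on a typical nodeselector string (the example pairs are illustrative):

```python
# map_from_pairs, as removed here and now provided by the lib_utils role.
def map_from_pairs(source, delim="="):
    """Return a dict parsed from a comma-joined list of delim-separated pairs."""
    if source == '':
        return dict()
    return dict(item.split(delim) for item in source.split(","))

# "storagenode=glusterfs" (the default nodeselector) becomes a label dict:
assert map_from_pairs("storagenode=glusterfs") == {"storagenode": "glusterfs"}
assert map_from_pairs("a=1,b=2") == {"a": "1", "b": "2"}
```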
diff --git a/roles/openshift_storage_glusterfs/meta/main.yml b/roles/openshift_storage_glusterfs/meta/main.yml
index 6a4ef942b..aa20245d5 100644
--- a/roles/openshift_storage_glusterfs/meta/main.yml
+++ b/roles/openshift_storage_glusterfs/meta/main.yml
@@ -12,4 +12,4 @@ galaxy_info:
dependencies:
- role: openshift_facts
- role: lib_openshift
-- role: lib_os_firewall
+- role: lib_utils
diff --git a/roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml b/roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml
index 1664ecc1e..5b4c16740 100644
--- a/roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml
@@ -63,7 +63,7 @@
until:
- "gluster_s3_pvcs.results.results[0]['items'] | count > 0"
# Each PVC's 'Bound' status must be True
- - "gluster_s3_pvcs.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Bound'}) | map('bool') | select | list | count == 2"
+ - "gluster_s3_pvcs.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Bound'}) | map('bool') | select | list | count == 2"
delay: 10
retries: "{{ (glusterfs_timeout | int / 10) | int }}"
@@ -108,6 +108,6 @@
until:
- "gluster_s3_pod.results.results[0]['items'] | count > 0"
# Pod's 'Ready' status must be True
- - "gluster_s3_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ - "gluster_s3_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
delay: 10
retries: "{{ (glusterfs_timeout | int / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
index d6be8c726..e5dcdcab7 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
@@ -61,6 +61,6 @@
until:
- "glusterblock_pod.results.results[0]['items'] | count > 0"
# Pod's 'Ready' status must be True
- - "glusterblock_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ - "glusterblock_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
delay: 10
retries: "{{ (glusterfs_timeout | int / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index d11023a39..001578406 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -2,7 +2,7 @@
- name: Make sure heketi-client is installed
package: name=heketi-client state=present
when:
- - not openshift.common.is_atomic | bool
+ - not openshift_is_atomic | bool
- not glusterfs_heketi_is_native | bool
register: result
until: result is succeeded
@@ -126,7 +126,7 @@
- "glusterfs_heketi_is_native"
- "deploy_heketi_pod.results.results[0]['items'] | count > 0"
# deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
- - "deploy_heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+ - "deploy_heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
- name: Check for existing heketi pod
oc_obj:
@@ -144,7 +144,7 @@
- "glusterfs_heketi_is_native"
- "heketi_pod.results.results[0]['items'] | count > 0"
# heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
- - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+ - "heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
- name: Generate topology file
template:
@@ -177,14 +177,14 @@
- name: Generate heketi admin key
set_fact:
- glusterfs_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
+ glusterfs_heketi_admin_key: "{{ 32 | lib_utils_oo_generate_secret }}"
when:
- glusterfs_heketi_is_native
- glusterfs_heketi_admin_key is undefined
- name: Generate heketi user key
set_fact:
- glusterfs_heketi_user_key: "{{ 32 | oo_generate_secret }}"
+ glusterfs_heketi_user_key: "{{ 32 | lib_utils_oo_generate_secret }}"
until: "glusterfs_heketi_user_key != glusterfs_heketi_admin_key"
delay: 1
retries: 10
@@ -228,7 +228,7 @@
until:
- "deploy_heketi_pod.results.results[0]['items'] | count > 0"
# Pod's 'Ready' status must be True
- - "deploy_heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ - "deploy_heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
delay: 10
retries: "{{ (glusterfs_timeout | int / 10) | int }}"
when:
@@ -238,14 +238,14 @@
- name: Set heketi-cli command
set_fact:
- glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}"
+ glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift_client_binary }} rsh --namespace={{ glusterfs_namespace }} {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}"
- name: Verify heketi service
command: "{{ glusterfs_heketi_client }} cluster list"
changed_when: False
- name: Place heketi topology on heketi Pod
- shell: "{{ openshift.common.client_binary }} exec --namespace={{ glusterfs_namespace }} -i {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} -- bash -c 'mkdir -p {{ mktemp.stdout }} && cat > {{ mktemp.stdout }}/topology.json' < {{ mktemp.stdout }}/topology.json"
+ shell: "{{ openshift_client_binary }} exec --namespace={{ glusterfs_namespace }} -i {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} -- bash -c 'mkdir -p {{ mktemp.stdout }} && cat > {{ mktemp.stdout }}/topology.json' < {{ mktemp.stdout }}/topology.json"
when:
- glusterfs_heketi_is_native
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 2ea7286f3..a374df0ce 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -4,6 +4,7 @@
glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}"
glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native | bool }}"
glusterfs_name: "{{ openshift_storage_glusterfs_name }}"
+ # map_from_pairs is a custom filter plugin in role lib_utils
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}"
glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index 0c2fcb2c5..4cc82f1ad 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -21,7 +21,7 @@
name: "{{ hostvars[item].openshift.node.nodename }}"
kind: node
state: absent
- labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+ labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"
with_items: "{{ groups.all }}"
when: "'openshift' in hostvars[item] and glusterfs_wipe"
@@ -60,7 +60,7 @@
name: "{{ hostvars[item].openshift.node.nodename }}"
kind: node
state: add
- labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+ labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"
with_items: "{{ glusterfs_nodes | default([]) }}"
- name: Copy GlusterFS DaemonSet template
@@ -109,6 +109,6 @@
until:
- "glusterfs_pods.results.results[0]['items'] | count > 0"
# There must be as many pods with 'Ready' status True as there are nodes expecting those pods
- - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count"
+ - "glusterfs_pods.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count"
delay: 10
retries: "{{ (glusterfs_timeout | int / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index b7cff6514..544a6f491 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -4,6 +4,7 @@
glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}"
glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native | bool }}"
glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}"
+ # map_from_pairs is a custom filter plugin in role lib_utils
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}"
glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_registry_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index d23bd42b9..c0a8c53de 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -4,7 +4,7 @@
register: setup_storage
- name: Copy heketi-storage list
- shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
+ shell: "{{ openshift_client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
# This is used in the subsequent task
- name: Copy the admin client config
@@ -15,7 +15,7 @@
# Need `command` here because heketi-storage.json contains multiple objects.
- name: Copy heketi DB to GlusterFS volume
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}"
+ command: "{{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}"
when: setup_storage.rc == 0
- name: Wait for copy job to finish
@@ -28,14 +28,14 @@
until:
- "'results' in heketi_job.results and heketi_job.results.results | count > 0"
# Pod's 'Complete' status must be True
- - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
+ - "heketi_job.results.results | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
delay: 10
retries: "{{ (glusterfs_timeout | int / 10) | int }}"
failed_when:
- "'results' in heketi_job.results"
- "heketi_job.results.results | count > 0"
# Fail when pod's 'Failed' status is True
- - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1"
+ - "heketi_job.results.results | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1"
when: setup_storage.rc == 0
- name: Delete deploy resources
@@ -120,13 +120,13 @@
until:
- "heketi_pod.results.results[0]['items'] | count > 0"
# Pod's 'Ready' status must be True
- - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ - "heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
delay: 10
retries: "{{ (glusterfs_timeout | int / 10) | int }}"
- name: Set heketi-cli command
set_fact:
- glusterfs_heketi_client: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {{ glusterfs_heketi_cli }} -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
+ glusterfs_heketi_client: "{{ openshift_client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {{ glusterfs_heketi_cli }} -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
- name: Verify heketi service
command: "{{ glusterfs_heketi_client }} cluster list"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..11c9195bb
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..3f869d2b7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..ca87807fe
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-storageclass.yml.j2
@@ -0,0 +1,17 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
+provisioner: kubernetes.io/glusterfs
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+{% if glusterfs_heketi_admin_key is defined %}
+ secretNamespace: "{{ glusterfs_namespace }}"
+ secretName: "heketi-{{ glusterfs_name }}-admin-secret"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/heketi.json.j2
new file mode 100644
index 000000000..565e9be98
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/heketi.json.j2
@@ -0,0 +1,42 @@
+{
+ "_port_comment": "Heketi Server Port Number",
+ "port" : "8080",
+
+ "_use_auth": "Enable JWT authorization. Please enable for deployment",
+ "use_auth" : false,
+
+ "_jwt" : "Private keys for access",
+ "jwt" : {
+ "_admin" : "Admin has access to all APIs",
+ "admin" : {
+ "key" : "My Secret"
+ },
+ "_user" : "User only has access to /volumes endpoint",
+ "user" : {
+ "key" : "My Secret"
+ }
+ },
+
+ "_glusterfs_comment": "GlusterFS Configuration",
+ "glusterfs" : {
+
+ "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
+ "executor" : "{{ glusterfs_heketi_executor }}",
+
+ "_db_comment": "Database file name",
+ "db" : "/var/lib/heketi/heketi.db",
+
+ "sshexec" : {
+ "keyfile" : "/etc/heketi/private_key",
+ "port" : "{{ glusterfs_heketi_ssh_port }}",
+ "user" : "{{ glusterfs_heketi_ssh_user }}",
+ "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
+ },
+
+ "_auto_create_block_hosting_volume": "Creates Block Hosting volumes automatically if not found or exsisting volume exhausted",
+ "auto_create_block_hosting_volume": {{ glusterfs_block_host_vol_create | lower }},
+
+ "_block_hosting_volume_size": "New block hosting volume will be created in size mentioned, This is considered only if auto-create is enabled.",
+ "block_hosting_volume_size": {{ glusterfs_block_host_vol_size }}
+ }
+}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/topology.json.j2
new file mode 100644
index 000000000..d6c28f6dd
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/topology.json.j2
@@ -0,0 +1,49 @@
+{
+ "clusters": [
+{%- set clusters = {} -%}
+{%- for node in glusterfs_nodes -%}
 {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+ {%- if cluster in clusters -%}
+ {%- set _dummy = clusters[cluster].append(node) -%}
+ {%- else -%}
+ {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+ {
+ "nodes": [
+{%- for node in clusters[cluster] -%}
+ {
+ "node": {
+ "hostnames": {
+ "manage": [
+{%- if 'glusterfs_hostname' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_hostname }}"
+{%- elif 'openshift' in hostvars[node] -%}
+ "{{ hostvars[node].openshift.node.nodename }}"
+{%- else -%}
+ "{{ node }}"
+{%- endif -%}
+ ],
+ "storage": [
+{%- if 'glusterfs_ip' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_ip }}"
+{%- else -%}
+ "{{ hostvars[node].openshift.common.ip }}"
+{%- endif -%}
+ ]
+ },
+ "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+ },
+ "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+ "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+}
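To make the nested template loops easier to follow, here is the shape of the JSON rendered for a hypothetical single-node inventory (hostname, IP, and device paths are illustrative; the fallback chain is noted in comments):

```python
import json

rendered = {
    "clusters": [{
        "nodes": [{
            "node": {
                "hostnames": {
                    "manage": ["node1.example.com"],  # glusterfs_hostname, else openshift.node.nodename, else inventory name
                    "storage": ["192.0.2.11"],        # glusterfs_ip, else openshift.common.ip
                },
                "zone": 1,                            # glusterfs_zone | default(1)
            },
            "devices": ["/dev/sdb", "/dev/sdc"],      # glusterfs_devices
        }],
    }],
}
print(json.dumps(rendered, indent=2))
```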
diff --git a/roles/openshift_storage_nfs/meta/main.yml b/roles/openshift_storage_nfs/meta/main.yml
index d61e6873a..3ae04e59f 100644
--- a/roles/openshift_storage_nfs/meta/main.yml
+++ b/roles/openshift_storage_nfs/meta/main.yml
@@ -10,5 +10,5 @@ galaxy_info:
versions:
- 7
dependencies:
-- role: lib_os_firewall
+- role: lib_utils
- role: openshift_facts
diff --git a/roles/openshift_storage_nfs_lvm/README.md b/roles/openshift_storage_nfs_lvm/README.md
index cc674d3fd..a11219f6d 100644
--- a/roles/openshift_storage_nfs_lvm/README.md
+++ b/roles/openshift_storage_nfs_lvm/README.md
@@ -1,7 +1,7 @@
# openshift_storage_nfs_lvm
This role is useful to create and export NFS disks for OpenShift persistent volumes.
-It does so by creating lvm partitions on an already setup pv/vg, creating xfs
+It does so by creating lvm partitions on an already setup pv/vg, creating xfs
filesystem on each partition, mounting the partitions, exporting the mounts via NFS
and creating a JSON file for each mount that an OpenShift master can use to
create persistent volumes.
@@ -20,7 +20,7 @@ create persistent volumes.
osnl_nfs_export_options: "*(rw,sync,all_squash)"
# Directory, where the created partitions should be mounted. They will be
-# mounted as <osnl_mount_dir>/<lvm volume name>
+# mounted as <osnl_mount_dir>/<lvm volume name>
osnl_mount_dir: /exports/openshift
# Volume Group to use.
@@ -64,11 +64,10 @@ None
## Example Playbook
With this playbook, two 5 GiB LVM partitions are created, named stg5g0003 and stg5g0004.
-Both of them are mounted into `/exports/openshift` directory. Both directories are
+Both of them are mounted into `/exports/openshift` directory. Both directories are
exported via NFS. JSON files are created in /root.
- hosts: nfsservers
- become: no
remote_user: root
gather_facts: no
roles:
@@ -94,7 +93,6 @@ exported via NFS. json files are created in /root.
* Create an ansible playbook, say `setupnfs.yaml`:
```
- hosts: nfsservers
- become: no
remote_user: root
gather_facts: no
roles:
diff --git a/roles/openshift_storage_nfs_lvm/meta/main.yml b/roles/openshift_storage_nfs_lvm/meta/main.yml
index 50d94f6a3..de47708a5 100644
--- a/roles/openshift_storage_nfs_lvm/meta/main.yml
+++ b/roles/openshift_storage_nfs_lvm/meta/main.yml
@@ -16,3 +16,4 @@ galaxy_info:
- openshift
dependencies:
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_storage_nfs_lvm/tasks/main.yml b/roles/openshift_storage_nfs_lvm/tasks/main.yml
index c8e7b6d7c..ff92e59e5 100644
--- a/roles/openshift_storage_nfs_lvm/tasks/main.yml
+++ b/roles/openshift_storage_nfs_lvm/tasks/main.yml
@@ -2,7 +2,7 @@
# TODO -- this may actually work on atomic hosts
- fail:
msg: "openshift_storage_nfs_lvm is not compatible with atomic host"
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Create lvm volumes
lvol: vg={{osnl_volume_group}} lv={{ item }} size={{osnl_volume_size}}G
diff --git a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
index 94dc63bd2..9a72adbdc 100644
--- a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
+++ b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
@@ -1,7 +1,7 @@
---
- name: Install NFS server
package: name=nfs-utils state=present
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
register: result
until: result is succeeded
diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml
index 354699637..e2e6538c9 100644
--- a/roles/openshift_version/defaults/main.yml
+++ b/roles/openshift_version/defaults/main.yml
@@ -8,3 +8,5 @@ openshift_service_type_dict:
openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
openshift_use_crio_only: False
+
+l_first_master_version_task_file: "{{ openshift_is_containerized | ternary('first_master_containerized_version.yml', 'first_master_rpm_version.yml') }}"
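The new default simply dispatches the version-detection include on the install method; expressed in plain Python:

```python
# Equivalent of the ternary filter above.
def first_master_version_task_file(openshift_is_containerized: bool) -> str:
    return ("first_master_containerized_version.yml" if openshift_is_containerized
            else "first_master_rpm_version.yml")

assert first_master_version_task_file(True) == "first_master_containerized_version.yml"
assert first_master_version_task_file(False) == "first_master_rpm_version.yml"
```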
diff --git a/roles/openshift_version/tasks/check_available_rpms.yml b/roles/openshift_version/tasks/check_available_rpms.yml
new file mode 100644
index 000000000..fea0daf77
--- /dev/null
+++ b/roles/openshift_version/tasks/check_available_rpms.yml
@@ -0,0 +1,10 @@
+---
+- name: Get available {{ openshift_service_type}} version
+ repoquery:
+ name: "{{ openshift_service_type}}{{ '-' ~ openshift_release ~ '*' if openshift_release is defined else '' }}"
+ ignore_excluders: true
+ register: rpm_results
+
+- fail:
+ msg: "Package {{ openshift_service_type}} not found"
+ when: not rpm_results.results.package_found
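The repoquery name is version-pinned only when `openshift_release` is defined; the Jinja concatenation reduces to the following (example values assumed):

```python
# How the repoquery `name` argument is assembled.
def repoquery_name(openshift_service_type, openshift_release=None):
    suffix = "-" + openshift_release + "*" if openshift_release is not None else ""
    return openshift_service_type + suffix

assert repoquery_name("origin", "3.9") == "origin-3.9*"  # pinned to a release series
assert repoquery_name("origin") == "origin"              # latest available package
```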
diff --git a/roles/openshift_version/tasks/first_master.yml b/roles/openshift_version/tasks/first_master.yml
new file mode 100644
index 000000000..374725086
--- /dev/null
+++ b/roles/openshift_version/tasks/first_master.yml
@@ -0,0 +1,30 @@
+---
+# Determine the openshift_version to configure if none has been specified or set previously.
+
+# Protect the installed version by default unless explicitly told not to, or given an
+# openshift_version already.
+- name: Use openshift.common.version fact as version to configure if already installed
+ set_fact:
+ openshift_version: "{{ openshift.common.version }}"
+ when:
+ - openshift.common.version is defined
+ - openshift_version is not defined or openshift_version == ""
+ - openshift_protect_installed_version | bool
+
+- include_tasks: "{{ l_first_master_version_task_file }}"
+
+- block:
+ - debug:
+ msg: "openshift_pkg_version was not defined. Falling back to -{{ openshift_version }}"
+ - set_fact:
+ openshift_pkg_version: -{{ openshift_version }}
+ when:
+ - openshift_pkg_version is not defined
+ - openshift_upgrade_target is not defined
+
+- block:
+ - debug:
+ msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}"
+ - set_fact:
+ openshift_image_tag: v{{ openshift_version }}
+ when: openshift_image_tag is not defined
diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/first_master_containerized_version.yml
index 71f957b78..3ed1d2cfe 100644
--- a/roles/openshift_version/tasks/set_version_containerized.yml
+++ b/roles/openshift_version/tasks/first_master_containerized_version.yml
@@ -7,6 +7,7 @@
when:
- openshift_image_tag is defined
- openshift_version is not defined
+ - not (openshift_version_reinit | default(false))
- name: Set containerized version to configure if openshift_release specified
set_fact:
@@ -20,7 +21,7 @@
docker run --rm {{ openshift_cli_image }}:latest version
register: cli_image_version
when:
- - openshift_version is not defined
+ - openshift_version is not defined or openshift_version_reinit | default(false)
- not openshift_use_crio_only
# Origin latest = pre-release version (i.e. v1.3.0-alpha.1-321-gb095e3a)
@@ -34,7 +35,7 @@
- set_fact:
openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
- when: openshift_version is not defined
+ when: openshift_version is not defined or openshift_version_reinit | default(false)
# If we got an openshift_version like "3.2", lookup the latest 3.2 container version
# and use that value instead.
@@ -62,4 +63,4 @@
# dangly +c0mm1t-offset tags in the version. See also,
# openshift_facts.py
- set_fact:
- openshift_version: "{{ openshift_version | oo_chomp_commit_offset }}"
+ openshift_version: "{{ openshift_version | lib_utils_oo_chomp_commit_offset }}"
diff --git a/roles/openshift_version/tasks/first_master_rpm_version.yml b/roles/openshift_version/tasks/first_master_rpm_version.yml
new file mode 100644
index 000000000..5d92f90c6
--- /dev/null
+++ b/roles/openshift_version/tasks/first_master_rpm_version.yml
@@ -0,0 +1,20 @@
+---
+- name: Set rpm version to configure if openshift_pkg_version specified
+ set_fact:
+ # Expects a leading "-" in inventory, strip it off here, and remove trailing release,
+ openshift_version: "{{ openshift_pkg_version[1:].split('-')[0] }}"
+ when:
+ - openshift_pkg_version is defined
+ - openshift_version is not defined
+ - not (openshift_version_reinit | default(false))
+
+# These tasks should only be run against masters and nodes
+- name: Set openshift_version for rpm installation
+ include_tasks: check_available_rpms.yml
+
+- set_fact:
+ openshift_version: "{{ rpm_results.results.versions.available_versions.0 }}"
+ when: openshift_version is not defined or ( openshift_version_reinit | default(false) )
+- set_fact:
+ openshift_pkg_version: "-{{ rpm_results.results.versions.available_versions.0 }}"
+ when: openshift_version_reinit | default(false)
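The slicing in the first task above is easy to misread: `openshift_pkg_version` arrives from inventory with a leading `-` and may carry an RPM release suffix, and both are stripped. In Python terms (the inventory value is an assumed example):

```python
# Mirror of "{{ openshift_pkg_version[1:].split('-')[0] }}".
openshift_pkg_version = "-3.9.0-1.el7"  # hypothetical inventory value
openshift_version = openshift_pkg_version[1:].split("-")[0]
assert openshift_version == "3.9.0"     # leading '-' and trailing release removed
```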
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index e50d5371e..b42794858 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -1,210 +1,2 @@
---
-# Determine the openshift_version to configure if none has been specified or set previously.
-
-- set_fact:
- is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}"
- is_atomic: "{{ openshift.common.is_atomic | default(False) | bool }}"
-
-# Block attempts to install origin without specifying some kind of version information.
-# This is because the latest tags for origin are usually alpha builds, which should not
-# be used by default. Users must indicate what they want.
-- name: Abort when we cannot safely guess what Origin image version the user wanted
- fail:
- msg: |-
- To install a containerized Origin release, you must set openshift_release or
- openshift_image_tag in your inventory to specify which version of the OpenShift
- component images to use. You may want the latest (usually alpha) releases or
- a more stable release. (Suggestion: add openshift_release="x.y" to inventory.)
- when:
- - is_containerized | bool
- - openshift.common.deployment_type == 'origin'
- - openshift_release is not defined
- - openshift_image_tag is not defined
-
-# Normalize some values that we need in a certain format that might be confusing:
-- set_fact:
- openshift_release: "{{ openshift_release[1:] }}"
- when:
- - openshift_release is defined
- - openshift_release[0] == 'v'
-
-- set_fact:
- openshift_release: "{{ openshift_release | string }}"
- when:
- - openshift_release is defined
-
-# Verify that the image tag is in a valid format
-- when:
- - openshift_image_tag is defined
- - openshift_image_tag != "latest"
- block:
-
- # Verifies that when the deployment type is origin the version:
- # - starts with a v
- # - Has 3 integers separated by dots
- # It also allows for optional trailing data which:
- # - must start with a dash
- # - may contain numbers, letters, dashes and dots.
- - name: (Origin) Verify openshift_image_tag is valid
- when: openshift.common.deployment_type == 'origin'
- assert:
- that:
- - "{{ openshift_image_tag is match('(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)') }}"
- msg: |-
- openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1
- You specified openshift_image_tag={{ openshift_image_tag }}
-
- # Verifies that when the deployment type is openshift-enterprise the version:
- # - starts with a v
- # - Has at least 2 integers separated by dots
- # It also allows for optional trailing data which:
- # - must start with a dash
- # - may contain numbers
- # - may contain dots (https://github.com/openshift/openshift-ansible/issues/5192)
- #
- - name: (Enterprise) Verify openshift_image_tag is valid
- when: openshift.common.deployment_type == 'openshift-enterprise'
- assert:
- that:
- - "{{ openshift_image_tag is match('(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)') }}"
- msg: |-
- openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3,
- v3.5.1.3.4, v1.2-1, v1.2.3-4, v1.2.3-4.5, v1.2.3-4.5.6
- You specified openshift_image_tag={{ openshift_image_tag }}
-
-# Make sure we copy this to a fact if given a var:
-- set_fact:
- openshift_version: "{{ openshift_version | string }}"
- when: openshift_version is defined
-
-# Protect the installed version by default unless explicitly told not to, or given an
-# openshift_version already.
-- name: Use openshift.common.version fact as version to configure if already installed
- set_fact:
- openshift_version: "{{ openshift.common.version }}"
- when:
- - openshift.common.version is defined
- - openshift_version is not defined or openshift_version == ""
- - openshift_protect_installed_version | bool
-
-# The rest of these tasks should only execute on
-# masters and nodes as we can verify they have subscriptions
-- when:
- - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config']
- block:
- - name: Set openshift_version for rpm installation
- include_tasks: set_version_rpm.yml
- when: not is_containerized | bool
-
- - name: Set openshift_version for containerized installation
- include_tasks: set_version_containerized.yml
- when: is_containerized | bool
-
- - block:
- - name: Get available {{ openshift_service_type}} version
- repoquery:
- name: "{{ openshift_service_type}}"
- ignore_excluders: true
- register: rpm_results
- - fail:
- msg: "Package {{ openshift_service_type}} not found"
- when: not rpm_results.results.package_found
- - set_fact:
- openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
- - name: Fail if rpm version and docker image version are different
- fail:
- msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}"
- # Both versions have the same string representation
- when:
- - openshift_rpm_version != openshift_version
- # if openshift_pkg_version or openshift_image_tag is defined, the user permits the rpm and docker image versions to differ
- - openshift_pkg_version is not defined
- - openshift_image_tag is not defined
- when:
- - is_containerized | bool
- - not is_atomic | bool
-
- # Warn if the user has provided an openshift_image_tag but is not doing a containerized install
- # NOTE: This will need to be modified/removed for future container + rpm installations work.
- - name: Warn if openshift_image_tag is defined when not doing a containerized install
- debug:
- msg: >
- openshift_image_tag is used for containerized installs. If you are trying to
- specify an image for a non-container install see oreg_url or oreg_url_master or oreg_url_node.
- when:
- - not is_containerized | bool
- - openshift_image_tag is defined
-
- # At this point we know openshift_version is set appropriately. Now we set
- # openshift_image_tag and openshift_pkg_version, so all roles can always assume
- # each of these variables *will* be set correctly and can use them per their
- # intended purpose.
-
- - block:
- - debug:
- msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}"
-
- - set_fact:
- openshift_image_tag: v{{ openshift_version }}
-
- when: openshift_image_tag is not defined
-
- - block:
- - debug:
- msg: "openshift_pkg_version was not defined. Falling back to -{{ openshift_version }}"
-
- - set_fact:
- openshift_pkg_version: -{{ openshift_version }}
-
- when:
- - openshift_pkg_version is not defined
- - openshift_upgrade_target is not defined
-
- - fail:
- msg: openshift_version role was unable to set openshift_version
- name: Abort if openshift_version was not set
- when: openshift_version is not defined
-
- - fail:
- msg: openshift_version role was unable to set openshift_image_tag
- name: Abort if openshift_image_tag was not set
- when: openshift_image_tag is not defined
-
- - fail:
- msg: openshift_version role was unable to set openshift_pkg_version
- name: Abort if openshift_pkg_version was not set
- when:
- - openshift_pkg_version is not defined
- - openshift_upgrade_target is not defined
-
-
- - fail:
- msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
- name: Abort if no OpenShift version is available
- when:
- - not is_containerized | bool
- - openshift_version == '0.0'
-
- # We can't map an openshift_release to full rpm version like we can with containers; make sure
- # the rpm version we looked up matches the release requested and error out if not.
- - name: For an RPM install, abort when the release requested does not match the available version.
- when:
- - not is_containerized | bool
- - openshift_release is defined
- assert:
- that:
- - openshift_version.startswith(openshift_release) | bool
- msg: |-
- You requested openshift_release {{ openshift_release }}, which is not matched by
- the latest OpenShift RPM we detected as {{ openshift_service_type }}-{{ openshift_version }}
- on host {{ inventory_hostname }}.
- We will only install the latest RPMs, so please ensure you are getting the release
- you expect. You may need to adjust your Ansible inventory, modify the repositories
- available on the host, or run the appropriate OpenShift upgrade playbook.
-
- # The end result of these three variables is quite important so make sure they are displayed and logged:
- - debug: var=openshift_release
-
- - debug: var=openshift_image_tag
-
- - debug: var=openshift_pkg_version
+# This role is meant to be used with include_role.
diff --git a/roles/openshift_version/tasks/masters_and_nodes.yml b/roles/openshift_version/tasks/masters_and_nodes.yml
new file mode 100644
index 000000000..eddd5ff42
--- /dev/null
+++ b/roles/openshift_version/tasks/masters_and_nodes.yml
@@ -0,0 +1,42 @@
+---
+# These tasks should only be run against masters and nodes
+
+- block:
+ - name: Check openshift_version for rpm installation
+ include_tasks: check_available_rpms.yml
+ - name: Fail if rpm version and docker image version are different
+ fail:
+ msg: "OCP rpm version {{ rpm_results.results.versions.available_versions.0 }} is different from OCP image version {{ openshift_version }}"
+ # Both versions have the same string representation
+ when:
+ - openshift_version not in rpm_results.results.versions.available_versions.0
+ - openshift_version_reinit | default(false)
+
+ # block when
+ when: not openshift_is_atomic | bool
+
+# We can't map an openshift_release to full rpm version like we can with containers; make sure
+# the rpm version we looked up matches the release requested and error out if not.
+- name: For an RPM install, abort when the release requested does not match the available version.
+ when:
+ - not openshift_is_containerized | bool
+ - openshift_release is defined
+ assert:
+ that:
+ - l_rpm_version.startswith(openshift_release) | bool
+ msg: |-
+ You requested openshift_release {{ openshift_release }}, which is not matched by
+ the latest OpenShift RPM we detected as {{ openshift_service_type }}-{{ l_rpm_version }}
+ on host {{ inventory_hostname }}.
+ We will only install the latest RPMs, so please ensure you are getting the release
+ you expect. You may need to adjust your Ansible inventory, modify the repositories
+ available on the host, or run the appropriate OpenShift upgrade playbook.
+ vars:
+ l_rpm_version: "{{ rpm_results.results.versions.available_versions.0 }}"
+
+# The end result of these three variables is quite important, so make sure they are displayed and logged:
+- debug: var=openshift_release
+
+- debug: var=openshift_image_tag
+
+- debug: var=openshift_pkg_version
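The new task file assumes its caller restricts it to the right hosts. A minimal sketch of such a caller, mirroring the master/node gating that the deleted main.yml above applied (the task name is illustrative):

    # Hypothetical caller, e.g. from the openshift_version role's entry point.
    - name: Run version checks against masters and nodes
      include_tasks: masters_and_nodes.yml
      when: inventory_hostname in groups['oo_masters_to_config']
            or inventory_hostname in groups['oo_nodes_to_config']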
diff --git a/roles/openshift_version/tasks/set_version_rpm.yml b/roles/openshift_version/tasks/set_version_rpm.yml
deleted file mode 100644
index c7ca5ceae..000000000
--- a/roles/openshift_version/tasks/set_version_rpm.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: Set rpm version to configure if openshift_pkg_version specified
- set_fact:
- # Expects a leading "-" in inventory, strip it off here, and remove trailing release,
- openshift_version: "{{ openshift_pkg_version[1:].split('-')[0] }}"
- when:
- - openshift_pkg_version is defined
- - openshift_version is not defined
-
-- block:
- - name: Get available {{ openshift_service_type}} version
- repoquery:
- name: "{{ openshift_service_type}}"
- ignore_excluders: true
- register: rpm_results
-
- - fail:
- msg: "Package {{ openshift_service_type}} not found"
- when: not rpm_results.results.package_found
-
- - set_fact:
- openshift_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
- when:
- - openshift_version is not defined
diff --git a/roles/openshift_web_console/defaults/main.yml b/roles/openshift_web_console/defaults/main.yml
new file mode 100644
index 000000000..4f395398c
--- /dev/null
+++ b/roles/openshift_web_console/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+# TODO: This is temporary and will be updated to use taints and tolerations so that the console runs on the masters
+openshift_web_console_nodeselector: {"region":"infra"}
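This default pins the console to region=infra nodes, matching the router and registry selectors used in the CI inventory further down. An inventory that labels its infra nodes differently would override it in group_vars; a sketch with a hypothetical label:

    # group_vars/OSEv3.yml (illustrative override of the temporary default)
    openshift_web_console_nodeselector: {"node-role.kubernetes.io/infra": "true"}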
diff --git a/roles/openshift_web_console/meta/main.yaml b/roles/openshift_web_console/meta/main.yaml
new file mode 100644
index 000000000..033c1e3a3
--- /dev/null
+++ b/roles/openshift_web_console/meta/main.yaml
@@ -0,0 +1,19 @@
+---
+galaxy_info:
+ author: OpenShift Development <dev@lists.openshift.redhat.com>
+ description: Deploy OpenShift web console
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.4
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ - name: Fedora
+ versions:
+ - all
+ categories:
+ - openshift
+dependencies:
+- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_web_console/tasks/install.yml b/roles/openshift_web_console/tasks/install.yml
new file mode 100644
index 000000000..50e72657f
--- /dev/null
+++ b/roles/openshift_web_console/tasks/install.yml
@@ -0,0 +1,111 @@
+---
+# Fact setting
+- name: Set default image variables based on deployment type
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "default_images.yml"
+
+- name: Set openshift_web_console facts
+ set_fact:
+ openshift_web_console_prefix: "{{ openshift_web_console_prefix | default(__openshift_web_console_prefix) }}"
+ openshift_web_console_version: "{{ openshift_web_console_version | default(__openshift_web_console_version) }}"
+ openshift_web_console_image_name: "{{ openshift_web_console_image_name | default(__openshift_web_console_image_name) }}"
+ # Default the replica count to the number of masters.
+ openshift_web_console_replica_count: "{{ openshift_web_console_replica_count | default(groups.oo_masters_to_config | length) }}"
+
+- name: Ensure openshift-web-console project exists
+ oc_project:
+ name: openshift-web-console
+ state: present
+ node_selector:
+ - ""
+
+- name: Make temp directory for the web console config files
+ command: mktemp -d /tmp/console-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- name: Copy the web console config template to temp directory
+ copy:
+ src: "{{ __console_files_location }}/{{ item }}"
+ dest: "{{ mktemp.stdout }}/{{ item }}"
+ with_items:
+ - "{{ __console_template_file }}"
+ - "{{ __console_rbac_file }}"
+ - "{{ __console_config_file }}"
+
+- name: Update the web console config properties
+ yedit:
+ src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
+ edits:
+ - key: clusterInfo#consolePublicURL
+ # Must have a trailing slash
+ value: "{{ openshift.master.public_console_url }}/"
+ - key: clusterInfo#masterPublicURL
+ value: "{{ openshift.master.public_api_url }}"
+ - key: clusterInfo#logoutPublicURL
+ value: "{{ openshift.master.logout_url | default('') }}"
+ - key: features#inactivityTimeoutMinutes
+ value: "{{ openshift_web_console_inactivity_timeout_minutes | default(0) }}"
+
+ # TODO: The new extensions properties cannot be set until
+ # origin-web-console-server has been updated with the API changes, since
+ # `extensions` in the old asset config was an array.
+
+ # - key: extensions#scriptURLs
+ # value: "{{ openshift_web_console_extension_script_urls | default([]) }}"
+ # - key: extensions#stylesheetURLs
+ # value: "{{ openshift_web_console_extension_stylesheet_urls | default([]) }}"
+ # - key: extensions#properties
+ # value: "{{ openshift_web_console_extension_properties | default({}) }}"
+
+ # DEPRECATED PROPERTIES
+ # These properties have been renamed and will be removed from the install
+ # in a future pull. Keep both the old and new properties for now so that
+ # the install is not broken while the origin-web-console image is updated.
+ - key: publicURL
+ # Must have a trailing slash
+ value: "{{ openshift.master.public_console_url }}/"
+ - key: logoutURL
+ value: "{{ openshift.master.logout_url | default('') }}"
+ - key: masterPublicURL
+ value: "{{ openshift.master.public_api_url }}"
+ separator: '#'
+ state: present
+
+- slurp:
+ src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
+ register: config
+
+- name: Reconcile with the web console RBAC file
+ shell: >
+ {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __console_rbac_file }}" | {{ openshift_client_binary }} auth reconcile -f -
+
+- name: Apply the web console template file
+ shell: >
+ {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __console_template_file }}"
+ --param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"
+ --param IMAGE="{{ openshift_web_console_prefix }}{{ openshift_web_console_image_name }}:{{ openshift_web_console_version }}"
+ --param NODE_SELECTOR={{ openshift_web_console_nodeselector | to_json | quote }}
+ --param REPLICA_COUNT="{{ openshift_web_console_replica_count }}"
+ | {{ openshift_client_binary }} apply -f -
+
+- name: Verify that the web console is running
+ command: >
+ curl -k https://webconsole.openshift-web-console.svc/healthz
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: console_health
+ until: console_health.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
+
+- name: Remove temp directory
+ file:
+ state: absent
+ name: "{{ mktemp.stdout }}"
+ changed_when: False
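In short, install.yml renders console-config.yaml with yedit, reconciles RBAC, processes and applies the console template, then polls the service's /healthz endpoint. A minimal sketch of a play that might drive the role (play name and host group are assumptions; main.yml below toggles install vs. remove via openshift_web_console_install):

    # Hypothetical playbook entry point for the role.
    - name: Web Console
      hosts: oo_first_master
      roles:
      - openshift_web_console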
diff --git a/roles/openshift_web_console/tasks/main.yml b/roles/openshift_web_console/tasks/main.yml
new file mode 100644
index 000000000..937bebf25
--- /dev/null
+++ b/roles/openshift_web_console/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+# do any asserts here
+
+- include_tasks: install.yml
+ when: openshift_web_console_install | default(true) | bool
+
+- include_tasks: remove.yml
+ when: not openshift_web_console_install | default(true) | bool
diff --git a/roles/openshift_web_console/tasks/remove.yml b/roles/openshift_web_console/tasks/remove.yml
new file mode 100644
index 000000000..f0712a993
--- /dev/null
+++ b/roles/openshift_web_console/tasks/remove.yml
@@ -0,0 +1,5 @@
+---
+- name: Remove openshift-web-console project
+ oc_project:
+ name: openshift-web-console
+ state: absent
diff --git a/roles/openshift_web_console/tasks/update_console_config.yml b/roles/openshift_web_console/tasks/update_console_config.yml
new file mode 100644
index 000000000..e347c0193
--- /dev/null
+++ b/roles/openshift_web_console/tasks/update_console_config.yml
@@ -0,0 +1,71 @@
+---
+# This task updates asset config values in the webconsole-config config map in
+# the openshift-web-console namespace. The values to set are passed in the
+# variable `console_config_edits`, which is an array of objects with `key` and
+# `value` properties in the same format as `yedit` module `edits`. Only
+# properties passed are updated. The separator for nested properties is `#`.
+#
+# Note that this triggers a redeployment of the console and a brief downtime,
+# since the deployment uses a `Recreate` strategy.
+#
+# Example usage:
+#
+# - include_role:
+# name: openshift_web_console
+# tasks_from: update_console_config.yml
+# vars:
+# console_config_edits:
+# - key: clusterInfo#loggingPublicURL
+# value: "https://{{ openshift_logging_kibana_hostname }}"
+# when: openshift_web_console_install | default(true) | bool
+
+- name: Read web console config map
+ oc_configmap:
+ namespace: openshift-web-console
+ name: webconsole-config
+ state: list
+ register: webconsole_config
+
+- name: Make temp directory
+ command: mktemp -d /tmp/console-ansible-XXXXXX
+ register: mktemp_console
+ changed_when: False
+
+- name: Copy web console config to temp file
+ copy:
+ content: "{{webconsole_config.results.results[0].data['webconsole-config.yaml']}}"
+ dest: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
+
+- name: Change web console config properties
+ yedit:
+ src: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
+ edits: "{{console_config_edits}}"
+ separator: '#'
+ state: present
+
+- name: Update web console config map
+ oc_configmap:
+ namespace: openshift-web-console
+ name: webconsole-config
+ state: present
+ from_file:
+ webconsole-config.yaml: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
+
+- name: Remove temp directory
+ file:
+ state: absent
+ name: "{{ mktemp_console.stdout }}"
+ changed_when: False
+
+# TODO: Only rollout if config has changed.
+# There's currently no command to trigger a rollout for a k8s deployment
+# without changing the pod spec. Add an annotation to force a rollout after
+# the config map has been edited.
+- name: Rollout updated web console deployment
+ oc_edit:
+ kind: deployments
+ name: webconsole
+ namespace: openshift-web-console
+ separator: '#'
+ content:
+ spec#template#metadata#annotations#installer-triggered-rollout: "{{ ansible_date_time.iso8601_micro }}"
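Beyond the loggingPublicURL example in the header comment, other roles could use the same entry point to set their own clusterInfo keys; a sketch, where the metricsPublicURL key and the hostname are assumptions:

    # Hypothetical caller, e.g. from a metrics role.
    - include_role:
        name: openshift_web_console
        tasks_from: update_console_config.yml
      vars:
        console_config_edits:
        - key: clusterInfo#metricsPublicURL
          value: "https://hawkular-metrics.example.com/hawkular/metrics"
      when: openshift_web_console_install | default(true) | bool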
diff --git a/roles/openshift_web_console/vars/default_images.yml b/roles/openshift_web_console/vars/default_images.yml
new file mode 100644
index 000000000..7adb8a0d0
--- /dev/null
+++ b/roles/openshift_web_console/vars/default_images.yml
@@ -0,0 +1,4 @@
+---
+__openshift_web_console_prefix: "docker.io/openshift/"
+__openshift_web_console_version: "latest"
+__openshift_web_console_image_name: "origin-web-console"
diff --git a/roles/openshift_web_console/vars/main.yml b/roles/openshift_web_console/vars/main.yml
new file mode 100644
index 000000000..e91048e38
--- /dev/null
+++ b/roles/openshift_web_console/vars/main.yml
@@ -0,0 +1,6 @@
+---
+__console_files_location: "../../../files/origin-components/"
+
+__console_template_file: "console-template.yaml"
+__console_rbac_file: "console-rbac-template.yaml"
+__console_config_file: "console-config.yaml"
diff --git a/roles/openshift_web_console/vars/openshift-enterprise.yml b/roles/openshift_web_console/vars/openshift-enterprise.yml
new file mode 100644
index 000000000..721ac1d27
--- /dev/null
+++ b/roles/openshift_web_console/vars/openshift-enterprise.yml
@@ -0,0 +1,4 @@
+---
+__openshift_web_console_prefix: "registry.access.redhat.com/openshift3/"
+__openshift_web_console_version: "v3.9"
+__openshift_web_console_image_name: "ose-web-console"
diff --git a/roles/os_firewall/README.md b/roles/os_firewall/README.md
index be0b8291a..5ee11f7bd 100644
--- a/roles/os_firewall/README.md
+++ b/roles/os_firewall/README.md
@@ -32,7 +32,7 @@ Use iptables:
---
- hosts: servers
task:
- - include_role:
+ - import_role:
name: os_firewall
vars:
os_firewall_use_firewalld: false
@@ -44,7 +44,7 @@ Use firewalld:
- hosts: servers
vars:
tasks:
- - include_role:
+ - import_role:
name: os_firewall
vars:
os_firewall_use_firewalld: true
diff --git a/roles/template_service_broker/meta/main.yml b/roles/template_service_broker/meta/main.yml
index ab5a0cf08..f1b56b771 100644
--- a/roles/template_service_broker/meta/main.yml
+++ b/roles/template_service_broker/meta/main.yml
@@ -11,3 +11,5 @@ galaxy_info:
- 7
categories:
- cloud
+dependencies:
+- role: lib_utils
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
index 1253c1133..604e94602 100644
--- a/roles/template_service_broker/tasks/install.yml
+++ b/roles/template_service_broker/tasks/install.yml
@@ -1,9 +1,9 @@
---
# Fact setting
-- name: Set default image variables based on deployment type
+- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ item }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "{{ openshift_deployment_type }}.yml"
- "default_images.yml"
- name: set template_service_broker facts
@@ -21,7 +21,6 @@
- command: mktemp -d /tmp/tsb-ansible-XXXXXX
register: mktemp
changed_when: False
- become: no
- copy:
src: "{{ __tsb_files_location }}/{{ item }}"
@@ -44,16 +43,16 @@
- name: Apply template file
shell: >
- {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
+ {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
--param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"
--param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}"
--param NODE_SELECTOR={{ template_service_broker_selector | to_json | quote }}
- | {{ openshift.common.client_binary }} apply -f -
+ | {{ openshift_client_binary }} apply -f -
# reconcile with rbac
- name: Reconcile with RBAC file
shell: >
- {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | {{ openshift.common.client_binary }} auth reconcile -f -
+ {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | {{ openshift_client_binary }} auth reconcile -f -
# Check that the TSB is running
- name: Verify that TSB is running
@@ -80,10 +79,9 @@
# Register with broker
- name: Register TSB with broker
shell: >
- {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift.common.client_binary }} apply -f -
+ {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift_client_binary }} apply -f -
- file:
state: absent
name: "{{ mktemp.stdout }}"
changed_when: False
- become: no
diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml
index 8b5593ff9..db1b558e4 100644
--- a/roles/template_service_broker/tasks/remove.yml
+++ b/roles/template_service_broker/tasks/remove.yml
@@ -2,7 +2,6 @@
- command: mktemp -d /tmp/tsb-ansible-XXXXXX
register: mktemp
changed_when: False
- become: no
- copy:
src: "{{ __tsb_files_location }}/{{ item }}"
@@ -13,11 +12,11 @@
- name: Delete TSB broker
shell: >
- {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift.common.client_binary }} delete --ignore-not-found -f -
+ {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift_client_binary }} delete --ignore-not-found -f -
- name: Delete TSB objects
shell: >
- {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift.common.client_binary }} delete --ignore-not-found -f -
+ {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift_client_binary }} delete --ignore-not-found -f -
- name: empty out tech preview extension file for service console UI
copy:
@@ -32,4 +31,3 @@
state: absent
name: "{{ mktemp.stdout }}"
changed_when: False
- become: no
diff --git a/roles/template_service_broker/vars/default_images.yml b/roles/template_service_broker/vars/default_images.yml
index 77afe1f43..dc164a4db 100644
--- a/roles/template_service_broker/vars/default_images.yml
+++ b/roles/template_service_broker/vars/default_images.yml
@@ -1,4 +1,4 @@
---
-__template_service_broker_prefix: "docker.io/openshift/"
+__template_service_broker_prefix: "docker.io/openshift/origin-"
__template_service_broker_version: "latest"
-__template_service_broker_image_name: "origin"
+__template_service_broker_image_name: "template-service-broker"
diff --git a/roles/template_service_broker/vars/openshift-enterprise.yml b/roles/template_service_broker/vars/openshift-enterprise.yml
index dfab1e01b..b65b97691 100644
--- a/roles/template_service_broker/vars/openshift-enterprise.yml
+++ b/roles/template_service_broker/vars/openshift-enterprise.yml
@@ -1,4 +1,4 @@
---
-__template_service_broker_prefix: "registry.access.redhat.com/openshift3/"
+__template_service_broker_prefix: "registry.access.redhat.com/openshift3/ose-"
__template_service_broker_version: "v3.7"
-__template_service_broker_image_name: "ose"
+__template_service_broker_image_name: "template-service-broker"
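With the prefix/name split above, the IMAGE parameter that the TSB install.yml builds now points at a dedicated broker image rather than the monolithic origin/ose image; roughly (the variable name is illustrative, values taken from the two vars files):

    # origin default:  docker.io/openshift/origin-template-service-broker:latest
    # enterprise:      registry.access.redhat.com/openshift3/ose-template-service-broker:v3.7
    tsb_image: "{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}"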
diff --git a/roles/tuned/tasks/main.yml b/roles/tuned/tasks/main.yml
index e95d274d5..4a28d47b2 100644
--- a/roles/tuned/tasks/main.yml
+++ b/roles/tuned/tasks/main.yml
@@ -11,7 +11,7 @@
block:
- name: Set tuned OpenShift variables
set_fact:
- openshift_tuned_guest_profile: "{{ 'atomic-guest' if openshift.common.is_atomic else 'virtual-guest' }}"
+ openshift_tuned_guest_profile: "{{ 'atomic-guest' if openshift_is_atomic else 'virtual-guest' }}"
- name: Ensure directory structure exists
file:
diff --git a/setup.py b/setup.py
index 5ba050b83..f6b494858 100644
--- a/setup.py
+++ b/setup.py
@@ -348,21 +348,10 @@ class OpenShiftAnsibleSyntaxCheck(Command):
# --syntax-check each entry point playbook
try:
# Create a host group list to avoid WARNING on unmatched host patterns
- host_group_list = [
- 'etcd,masters,nodes,OSEv3',
- 'oo_all_hosts',
- 'oo_etcd_to_config,oo_new_etcd_to_config,oo_first_etcd,oo_etcd_hosts_to_backup,'
- 'oo_etcd_hosts_to_upgrade,oo_etcd_to_migrate',
- 'oo_masters,oo_masters_to_config,oo_first_master,oo_containerized_master_nodes',
- 'oo_nodes_to_config,oo_nodes_to_upgrade',
- 'oo_nodes_use_kuryr,oo_nodes_use_flannel',
- 'oo_nodes_use_calico,oo_nodes_use_nuage,oo_nodes_use_contiv',
- 'oo_lb_to_config',
- 'oo_nfs_to_config',
- 'glusterfs,glusterfs_registry,']
+ tox_ansible_inv = os.environ['TOX_ANSIBLE_INV_PATH']
subprocess.check_output(
- ['ansible-playbook', '-i ' + ','.join(host_group_list),
- '--syntax-check', playbook]
+ ['ansible-playbook', '-i', tox_ansible_inv,
+ '--syntax-check', playbook, '-e', '@{}_extras'.format(tox_ansible_inv)]
)
except subprocess.CalledProcessError as cpe:
print('{}Execution failed: {}{}'.format(
diff --git a/test/ci/README.md b/test/ci/README.md
new file mode 100644
index 000000000..fe80d7c04
--- /dev/null
+++ b/test/ci/README.md
@@ -0,0 +1,14 @@
+This directory contains scripts and other files that are executed by our
+CI integration tests.
+
+CI should call a script. The only arguments that each script should accept
+are:
+
+1) Path to openshift-ansible/playbooks.
+2) Inventory path.
+3) Extra vars path.
+
+Ideally, inventory path and extra vars should live somewhere in this
+subdirectory instead of in the CI's source tree.
+
+Extra vars should typically be unnecessary.
diff --git a/test/ci/extra_vars/default.yml b/test/ci/extra_vars/default.yml
new file mode 100644
index 000000000..5b9a04cdd
--- /dev/null
+++ b/test/ci/extra_vars/default.yml
@@ -0,0 +1,4 @@
+---
+# Using extra_vars is typically not ideal. Please don't use extra_vars
+# unless there is no other way to accomplish a task.
+openshift_this_var_is_not_used: True
diff --git a/test/ci/install.sh b/test/ci/install.sh
new file mode 100755
index 000000000..7172a6765
--- /dev/null
+++ b/test/ci/install.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+set -x
+
+# Argument 1: path to openshift-ansible/playbooks
+# Argument 2: inventory path
+# Argument 3: Extra vars path
+
+echo "Running prerequisites"
+
+ansible-playbook -vv \
+ --inventory $2 \
+ --e @$3 \
+ $1/prerequisites.yml
+
+echo "Running network_manager setup"
+
+playbook_base='/usr/share/ansible/openshift-ansible/playbooks/'
+if [[ -s "$1/openshift-node/network_manager.yml" ]]; then
+ playbook="$1/openshift-node/network_manager.yml"
+else
+ playbook="$1/byo/openshift-node/network_manager.yml"
+fi
+ansible-playbook -vv \
+ --inventory $2 \
+ --e @$3 \
+ ${playbook}
+
+echo "Running openshift-ansible deploy_cluster"
+
+ansible-playbook -vv \
+ --inventory $2 \
+ --e @$3 \
+ $1/deploy_cluster.yml
diff --git a/test/ci/inventory/group_vars/OSEv3/checks.yml b/test/ci/inventory/group_vars/OSEv3/checks.yml
new file mode 100644
index 000000000..26f825b07
--- /dev/null
+++ b/test/ci/inventory/group_vars/OSEv3/checks.yml
@@ -0,0 +1,4 @@
+---
+openshift_check_min_host_disk_gb: 10
+openshift_check_min_host_memory_gb: 8
+openshift_disable_check: package_update,package_availability
diff --git a/test/ci/inventory/group_vars/OSEv3/general.yml b/test/ci/inventory/group_vars/OSEv3/general.yml
new file mode 100644
index 000000000..d2fd3f74c
--- /dev/null
+++ b/test/ci/inventory/group_vars/OSEv3/general.yml
@@ -0,0 +1,23 @@
+---
+debug_level: 5
+osm_default_node_selector: "region=infra"
+osm_controller_args:
+ enable-hostpath-provisioner:
+ - "true"
+openshift_hosted_router_selector: "region=infra"
+openshift_hosted_router_create_certificate: true
+openshift_hosted_registry_selector: "region=infra"
+openshift_master_audit_config:
+ enabled: true
+openshift_master_identity_providers:
+ - name: "allow_all"
+ login: "true"
+ challenge: "true"
+ kind: "AllowAllPasswordIdentityProvider"
+openshift_template_service_broker_namespaces:
+ - "openshift"
+ansible_ssh_user: "ec2-user"
+enable_excluders: "false"
+osm_cluster_network_cidr: "10.128.0.0/14"
+openshift_portal_net: "172.30.0.0/16"
+osm_host_subnet_length: 9
diff --git a/test/ci/inventory/group_vars/OSEv3/logging.yml b/test/ci/inventory/group_vars/OSEv3/logging.yml
new file mode 100644
index 000000000..a55f110ad
--- /dev/null
+++ b/test/ci/inventory/group_vars/OSEv3/logging.yml
@@ -0,0 +1,37 @@
+---
+openshift_logging_use_mux: false
+openshift_logging_use_ops: true
+openshift_logging_es_log_appenders:
+ - "console"
+openshift_logging_fluentd_journal_read_from_head: false
+openshift_logging_fluentd_audit_container_engine: true
+
+openshift_logging_curator_cpu_request: "100m"
+openshift_logging_curator_memory_limit: "32Mi"
+openshift_logging_curator_ops_cpu_request: "100m"
+openshift_logging_curator_ops_memory_limit: "32Mi"
+openshift_logging_elasticsearch_proxy_cpu_request: "100m"
+openshift_logging_elasticsearch_proxy_memory_limit: "32Mi"
+openshift_logging_es_cpu_request: "400m"
+openshift_logging_es_memory_limit: "4Gi"
+openshift_logging_es_ops_cpu_request: "400m"
+openshift_logging_es_ops_memory_limit: "4Gi"
+openshift_logging_eventrouter_cpu_request: "100m"
+openshift_logging_eventrouter_memory_limit: "64Mi"
+openshift_logging_fluentd_cpu_request: "100m"
+openshift_logging_fluentd_memory_limit: "256Mi"
+openshift_logging_kibana_cpu_request: "100m"
+openshift_logging_kibana_memory_limit: "128Mi"
+openshift_logging_kibana_ops_cpu_request: "100m"
+openshift_logging_kibana_ops_memory_limit: "128Mi"
+openshift_logging_kibana_ops_proxy_cpu_request: "100m"
+openshift_logging_kibana_ops_proxy_memory_limit: "64Mi"
+openshift_logging_kibana_proxy_cpu_request: "100m"
+openshift_logging_kibana_proxy_memory_limit: "64Mi"
+openshift_logging_mux_cpu_request: "400m"
+openshift_logging_mux_memory_limit: "256Mi"
+
+# TODO: remove this once we have oauth-proxy images built that are in step
+# with the logging images (version and prefix)
+openshift_logging_elasticsearch_proxy_image_prefix: "docker.io/openshift/"
+openshift_logging_elasticsearch_proxy_image_version: "v1.0.0"
diff --git a/test/ci/inventory/group_vars/all.yml b/test/ci/inventory/group_vars/all.yml
new file mode 100644
index 000000000..7848584d8
--- /dev/null
+++ b/test/ci/inventory/group_vars/all.yml
@@ -0,0 +1,13 @@
+---
+openshift_deployment_type: origin
+etcd_data_dir: "${ETCD_DATA_DIR}"
+openshift_node_port_range: '30000-32000'
+osm_controller_args:
+ enable-hostpath-provisioner:
+ - "true"
+
+# These env vars are created by the CI. This allows us
+# to test specific versions of openshift.
+openshift_pkg_version: "{{ lookup('env', 'ORIGIN_PKG_VERSION') }}"
+openshift_release: "{{ lookup('env', 'ORIGIN_RELEASE') }}"
+oreg_url: "openshift/origin-${component}:{{ lookup('env', 'ORIGIN_COMMIT') }}"
diff --git a/test/ci/inventory/host_vars/localhost.yml b/test/ci/inventory/host_vars/localhost.yml
new file mode 100644
index 000000000..2f308ab60
--- /dev/null
+++ b/test/ci/inventory/host_vars/localhost.yml
@@ -0,0 +1,8 @@
+---
+openshift_node_labels:
+ region: infra
+ zone: default
+openshift_schedulable: True
+ansible_become: True
+ansible_become_user: root
+ansible_connection: local
diff --git a/test/ci/inventory/local.txt b/test/ci/inventory/local.txt
new file mode 100644
index 000000000..90d5924a8
--- /dev/null
+++ b/test/ci/inventory/local.txt
@@ -0,0 +1,23 @@
+[OSEv3]
+
+[OSEv3:children]
+masters
+nodes
+etcd
+lb
+nfs
+
+[lb]
+# Empty, but present to pass integration tests.
+
+[nfs]
+# Empty, but present to pass integration tests.
+
+[masters]
+localhost
+
+[nodes]
+localhost
+
+[etcd]
+localhost
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml
index 006a71bd9..451ac0972 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml
@@ -4,7 +4,7 @@
vars:
image: preflight-aos-package-checks
l_host_vars:
- deployment_type: openshift-enterprise
+ openshift_deployment_type: openshift-enterprise
- name: Fail as required packages cannot be installed
hosts: all
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml
index b4f18e3b5..e37487f13 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml
@@ -3,7 +3,7 @@
vars:
image: preflight-aos-package-checks
l_host_vars:
- deployment_type: origin
+ openshift_deployment_type: origin
- name: Succeeds as Origin packages are public
hosts: all
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
index 4e2b8a50c..9c845e1e5 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
@@ -3,7 +3,7 @@
vars:
image: preflight-aos-package-checks
l_host_vars:
- deployment_type: openshift-enterprise
+ openshift_deployment_type: openshift-enterprise
openshift_release: 3.2
- name: Success when AOS version matches openshift_release
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml
index e1f8d74e6..9ae811939 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml
@@ -4,7 +4,7 @@
vars:
image: preflight-aos-package-checks
l_host_vars:
- deployment_type: openshift-enterprise
+ openshift_deployment_type: openshift-enterprise
openshift_release: 3.2
- name: Failure when AOS version doesn't match openshift_release
diff --git a/test/openshift_version_tests.py b/test/openshift_version_tests.py
deleted file mode 100644
index 36b8263bb..000000000
--- a/test/openshift_version_tests.py
+++ /dev/null
@@ -1,32 +0,0 @@
-""" Tests for the openshift_version Ansible filter module. """
-# pylint: disable=missing-docstring,invalid-name
-
-import os
-import sys
-import unittest
-
-sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../filter_plugins/")] + sys.path
-
-# pylint: disable=import-error
-import openshift_version # noqa: E402
-
-
-class OpenShiftVersionTests(unittest.TestCase):
-
- openshift_version_filters = openshift_version.FilterModule()
-
- def test_gte_filters(self):
- for major, minor_start, minor_end in self.openshift_version_filters.versions:
- for minor in range(minor_start, minor_end):
- # Test positive case
- self.assertTrue(
- self.openshift_version_filters._filters["oo_version_gte_{}_{}".format(major, minor)](
- "{}.{}".format(major, minor + 1)))
- # Test negative case
- self.assertFalse(
- self.openshift_version_filters._filters["oo_version_gte_{}_{}".format(major, minor)](
- "{}.{}".format(major, minor)))
-
- def test_get_filters(self):
- self.assertTrue(
- self.openshift_version_filters.filters() == self.openshift_version_filters._filters)
diff --git a/test/tox-inventory.txt b/test/tox-inventory.txt
new file mode 100644
index 000000000..ed9e946ab
--- /dev/null
+++ b/test/tox-inventory.txt
@@ -0,0 +1,109 @@
+[OSEv3]
+localhost
+
+
+[OSEv3:children]
+etcd
+masters
+nodes
+oo_all_hosts
+oo_etcd_to_config
+oo_new_etcd_to_config
+oo_first_etcd
+oo_etcd_hosts_to_backup
+oo_etcd_hosts_to_upgrade
+oo_etcd_to_migrate
+oo_hosts_containerized_managed_true
+oo_masters
+oo_masters_to_config
+oo_first_master
+oo_containerized_master_nodes
+oo_nodes_to_config
+oo_nodes_to_upgrade
+oo_nodes_use_kuryr
+oo_nodes_use_flannel
+oo_nodes_use_calico
+oo_nodes_use_nuage
+oo_nodes_use_contiv
+oo_lb_to_config
+oo_nfs_to_config
+glusterfs
+glusterfs_registry
+
+[etcd]
+localhost
+
+[masters]
+localhost
+
+[nodes]
+localhost
+
+[oo_all_hosts]
+localhost
+
+[oo_etcd_to_config]
+localhost
+
+[oo_new_etcd_to_config]
+localhost
+
+[oo_first_etcd]
+localhost
+
+[oo_etcd_hosts_to_backup]
+localhost
+
+[oo_etcd_hosts_to_upgrade]
+localhost
+
+[oo_etcd_to_migrate]
+localhost
+
+[oo_masters]
+localhost
+
+[oo_masters_to_config]
+localhost
+
+[oo_first_master]
+localhost
+
+[oo_containerized_master_nodes]
+localhost
+
+[oo_nodes_to_config]
+localhost
+
+[oo_nodes_to_upgrade]
+localhost
+
+[oo_nodes_use_kuryr]
+localhost
+
+[oo_nodes_use_flannel]
+localhost
+
+[oo_nodes_use_calico]
+localhost
+
+[oo_nodes_use_nuage]
+localhost
+
+[oo_nodes_use_contiv]
+localhost
+
+[oo_lb_to_config]
+localhost
+
+[oo_nfs_to_config]
+localhost
+
+[glusterfs]
+localhost
+
+[glusterfs_registry]
+localhost
+
+[oo_hosts_containerized_managed_true]
+localhost
diff --git a/test/tox-inventory.txt_extras b/test/tox-inventory.txt_extras
new file mode 100644
index 000000000..f73610570
--- /dev/null
+++ b/test/tox-inventory.txt_extras
@@ -0,0 +1,3 @@
+---
+hostvars:
+ localhost: {}
diff --git a/tox.ini b/tox.ini
index 9c35915d5..c11d12a58 100644
--- a/tox.ini
+++ b/tox.ini
@@ -8,6 +8,7 @@ skip_missing_interpreters=True
[testenv]
skip_install=True
+setenv = TOX_ANSIBLE_INV_PATH = {toxinidir}/test/tox-inventory.txt
deps =
-rrequirements.txt
-rtest-requirements.txt
diff --git a/utils/src/ooinstall/ansible_plugins/facts_callback.py b/utils/src/ooinstall/ansible_plugins/facts_callback.py
index 433e29dde..6251cd22b 100644
--- a/utils/src/ooinstall/ansible_plugins/facts_callback.py
+++ b/utils/src/ooinstall/ansible_plugins/facts_callback.py
@@ -7,11 +7,7 @@ import yaml
from ansible.plugins.callback import CallbackBase
from ansible.parsing.yaml.dumper import AnsibleDumper
-# ansible.compat.six goes away with Ansible 2.4
-try:
- from ansible.compat.six import u
-except ImportError:
- from ansible.module_utils.six import u
+from ansible.module_utils.six import u
# pylint: disable=super-init-not-called